Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig22
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/ac.c108
-rw-r--r--drivers/acpi/acpi_pad.c2
-rw-r--r--drivers/acpi/acpi_processor.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c14
-rw-r--r--drivers/acpi/acpica/exprep.c4
-rw-r--r--drivers/acpi/acpica/exregion.c64
-rw-r--r--drivers/acpi/acpica/utdelete.c6
-rw-r--r--drivers/acpi/acpica/utids.c3
-rw-r--r--drivers/acpi/apei/hest.c2
-rw-r--r--drivers/acpi/arm64/iort.c108
-rw-r--r--drivers/acpi/battery.c255
-rw-r--r--drivers/acpi/button.c2
-rw-r--r--drivers/acpi/cm_sbs.c87
-rw-r--r--drivers/acpi/dock.c2
-rw-r--r--drivers/acpi/ec.c8
-rw-r--r--drivers/acpi/evged.c2
-rw-r--r--drivers/acpi/nfit/nfit.h2
-rw-r--r--drivers/acpi/numa/srat.c10
-rw-r--r--drivers/acpi/osl.c63
-rw-r--r--drivers/acpi/processor_idle.c13
-rw-r--r--drivers/acpi/property.c17
-rw-r--r--drivers/acpi/resource.c2
-rw-r--r--drivers/acpi/scan.c8
-rw-r--r--drivers/acpi/spcr.c4
-rw-r--r--drivers/acpi/sysfs.c12
-rw-r--r--drivers/acpi/tables.c25
-rw-r--r--drivers/android/binder.c23
-rw-r--r--drivers/android/binder_alloc.c1
-rw-r--r--drivers/android/binderfs.c3
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/atm/atmtcp.c10
-rw-r--r--drivers/atm/zatm.c2
-rw-r--r--drivers/base/arch_topology.c11
-rw-r--r--drivers/base/power/domain.c194
-rw-r--r--drivers/base/power/domain_governor.c12
-rw-r--r--drivers/base/power/sysfs.c9
-rw-r--r--drivers/base/regmap/regmap-irq.c53
-rw-r--r--drivers/base/regmap/regmap.c33
-rw-r--r--drivers/block/brd.c5
-rw-r--r--drivers/block/drbd/Kconfig2
-rw-r--r--drivers/block/drbd/drbd_int.h8
-rw-r--r--drivers/block/drbd/drbd_main.c71
-rw-r--r--drivers/block/drbd/drbd_nl.c10
-rw-r--r--drivers/block/drbd/drbd_proc.c1
-rw-r--r--drivers/block/drbd/drbd_receiver.c2
-rw-r--r--drivers/block/drbd/drbd_req.c8
-rw-r--r--drivers/block/drbd/drbd_state.c2
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/floppy.c7
-rw-r--r--drivers/block/loop.c17
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c3
-rw-r--r--drivers/block/nbd.c5
-rw-r--r--drivers/block/null_blk.h1
-rw-r--r--drivers/block/null_blk_main.c34
-rw-r--r--drivers/block/null_blk_zoned.c16
-rw-r--r--drivers/block/pktcdvd.c15
-rw-r--r--drivers/block/ps3vram.c20
-rw-r--r--drivers/block/rbd.c2
-rw-r--r--drivers/block/rsxx/core.c30
-rw-r--r--drivers/block/rsxx/dev.c14
-rw-r--r--drivers/block/skd_main.c9
-rw-r--r--drivers/block/umem.c11
-rw-r--r--drivers/block/virtio_blk.c3
-rw-r--r--drivers/block/xen-blkfront.c3
-rw-r--r--drivers/block/zram/zram_drv.c14
-rw-r--r--drivers/bus/fsl-mc/dprc-driver.c88
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c109
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-msi.c36
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-private.h6
-rw-r--r--drivers/bus/fsl-mc/mc-io.c2
-rw-r--r--drivers/bus/fsl-mc/mc-sys.c4
-rw-r--r--drivers/bus/ti-sysc.c6
-rw-r--r--drivers/cdrom/cdrom.c28
-rw-r--r--drivers/char/Kconfig6
-rw-r--r--drivers/char/hw_random/Kconfig27
-rw-r--r--drivers/char/hw_random/Makefile2
-rw-r--r--drivers/char/hw_random/ba431-rng.c235
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c5
-rw-r--r--drivers/char/hw_random/core.c2
-rw-r--r--drivers/char/hw_random/hisi-rng.c2
-rw-r--r--drivers/char/hw_random/ingenic-rng.c154
-rw-r--r--drivers/char/hw_random/ks-sa-rng.c2
-rw-r--r--drivers/char/hw_random/nomadik-rng.c2
-rw-r--r--drivers/char/hw_random/npcm-rng.c2
-rw-r--r--drivers/char/hw_random/octeon-rng.c6
-rw-r--r--drivers/char/hw_random/omap-rng.c11
-rw-r--r--drivers/char/hw_random/pic32-rng.c2
-rw-r--r--drivers/char/hw_random/st-rng.c3
-rw-r--r--drivers/char/hw_random/virtio-rng.c2
-rw-r--r--drivers/char/mwave/smapi.c4
-rw-r--r--drivers/char/random.c1
-rw-r--r--drivers/char/raw.c8
-rw-r--r--drivers/char/tpm/eventlog/acpi.c63
-rw-r--r--drivers/char/tpm/tpm-chip.c9
-rw-r--r--drivers/char/tpm/tpm.h5
-rw-r--r--drivers/char/tpm/tpm2-space.c26
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.c70
-rw-r--r--drivers/char/tpm/tpmrm-dev.c2
-rw-r--r--drivers/char/ttyprintk.c2
-rw-r--r--drivers/char/virtio_console.c8
-rw-r--r--drivers/clk/clk-gate.c2
-rw-r--r--drivers/clk/clk-scmi.c22
-rw-r--r--drivers/clk/imx/clk-imx8mp.c1
-rw-r--r--drivers/clk/imx/clk-vf610.c1
-rw-r--r--drivers/clk/spear/clk-vco-pll.c2
-rw-r--r--drivers/clk/st/clkgen-fsyn.c1
-rw-r--r--drivers/clocksource/Kconfig15
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/ingenic-sysost.c539
-rw-r--r--drivers/clocksource/ingenic-timer.c182
-rw-r--r--drivers/clocksource/nomadik-mtu.c11
-rw-r--r--drivers/clocksource/sh_cmt.c2
-rw-r--r--drivers/clocksource/timer-atmel-tcb.c103
-rw-r--r--drivers/clocksource/timer-ti-32k.c2
-rw-r--r--drivers/clocksource/timer-ti-dm.c2
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c10
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c2
-rw-r--r--drivers/cpufreq/cpufreq.c122
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c22
-rw-r--r--drivers/cpufreq/cpufreq_governor.c2
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c24
-rw-r--r--drivers/cpufreq/cpufreq_performance.c14
-rw-r--r--drivers/cpufreq/cpufreq_powersave.c18
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c18
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c2
-rw-r--r--drivers/cpufreq/freq_table.c6
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c187
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c2
-rw-r--r--drivers/cpufreq/omap-cpufreq.c2
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c2
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c2
-rw-r--r--drivers/cpufreq/powernow-k8.c4
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c19
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c2
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c14
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c2
-rw-r--r--drivers/cpufreq/unicore2-cpufreq.c76
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c2
-rw-r--r--drivers/cpuidle/Kconfig.arm10
-rw-r--r--drivers/cpuidle/Makefile5
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c74
-rw-r--r--drivers/cpuidle/cpuidle-psci.c141
-rw-r--r--drivers/cpuidle/cpuidle-psci.h11
-rw-r--r--drivers/cpuidle/cpuidle-tegra.c8
-rw-r--r--drivers/crypto/Kconfig19
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c46
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h3
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c42
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c12
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h8
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c39
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c12
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h26
-rw-r--r--drivers/crypto/amlogic/Kconfig2
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-cipher.c27
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-core.c6
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl.h3
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c20
-rw-r--r--drivers/crypto/bcm/cipher.c72
-rw-r--r--drivers/crypto/caam/caamalg.c37
-rw-r--r--drivers/crypto/caam/caamalg_qi.c8
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c42
-rw-r--r--drivers/crypto/caam/caamhash.c2
-rw-r--r--drivers/crypto/caam/compat.h1
-rw-r--r--drivers/crypto/caam/ctrl.c12
-rw-r--r--drivers/crypto/caam/dpseci.c18
-rw-r--r--drivers/crypto/caam/dpseci.h2
-rw-r--r--drivers/crypto/caam/dpseci_cmd.h1
-rw-r--r--drivers/crypto/caam/error.c3
-rw-r--r--drivers/crypto/caam/jr.c3
-rw-r--r--drivers/crypto/caam/regs.h11
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c28
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c24
-rw-r--r--drivers/crypto/cavium/cpt/request_manager.h26
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_aead.c4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_skcipher.c16
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c1
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-galois.c1
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c34
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c1
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c4
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h4
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c8
-rw-r--r--drivers/crypto/ccp/ccp-dev.c4
-rw-r--r--drivers/crypto/ccp/ccp-dev.h13
-rw-r--r--drivers/crypto/ccp/ccp-ops.c43
-rw-r--r--drivers/crypto/ccp/sp-dev.c6
-rw-r--r--drivers/crypto/ccp/sp-dev.h6
-rw-r--r--drivers/crypto/ccp/sp-pci.c17
-rw-r--r--drivers/crypto/ccp/sp-platform.c2
-rw-r--r--drivers/crypto/ccree/cc_cipher.c149
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c87
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h3
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c111
-rw-r--r--drivers/crypto/hisilicon/qm.c43
-rw-r--r--drivers/crypto/hisilicon/qm.h1
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c58
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h4
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c95
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c132
-rw-r--r--drivers/crypto/hisilicon/zip/zip.h2
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c6
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c5
-rw-r--r--drivers/crypto/img-hash.c2
-rw-r--r--drivers/crypto/inside-secure/safexcel.c13
-rw-r--r--drivers/crypto/inside-secure/safexcel.h3
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c47
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c18
-rw-r--r--drivers/crypto/ixp4xx_crypto.c6
-rw-r--r--drivers/crypto/marvell/cesa/cesa.c11
-rw-r--r--drivers/crypto/marvell/cesa/cesa.h1
-rw-r--r--drivers/crypto/marvell/cesa/cipher.c18
-rw-r--r--drivers/crypto/marvell/cesa/hash.c6
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c8
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h2
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.c51
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.h6
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c9
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h24
-rw-r--r--drivers/crypto/mediatek/mtk-aes.c63
-rw-r--r--drivers/crypto/mxs-dcp.c33
-rw-r--r--drivers/crypto/n2_core.c3
-rw-r--r--drivers/crypto/omap-aes.c41
-rw-r--r--drivers/crypto/omap-aes.h3
-rw-r--r--drivers/crypto/omap-des.c6
-rw-r--r--drivers/crypto/omap-sham.c18
-rw-r--r--drivers/crypto/picoxcell_crypto.c55
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c48
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h48
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_drv.c48
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c48
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h48
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_drv.c48
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c48
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h48
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_drv.c48
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c48
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h48
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_drv.c48
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h102
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_engine.c52
-rw-r--r--drivers/crypto/qat/qat_common/adf_admin.c144
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c50
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg.c48
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg.h48
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg_common.h72
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg_strings.h48
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg_user.h58
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h60
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c52
-rw-r--r--drivers/crypto/qat/qat_common/adf_dev_mgr.c56
-rw-r--r--drivers/crypto/qat/qat_common/adf_hw_arbiter.c48
-rw-r--r--drivers/crypto/qat/qat_common/adf_init.c48
-rw-r--r--drivers/crypto/qat/qat_common/adf_isr.c48
-rw-r--r--drivers/crypto/qat/qat_common/adf_pf2vf_msg.c49
-rw-r--r--drivers/crypto/qat/qat_common/adf_pf2vf_msg.h48
-rw-r--r--drivers/crypto/qat/qat_common/adf_sriov.c48
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport.c110
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport.h52
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_access_macros.h54
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_debug.c48
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_internal.h75
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf2pf_msg.c48
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf_isr.c48
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_fw.h106
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h145
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_fw_la.h206
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h48
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_fw_pke.h100
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_hal.h48
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_hw.h64
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_uclo.h54
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c211
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c61
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.c48
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.h48
-rw-r--r--drivers/crypto/qat/qat_common/qat_hal.c88
-rw-r--r--drivers/crypto/qat/qat_common/qat_uclo.c77
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c74
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h48
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_drv.c48
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c48
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h48
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_drv.c48
-rw-r--r--drivers/crypto/qce/cipher.h3
-rw-r--r--drivers/crypto/qce/common.h2
-rw-r--r--drivers/crypto/qce/sha.c36
-rw-r--r--drivers/crypto/qce/skcipher.c43
-rw-r--r--drivers/crypto/sa2ul.c2420
-rw-r--r--drivers/crypto/sa2ul.h403
-rw-r--r--drivers/crypto/sahara.c96
-rw-r--r--drivers/crypto/talitos.c117
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c18
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c3
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c4
-rw-r--r--drivers/crypto/xilinx/zynqmp-aes-gcm.c1
-rw-r--r--drivers/dax/super.c2
-rw-r--r--drivers/devfreq/devfreq-event.c4
-rw-r--r--drivers/devfreq/devfreq.c195
-rw-r--r--drivers/devfreq/rk3399_dmc.c42
-rw-r--r--drivers/dma/ti/k3-udma-glue.c42
-rw-r--r--drivers/dma/ti/k3-udma.c34
-rw-r--r--drivers/edac/edac_device_sysfs.c1
-rw-r--r--drivers/edac/edac_mc.c4
-rw-r--r--drivers/edac/edac_pci_sysfs.c2
-rw-r--r--drivers/edac/ghes_edac.c323
-rw-r--r--drivers/edac/i10nm_base.c12
-rw-r--r--drivers/edac/mce_amd.c3
-rw-r--r--drivers/edac/pnd2_edac.c1
-rw-r--r--drivers/edac/skx_base.c2
-rw-r--r--drivers/firewire/ohci.c14
-rw-r--r--drivers/firmware/arm_scmi/Makefile4
-rw-r--r--drivers/firmware/arm_scmi/base.c108
-rw-r--r--drivers/firmware/arm_scmi/clock.c20
-rw-r--r--drivers/firmware/arm_scmi/common.h4
-rw-r--r--drivers/firmware/arm_scmi/driver.c15
-rw-r--r--drivers/firmware/arm_scmi/notify.c1526
-rw-r--r--drivers/firmware/arm_scmi/notify.h68
-rw-r--r--drivers/firmware/arm_scmi/perf.c151
-rw-r--r--drivers/firmware/arm_scmi/power.c92
-rw-r--r--drivers/firmware/arm_scmi/reset.c96
-rw-r--r--drivers/firmware/arm_scmi/scmi_pm_domain.c12
-rw-r--r--drivers/firmware/arm_scmi/sensors.c69
-rw-r--r--drivers/firmware/arm_scmi/smc.c1
-rw-r--r--drivers/firmware/efi/embedded-firmware.c9
-rw-r--r--drivers/firmware/imx/Makefile2
-rw-r--r--drivers/firmware/imx/imx-scu-irq.c2
-rw-r--r--drivers/firmware/imx/imx-scu-soc.c (renamed from drivers/soc/imx/soc-imx-scu.c)83
-rw-r--r--drivers/firmware/imx/imx-scu.c4
-rw-r--r--drivers/firmware/imx/rm.c45
-rw-r--r--drivers/firmware/imx/scu-pd.c14
-rw-r--r--drivers/firmware/qcom_scm.c8
-rw-r--r--drivers/firmware/qemu_fw_cfg.c7
-rw-r--r--drivers/firmware/smccc/Kconfig9
-rw-r--r--drivers/firmware/smccc/Makefile1
-rw-r--r--drivers/firmware/smccc/soc_id.c114
-rw-r--r--drivers/firmware/stratix10-rsu.c170
-rw-r--r--drivers/firmware/stratix10-svc.c17
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c436
-rw-r--r--drivers/firmware/tegra/bpmp.c6
-rw-r--r--drivers/firmware/ti_sci.c2
-rw-r--r--drivers/firmware/ti_sci.h2
-rw-r--r--drivers/firmware/turris-mox-rwtm.c166
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c19
-rw-r--r--drivers/fpga/dfl-afu-error.c17
-rw-r--r--drivers/fpga/dfl-afu-main.c32
-rw-r--r--drivers/fpga/dfl-fme-error.c18
-rw-r--r--drivers/fpga/dfl-fme-main.c6
-rw-r--r--drivers/fpga/dfl-pci.c78
-rw-r--r--drivers/fpga/dfl.c313
-rw-r--r--drivers/fpga/dfl.h63
-rw-r--r--drivers/fpga/fpga-bridge.c6
-rw-r--r--drivers/fpga/fpga-mgr.c4
-rw-r--r--drivers/fpga/xilinx-spi.c61
-rw-r--r--drivers/gpio/gpiolib-of.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c36
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c1
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c5
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/drm_gem.c10
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c2
-rw-r--r--drivers/gpu/drm/drm_of.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c2
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c30
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c6
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c16
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c4
-rw-r--r--drivers/greybus/es2.c2
-rw-r--r--drivers/greybus/interface.c2
-rw-r--r--drivers/hwmon/fam15h_power.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c17
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c49
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c68
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c13
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c26
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h1
-rw-r--r--drivers/hwtracing/coresight/coresight.c166
-rw-r--r--drivers/i2c/busses/Kconfig11
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-puv3.c275
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c26
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c2
-rw-r--r--drivers/i2c/i2c-core-slave.c7
-rw-r--r--drivers/ide/ide-acpi.c2
-rw-r--r--drivers/ide/ide-atapi.c2
-rw-r--r--drivers/ide/ide-io-std.c4
-rw-r--r--drivers/ide/ide-io.c8
-rw-r--r--drivers/ide/ide-sysfs.c2
-rw-r--r--drivers/ide/ide-taskfile.c1
-rw-r--r--drivers/ide/umc8672.c2
-rw-r--r--drivers/idle/intel_idle.c55
-rw-r--r--drivers/infiniband/core/cq.c14
-rw-r--r--drivers/infiniband/core/device.c6
-rw-r--r--drivers/infiniband/core/ucma.c4
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c2
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c5
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c29
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c10
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c33
-rw-r--r--drivers/infiniband/sw/rdmavt/rc.c4
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c2
-rw-r--r--drivers/input/keyboard/omap-keypad.c2
-rw-r--r--drivers/input/serio/hil_mlc.c2
-rw-r--r--drivers/input/serio/i8042-unicore32io.h70
-rw-r--r--drivers/input/serio/i8042.h2
-rw-r--r--drivers/input/serio/serio_raw.c2
-rw-r--r--drivers/input/touchscreen/sur40.c4
-rw-r--r--drivers/interconnect/core.c16
-rw-r--r--drivers/interconnect/qcom/bcm-voter.c6
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/intel/iommu.c2
-rw-r--r--drivers/iommu/intel/irq_remapping.c8
-rw-r--r--drivers/iommu/of_iommu.c81
-rw-r--r--drivers/irqchip/Kconfig3
-rw-r--r--drivers/irqchip/irq-ativic32.c2
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c8
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c11
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c8
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c5
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c105
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c19
-rw-r--r--drivers/irqchip/irq-gic-v3.c2
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-imx-intmux.c70
-rw-r--r--drivers/irqchip/irq-loongson-htpic.c6
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c32
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c11
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c7
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c30
-rw-r--r--drivers/irqchip/irq-mips-gic.c10
-rw-r--r--drivers/irqchip/irq-mtk-cirq.c4
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c12
-rw-r--r--drivers/irqchip/irq-stm32-exti.c166
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c8
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c2
-rw-r--r--drivers/irqchip/irq-vic.c26
-rw-r--r--drivers/irqchip/irqchip.c29
-rw-r--r--drivers/irqchip/qcom-pdc.c8
-rw-r--r--drivers/leds/leds-s3c24xx.c36
-rw-r--r--drivers/lightnvm/core.c8
-rw-r--r--drivers/lightnvm/pblk-init.c16
-rw-r--r--drivers/lightnvm/pblk-read.c2
-rw-r--r--drivers/macintosh/macio_asic.c4
-rw-r--r--drivers/md/Kconfig8
-rw-r--r--drivers/md/bcache/Kconfig2
-rw-r--r--drivers/md/bcache/Makefile2
-rw-r--r--drivers/md/bcache/alloc.c2
-rw-r--r--drivers/md/bcache/bcache.h33
-rw-r--r--drivers/md/bcache/bset.c2
-rw-r--r--drivers/md/bcache/btree.c14
-rw-r--r--drivers/md/bcache/features.c75
-rw-r--r--drivers/md/bcache/features.h86
-rw-r--r--drivers/md/bcache/io.c2
-rw-r--r--drivers/md/bcache/journal.c9
-rw-r--r--drivers/md/bcache/movinggc.c8
-rw-r--r--drivers/md/bcache/request.c72
-rw-r--r--drivers/md/bcache/request.h4
-rw-r--r--drivers/md/bcache/super.c302
-rw-r--r--drivers/md/bcache/sysfs.c14
-rw-r--r--drivers/md/bcache/writeback.c22
-rw-r--r--drivers/md/bcache/writeback.h19
-rw-r--r--drivers/md/dm-cache-target.c25
-rw-r--r--drivers/md/dm-clone-target.c25
-rw-r--r--drivers/md/dm-crypt.c8
-rw-r--r--drivers/md/dm-delay.c2
-rw-r--r--drivers/md/dm-era-target.c17
-rw-r--r--drivers/md/dm-integrity.c4
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-mpath.c2
-rw-r--r--drivers/md/dm-raid.c12
-rw-r--r--drivers/md/dm-raid1.c2
-rw-r--r--drivers/md/dm-rq.c3
-rw-r--r--drivers/md/dm-snap-persistent.c4
-rw-r--r--drivers/md/dm-snap.c6
-rw-r--r--drivers/md/dm-table.c61
-rw-r--r--drivers/md/dm-thin.c20
-rw-r--r--drivers/md/dm-verity-target.c2
-rw-r--r--drivers/md/dm-writecache.c4
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c75
-rw-r--r--drivers/md/dm.h1
-rw-r--r--drivers/md/md-bitmap.c2
-rw-r--r--drivers/md/md-cluster.c1
-rw-r--r--drivers/md/md-faulty.c4
-rw-r--r--drivers/md/md-linear.c28
-rw-r--r--drivers/md/md-multipath.c27
-rw-r--r--drivers/md/md.c230
-rw-r--r--drivers/md/md.h13
-rw-r--r--drivers/md/raid0.c24
-rw-r--r--drivers/md/raid1.c45
-rw-r--r--drivers/md/raid10.c74
-rw-r--r--drivers/md/raid5-cache.c28
-rw-r--r--drivers/md/raid5-ppl.c11
-rw-r--r--drivers/md/raid5.c423
-rw-r--r--drivers/md/raid5.h53
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c2
-rw-r--r--drivers/media/tuners/qt1010.c4
-rw-r--r--drivers/media/usb/gspca/vicam.c2
-rw-r--r--drivers/media/usb/uvc/uvc_video.c8
-rw-r--r--drivers/memory/Kconfig15
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/brcmstb_dpfe.c7
-rw-r--r--drivers/memory/bt1-l2-ctl.c2
-rw-r--r--drivers/memory/da8xx-ddrctl.c2
-rw-r--r--drivers/memory/emif-asm-offsets.c10
-rw-r--r--drivers/memory/emif.c23
-rw-r--r--drivers/memory/fsl_ifc.c30
-rw-r--r--drivers/memory/jz4780-nemc.c17
-rw-r--r--drivers/memory/mtk-smi.c2
-rw-r--r--drivers/memory/mvebu-devbus.c20
-rw-r--r--drivers/memory/of_memory.c32
-rw-r--r--drivers/memory/of_memory.h21
-rw-r--r--drivers/memory/omap-gpmc.c66
-rw-r--r--drivers/memory/pl172.c19
-rw-r--r--drivers/memory/renesas-rpc-if.c603
-rw-r--r--drivers/memory/samsung/Kconfig7
-rw-r--r--drivers/memory/samsung/exynos-srom.c22
-rw-r--r--drivers/memory/samsung/exynos5422-dmc.c29
-rw-r--r--drivers/memory/tegra/Kconfig14
-rw-r--r--drivers/memory/tegra/Makefile4
-rw-r--r--drivers/memory/tegra/mc.h1
-rw-r--r--drivers/memory/tegra/tegra124-emc.c7
-rw-r--r--drivers/memory/tegra/tegra186-emc.c25
-rw-r--r--drivers/memory/tegra/tegra186.c4
-rw-r--r--drivers/memory/tegra/tegra20-emc.c34
-rw-r--r--drivers/memory/tegra/tegra210-emc-cc-r21021.c1775
-rw-r--r--drivers/memory/tegra/tegra210-emc-core.c2100
-rw-r--r--drivers/memory/tegra/tegra210-emc-table.c90
-rw-r--r--drivers/memory/tegra/tegra210-emc.h1016
-rw-r--r--drivers/memory/tegra/tegra210-mc.h50
-rw-r--r--drivers/memory/tegra/tegra30-emc.c122
-rw-r--r--drivers/memory/ti-aemif.c16
-rw-r--r--drivers/memory/ti-emif-pm.c2
-rw-r--r--drivers/memstick/host/jmb38x_ms.c2
-rw-r--r--drivers/memstick/host/tifm_ms.c2
-rw-r--r--drivers/mfd/ioc3.c6
-rw-r--r--drivers/misc/Kconfig4
-rw-r--r--drivers/misc/ad525x_dpot.c2
-rw-r--r--drivers/misc/c2port/core.c2
-rw-r--r--drivers/misc/cardreader/Makefile2
-rw-r--r--drivers/misc/cardreader/rtl8411.c8
-rw-r--r--drivers/misc/cardreader/rts5209.c5
-rw-r--r--drivers/misc/cardreader/rts5227.c5
-rw-r--r--drivers/misc/cardreader/rts5228.c747
-rw-r--r--drivers/misc/cardreader/rts5228.h168
-rw-r--r--drivers/misc/cardreader/rts5229.c5
-rw-r--r--drivers/misc/cardreader/rts5249.c28
-rw-r--r--drivers/misc/cardreader/rts5260.c23
-rw-r--r--drivers/misc/cardreader/rts5261.c32
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c129
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h5
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c2
-rw-r--r--drivers/misc/cb710/core.c28
-rw-r--r--drivers/misc/cb710/sgbuf2.c1
-rw-r--r--drivers/misc/cxl/flash.c4
-rw-r--r--drivers/misc/cxl/hcalls.c42
-rw-r--r--drivers/misc/cxl/sysfs.c2
-rw-r--r--drivers/misc/cxl/vphb.c4
-rw-r--r--drivers/misc/echo/echo.c6
-rw-r--r--drivers/misc/eeprom/at24.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c4
-rw-r--r--drivers/misc/enclosure.c8
-rw-r--r--drivers/misc/genwqe/card_base.c32
-rw-r--r--drivers/misc/genwqe/card_ddcb.c20
-rw-r--r--drivers/misc/genwqe/card_debugfs.c2
-rw-r--r--drivers/misc/genwqe/card_dev.c24
-rw-r--r--drivers/misc/genwqe/card_sysfs.c8
-rw-r--r--drivers/misc/genwqe/card_utils.c30
-rw-r--r--drivers/misc/habanalabs/Makefile11
-rw-r--r--drivers/misc/habanalabs/common/Makefile7
-rw-r--r--drivers/misc/habanalabs/common/asid.c (renamed from drivers/misc/habanalabs/asid.c)0
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c (renamed from drivers/misc/habanalabs/command_buffer.c)82
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c (renamed from drivers/misc/habanalabs/command_submission.c)92
-rw-r--r--drivers/misc/habanalabs/common/context.c (renamed from drivers/misc/habanalabs/context.c)39
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c (renamed from drivers/misc/habanalabs/debugfs.c)2
-rw-r--r--drivers/misc/habanalabs/common/device.c (renamed from drivers/misc/habanalabs/device.c)88
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c (renamed from drivers/misc/habanalabs/firmware_if.c)104
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h (renamed from drivers/misc/habanalabs/habanalabs.h)172
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c (renamed from drivers/misc/habanalabs/habanalabs_drv.c)1
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c (renamed from drivers/misc/habanalabs/habanalabs_ioctl.c)24
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c (renamed from drivers/misc/habanalabs/hw_queue.c)165
-rw-r--r--drivers/misc/habanalabs/common/hwmon.c (renamed from drivers/misc/habanalabs/hwmon.c)0
-rw-r--r--drivers/misc/habanalabs/common/irq.c (renamed from drivers/misc/habanalabs/irq.c)38
-rw-r--r--drivers/misc/habanalabs/common/memory.c (renamed from drivers/misc/habanalabs/memory.c)5
-rw-r--r--drivers/misc/habanalabs/common/mmu.c (renamed from drivers/misc/habanalabs/mmu.c)3
-rw-r--r--drivers/misc/habanalabs/common/pci.c (renamed from drivers/misc/habanalabs/pci.c)151
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c (renamed from drivers/misc/habanalabs/sysfs.c)3
-rw-r--r--drivers/misc/habanalabs/gaudi/Makefile2
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c909
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h24
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c12
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c2
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c5
-rw-r--r--drivers/misc/habanalabs/goya/Makefile2
-rw-r--r--drivers/misc/habanalabs/goya/goya.c196
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h24
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c15
-rw-r--r--drivers/misc/habanalabs/goya/goya_security.c2
-rw-r--r--drivers/misc/habanalabs/include/common/armcp_if.h (renamed from drivers/misc/habanalabs/include/armcp_if.h)14
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h (renamed from drivers/misc/habanalabs/include/hl_boot_if.h)14
-rw-r--r--drivers/misc/habanalabs/include/common/qman_if.h (renamed from drivers/misc/habanalabs/include/qman_if.h)0
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h21
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h3
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_packets.h4
-rw-r--r--drivers/misc/hpilo.c11
-rw-r--r--drivers/misc/hpilo.h22
-rw-r--r--drivers/misc/ibmasm/command.c6
-rw-r--r--drivers/misc/ibmasm/dot_command.c6
-rw-r--r--drivers/misc/ibmasm/event.c4
-rw-r--r--drivers/misc/ibmasm/r_heartbeat.c2
-rw-r--r--drivers/misc/ibmvmc.c6
-rw-r--r--drivers/misc/lattice-ecp3-config.c19
-rw-r--r--drivers/misc/lkdtm/bugs.c53
-rw-r--r--drivers/misc/lkdtm/heap.c9
-rw-r--r--drivers/misc/lkdtm/lkdtm.h2
-rw-r--r--drivers/misc/lkdtm/perms.c22
-rw-r--r--drivers/misc/lkdtm/usercopy.c7
-rw-r--r--drivers/misc/mei/Kconfig2
-rw-r--r--drivers/misc/mei/bus-fixup.c23
-rw-r--r--drivers/misc/mei/bus.c2
-rw-r--r--drivers/misc/mei/client.c8
-rw-r--r--drivers/misc/mei/hbm.c74
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c4
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.h2
-rw-r--r--drivers/misc/mei/hw-me-regs.h4
-rw-r--r--drivers/misc/mei/hw-me.c66
-rw-r--r--drivers/misc/mei/hw-me.h9
-rw-r--r--drivers/misc/mei/hw-txe.c5
-rw-r--r--drivers/misc/mei/hw.h8
-rw-r--r--drivers/misc/mei/main.c31
-rw-r--r--drivers/misc/mei/mei_dev.h4
-rw-r--r--drivers/misc/mei/pci-me.c10
-rw-r--r--drivers/misc/mic/Kconfig4
-rw-r--r--drivers/misc/mic/card/mic_debugfs.c10
-rw-r--r--drivers/misc/mic/cosm/cosm_debugfs.c4
-rw-r--r--drivers/misc/mic/cosm/cosm_main.c1
-rw-r--r--drivers/misc/mic/host/mic_debugfs.c8
-rw-r--r--drivers/misc/mic/host/mic_intr.c4
-rw-r--r--drivers/misc/mic/host/mic_main.c1
-rw-r--r--drivers/misc/mic/host/mic_x100.c13
-rw-r--r--drivers/misc/mic/host/mic_x100.h9
-rw-r--r--drivers/misc/mic/scif/scif_api.c6
-rw-r--r--drivers/misc/mic/scif/scif_dma.c3
-rw-r--r--drivers/misc/mic/scif/scif_epd.c9
-rw-r--r--drivers/misc/mic/scif/scif_fence.c34
-rw-r--r--drivers/misc/mic/scif/scif_nm.c17
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c18
-rw-r--r--drivers/misc/mic/scif/scif_ports.c9
-rw-r--r--drivers/misc/mic/scif/scif_rma.c12
-rw-r--r--drivers/misc/mic/vop/vop_main.c9
-rw-r--r--drivers/misc/ocxl/config.c18
-rw-r--r--drivers/misc/pch_phub.c57
-rw-r--r--drivers/misc/phantom.c20
-rw-r--r--drivers/misc/pti.c16
-rw-r--r--drivers/misc/sgi-gru/grufault.c1
-rw-r--r--drivers/misc/sgi-gru/gruhandles.c1
-rw-r--r--drivers/misc/sgi-gru/grukservices.c1
-rw-r--r--drivers/misc/sgi-xp/xp_main.c4
-rw-r--r--drivers/misc/sram-exec.c2
-rw-r--r--drivers/misc/ti-st/st_core.c79
-rw-r--r--drivers/misc/ti-st/st_kim.c71
-rw-r--r--drivers/misc/tifm_7xx1.c30
-rw-r--r--drivers/misc/uacce/uacce.c9
-rw-r--r--drivers/mmc/core/block.c11
-rw-r--r--drivers/mmc/host/jz4740_mmc.c12
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mtd/mtdchar.c56
-rw-r--r--drivers/mtd/nand/raw/nand_ecc.c2
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c2
-rw-r--r--drivers/mtd/parsers/afs.c4
-rw-r--r--drivers/mtd/spi-nor/controllers/Kconfig11
-rw-r--r--drivers/mtd/spi-nor/controllers/Makefile1
-rw-r--r--drivers/mtd/ubi/eba.c2
-rw-r--r--drivers/mux/adgs1408.c6
-rw-r--r--drivers/net/bareudp.c29
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/cortina/gemini.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c35
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c38
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c14
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c10
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c4
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c6
-rw-r--r--drivers/net/usb/hso.c5
-rw-r--r--drivers/net/usb/lan78xx.c113
-rw-r--r--drivers/net/vxlan.c16
-rw-r--r--drivers/net/wan/farsync.c4
-rw-r--r--drivers/net/wan/z85230.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/lo.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/xmit.c12
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c8
-rw-r--r--drivers/nvdimm/blk.c5
-rw-r--r--drivers/nvdimm/btt.c5
-rw-r--r--drivers/nvdimm/pmem.c5
-rw-r--r--drivers/nvme/host/Makefile1
-rw-r--r--drivers/nvme/host/core.c582
-rw-r--r--drivers/nvme/host/fabrics.c2
-rw-r--r--drivers/nvme/host/fabrics.h3
-rw-r--r--drivers/nvme/host/fc.c10
-rw-r--r--drivers/nvme/host/hwmon.c5
-rw-r--r--drivers/nvme/host/lightnvm.c4
-rw-r--r--drivers/nvme/host/multipath.c55
-rw-r--r--drivers/nvme/host/nvme.h100
-rw-r--r--drivers/nvme/host/pci.c197
-rw-r--r--drivers/nvme/host/rdma.c134
-rw-r--r--drivers/nvme/host/tcp.c109
-rw-r--r--drivers/nvme/host/zns.c256
-rw-r--r--drivers/nvme/target/Kconfig12
-rw-r--r--drivers/nvme/target/Makefile1
-rw-r--r--drivers/nvme/target/admin-cmd.c26
-rw-r--r--drivers/nvme/target/configfs.c117
-rw-r--r--drivers/nvme/target/core.c81
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fc.c30
-rw-r--r--drivers/nvme/target/fcloop.c29
-rw-r--r--drivers/nvme/target/loop.c17
-rw-r--r--drivers/nvme/target/nvmet.h60
-rw-r--r--drivers/nvme/target/passthru.c544
-rw-r--r--drivers/nvme/target/rdma.c17
-rw-r--r--drivers/nvme/target/tcp.c13
-rw-r--r--drivers/nvmem/Kconfig3
-rw-r--r--drivers/nvmem/core.c43
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c4
-rw-r--r--drivers/nvmem/qfprom.c315
-rw-r--r--drivers/nvmem/sc27xx-efuse.c27
-rw-r--r--drivers/nvmem/sprd-efuse.c4
-rw-r--r--drivers/of/base.c42
-rw-r--r--drivers/of/device.c8
-rw-r--r--drivers/of/irq.c34
-rw-r--r--drivers/opp/core.c3
-rw-r--r--drivers/opp/of.c76
-rw-r--r--drivers/opp/ti-opp-supply.c2
-rw-r--r--drivers/parisc/sba_iommu.c14
-rw-r--r--drivers/pci/controller/vmd.c3
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c14
-rw-r--r--drivers/pci/msi.c9
-rw-r--r--drivers/pci/pci-acpi.c6
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pcie/aer.c2
-rw-r--r--drivers/pci/quirks.c13
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c3
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile17
-rw-r--r--drivers/phy/allwinner/Kconfig2
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c3
-rw-r--r--drivers/phy/allwinner/phy-sun6i-mipi-dphy.c4
-rw-r--r--drivers/phy/broadcom/Kconfig8
-rw-r--r--drivers/phy/broadcom/Makefile1
-rw-r--r--drivers/phy/broadcom/phy-bcm63xx-usbh.c457
-rw-r--r--drivers/phy/cadence/phy-cadence-salvo.c2
-rw-r--r--drivers/phy/marvell/phy-armada38x-comphy.c45
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-utmi.c2
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c3
-rw-r--r--drivers/phy/phy-core.c5
-rw-r--r--drivers/phy/phy-xgene.c2
-rw-r--r--drivers/phy/qualcomm/Kconfig34
-rw-r--r--drivers/phy/qualcomm/Makefile4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c571
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c510
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h7
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c85
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-i.h131
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c172
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.h168
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c226
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h226
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs.c648
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c63
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c2
-rw-r--r--drivers/phy/samsung/Kconfig17
-rw-r--r--drivers/phy/samsung/Makefile1
-rw-r--r--drivers/phy/samsung/phy-exynos-dp-video.c4
-rw-r--r--drivers/phy/samsung/phy-exynos-mipi-video.c4
-rw-r--r--drivers/phy/samsung/phy-exynos-pcie.c2
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c16
-rw-r--r--drivers/phy/samsung/phy-exynos7-ufs.h81
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.c366
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.h139
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c2
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c4
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c11
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c5
-rw-r--r--drivers/phy/xilinx/Kconfig13
-rw-r--r--drivers/phy/xilinx/Makefile3
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c993
-rw-r--r--drivers/pinctrl/qcom/Kconfig2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c74
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c1
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c5
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c114
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c45
-rw-r--r--drivers/platform/x86/Kconfig23
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/apple-gmux.c16
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c77
-rw-r--r--drivers/platform/x86/dell-wmi.c28
-rw-r--r--drivers/platform/x86/hdaps.c4
-rw-r--r--drivers/platform/x86/intel-hid.c2
-rw-r--r--drivers/platform/x86/intel-vbtn.c2
-rw-r--r--drivers/platform/x86/intel_atomisp2_led.c116
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_common.c14
-rw-r--r--drivers/platform/x86/intel_pmc_core.c4
-rw-r--r--drivers/platform/x86/mlx-platform.c141
-rw-r--r--drivers/platform/x86/pcengines-apuv2.c3
-rw-r--r--drivers/platform/x86/system76_acpi.c12
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c157
-rw-r--r--drivers/powercap/idle_inject.c10
-rw-r--r--drivers/powercap/intel_rapl_common.c77
-rw-r--r--drivers/powercap/intel_rapl_msr.c15
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/pwm-puv3.c150
-rw-r--r--drivers/regulator/Kconfig55
-rw-r--r--drivers/regulator/Makefile6
-rw-r--r--drivers/regulator/ab8500.c7
-rw-r--r--drivers/regulator/anatop-regulator.c2
-rw-r--r--drivers/regulator/core.c115
-rw-r--r--drivers/regulator/cpcap-regulator.c18
-rw-r--r--drivers/regulator/cros-ec-regulator.c252
-rw-r--r--drivers/regulator/da9211-regulator.c30
-rw-r--r--drivers/regulator/dbx500-prcmu.c8
-rw-r--r--drivers/regulator/devres.c54
-rw-r--r--drivers/regulator/fan53880.c184
-rw-r--r--drivers/regulator/fixed.c2
-rw-r--r--drivers/regulator/gpio-regulator.c9
-rw-r--r--drivers/regulator/hi6421-regulator.c2
-rw-r--r--drivers/regulator/hi6421v530-regulator.c2
-rw-r--r--drivers/regulator/lp873x-regulator.c2
-rw-r--r--drivers/regulator/lp87565-regulator.c23
-rw-r--r--drivers/regulator/ltc3676.c2
-rw-r--r--drivers/regulator/max14577-regulator.c2
-rw-r--r--drivers/regulator/max8907-regulator.c6
-rw-r--r--drivers/regulator/max8997-regulator.c14
-rw-r--r--drivers/regulator/max8998.c2
-rw-r--r--drivers/regulator/mp886x.c5
-rw-r--r--drivers/regulator/mt6397-regulator.c17
-rw-r--r--drivers/regulator/of_regulator.c2
-rw-r--r--drivers/regulator/pbias-regulator.c2
-rw-r--r--drivers/regulator/pca9450-regulator.c833
-rw-r--r--drivers/regulator/pfuze100-regulator.c9
-rw-r--r--drivers/regulator/pwm-regulator.c2
-rw-r--r--drivers/regulator/qcom-labibb-regulator.c175
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c14
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c6
-rw-r--r--drivers/regulator/qcom_smd-regulator.c43
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c24
-rw-r--r--drivers/regulator/qcom_usb_vbus-regulator.c97
-rw-r--r--drivers/regulator/stpmic1_regulator.c2
-rw-r--r--drivers/regulator/sy8827n.c185
-rw-r--r--drivers/regulator/tps65023-regulator.c2
-rw-r--r--drivers/regulator/tps6507x-regulator.c2
-rw-r--r--drivers/regulator/tps65086-regulator.c2
-rw-r--r--drivers/regulator/tps65217-regulator.c9
-rw-r--r--drivers/regulator/tps65218-regulator.c6
-rw-r--r--drivers/regulator/tps65912-regulator.c2
-rw-r--r--drivers/regulator/wm8350-regulator.c10
-rw-r--r--drivers/regulator/wm8400-regulator.c6
-rw-r--r--drivers/reset/reset-intel-gw.c24
-rw-r--r--drivers/reset/reset-simple.c23
-rw-r--r--drivers/reset/reset-simple.h41
-rw-r--r--drivers/reset/reset-socfpga.c3
-rw-r--r--drivers/reset/reset-sunxi.c3
-rw-r--r--drivers/reset/reset-ti-sci.c2
-rw-r--r--drivers/reset/reset-ti-syscon.c2
-rw-r--r--drivers/reset/reset-uniphier-glue.c3
-rw-r--r--drivers/rtc/Kconfig9
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-puv3.c286
-rw-r--r--drivers/s390/block/dasd.c2
-rw-r--r--drivers/s390/block/dasd_diag.c33
-rw-r--r--drivers/s390/block/dcssblk.c12
-rw-r--r--drivers/s390/block/scm_blk.c3
-rw-r--r--drivers/s390/block/xpram.c8
-rw-r--r--drivers/s390/char/tty3270.c12
-rw-r--r--drivers/s390/char/zcore.c57
-rw-r--r--drivers/s390/cio/qdio.h9
-rw-r--r--drivers/s390/cio/qdio_debug.c2
-rw-r--r--drivers/s390/cio/qdio_main.c41
-rw-r--r--drivers/s390/crypto/ap_bus.c319
-rw-r--r--drivers/s390/crypto/ap_bus.h69
-rw-r--r--drivers/s390/crypto/ap_queue.c209
-rw-r--r--drivers/s390/crypto/pkey_api.c4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c176
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c69
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c129
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c30
-rw-r--r--drivers/s390/crypto/zcrypt_error.h4
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c64
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c112
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.h4
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c8
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c2
-rw-r--r--drivers/scsi/scsi_lib.c20
-rw-r--r--drivers/scsi/sd_zbc.c6
-rw-r--r--drivers/soc/imx/Kconfig10
-rw-r--r--drivers/soc/imx/Makefile1
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c46
-rw-r--r--drivers/soc/qcom/Kconfig2
-rw-r--r--drivers/soc/qcom/pdr_interface.c4
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c165
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c19
-rw-r--r--drivers/soc/qcom/rpmh.c4
-rw-r--r--drivers/soc/qcom/smd-rpm.c5
-rw-r--r--drivers/soc/qcom/socinfo.c65
-rw-r--r--drivers/soc/renesas/Kconfig11
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/r8a774e1-sysc.c43
-rw-r--r--drivers/soc/renesas/rcar-rst.c1
-rw-r--r--drivers/soc/renesas/rcar-sysc.c3
-rw-r--r--drivers/soc/renesas/rcar-sysc.h1
-rw-r--r--drivers/soc/renesas/renesas-soc.c8
-rw-r--r--drivers/soc/samsung/Kconfig3
-rw-r--r--drivers/soc/samsung/Makefile1
-rw-r--r--drivers/soc/samsung/exynos-regulator-coupler.c221
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c2
-rw-r--r--drivers/soc/ti/k3-ringacc.c200
-rw-r--r--drivers/soc/ti/knav_qmss_acc.c2
-rw-r--r--drivers/soc/ux500/ux500-soc-id.c22
-rw-r--r--drivers/soc/versatile/soc-integrator.c48
-rw-r--r--drivers/soc/versatile/soc-realview.c48
-rw-r--r--drivers/soundwire/Makefile10
-rw-r--r--drivers/soundwire/bus.c130
-rw-r--r--drivers/soundwire/bus_type.c19
-rw-r--r--drivers/soundwire/cadence_master.c70
-rw-r--r--drivers/soundwire/cadence_master.h4
-rw-r--r--drivers/soundwire/intel.c549
-rw-r--r--drivers/soundwire/intel.h22
-rw-r--r--drivers/soundwire/intel_init.c356
-rw-r--r--drivers/soundwire/qcom.c4
-rw-r--r--drivers/soundwire/stream.c98
-rw-r--r--drivers/spi/Kconfig38
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/atmel-quadspi.c14
-rw-r--r--drivers/spi/spi-altera.c179
-rw-r--r--drivers/spi/spi-amd.c2
-rw-r--r--drivers/spi/spi-at91-usart.c7
-rw-r--r--drivers/spi/spi-atmel.c4
-rw-r--r--drivers/spi/spi-bcm2835.c51
-rw-r--r--drivers/spi/spi-bcm2835aux.c6
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c12
-rw-r--r--drivers/spi/spi-bcm63xx.c12
-rw-r--r--drivers/spi/spi-bitbang.c6
-rw-r--r--drivers/spi/spi-cadence-quadspi.c (renamed from drivers/mtd/spi-nor/controllers/cadence-quadspi.c)541
-rw-r--r--drivers/spi/spi-cadence.c2
-rw-r--r--drivers/spi/spi-cavium-thunderx.c1
-rw-r--r--drivers/spi/spi-coldfire-qspi.c4
-rw-r--r--drivers/spi/spi-davinci.c8
-rw-r--r--drivers/spi/spi-dw-dma.c14
-rw-r--r--drivers/spi/spi-ep93xx.c4
-rw-r--r--drivers/spi/spi-fsl-lpspi.c79
-rw-r--r--drivers/spi/spi-fsl-qspi.c2
-rw-r--r--drivers/spi/spi-fsl-spi.c18
-rw-r--r--drivers/spi/spi-geni-qcom.c372
-rw-r--r--drivers/spi/spi-img-spfi.c56
-rw-r--r--drivers/spi/spi-imx.c248
-rw-r--r--drivers/spi/spi-lantiq-ssc.c177
-rw-r--r--drivers/spi/spi-loopback-test.c16
-rw-r--r--drivers/spi/spi-mem.c16
-rw-r--r--drivers/spi/spi-meson-spicc.c4
-rw-r--r--drivers/spi/spi-meson-spifc.c2
-rw-r--r--drivers/spi/spi-mt65xx.c3
-rw-r--r--drivers/spi/spi-mtk-nor.c10
-rw-r--r--drivers/spi/spi-mxic.c3
-rw-r--r--drivers/spi/spi-npcm-fiu.c6
-rw-r--r--drivers/spi/spi-npcm-pspi.c28
-rw-r--r--drivers/spi/spi-oc-tiny.c2
-rw-r--r--drivers/spi/spi-omap-100k.c1
-rw-r--r--drivers/spi/spi-omap-uwire.c4
-rw-r--r--drivers/spi/spi-omap2-mcspi.c19
-rw-r--r--drivers/spi/spi-orion.c2
-rw-r--r--drivers/spi/spi-pl022.c12
-rw-r--r--drivers/spi/spi-ppc4xx.c106
-rw-r--r--drivers/spi/spi-pxa2xx.c2
-rw-r--r--drivers/spi/spi-qcom-qspi.c117
-rw-r--r--drivers/spi/spi-rockchip.c46
-rw-r--r--drivers/spi/spi-rpc-if.c216
-rw-r--r--drivers/spi/spi-s3c64xx.c9
-rw-r--r--drivers/spi/spi-sun4i.c2
-rw-r--r--drivers/spi/spi-sun6i.c77
-rw-r--r--drivers/spi/spi-ti-qspi.c2
-rw-r--r--drivers/spi/spi-topcliff-pch.c59
-rw-r--r--drivers/spi/spi-zynq-qspi.c14
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c6
-rw-r--r--drivers/spi/spi.c73
-rw-r--r--drivers/spi/spidev.c21
-rw-r--r--drivers/ssb/driver_chipcommon.c4
-rw-r--r--drivers/staging/most/dim2/dim2.c2
-rw-r--r--drivers/staging/octeon/ethernet-tx.c2
-rw-r--r--drivers/tee/optee/core.c27
-rw-r--r--drivers/tee/optee/device.c38
-rw-r--r--drivers/tee/optee/optee_private.h10
-rw-r--r--drivers/thermal/cpufreq_cooling.c12
-rw-r--r--drivers/tty/cyclades.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c4
-rw-r--r--drivers/tty/hvc/hvsi.c2
-rw-r--r--drivers/tty/isicom.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c210
-rw-r--r--drivers/tty/serial/sunsab.c2
-rw-r--r--drivers/tty/serial/sunzilog.c2
-rw-r--r--drivers/tty/sysrq.c2
-rw-r--r--drivers/tty/vt/keyboard.c2
-rw-r--r--drivers/uio/uio_dmem_genirq.c19
-rw-r--r--drivers/uio/uio_pdrv_genirq.c24
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c4
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c6
-rw-r--r--drivers/usb/host/fhci-sched.c2
-rw-r--r--drivers/usb/musb/cppi_dma.c2
-rw-r--r--drivers/usb/storage/sddr55.c4
-rw-r--r--drivers/vdpa/Kconfig1
-rw-r--r--drivers/vhost/net.c6
-rw-r--r--drivers/vhost/scsi.c2
-rw-r--r--drivers/vhost/vhost.c5
-rw-r--r--drivers/video/fbdev/Kconfig11
-rw-r--r--drivers/video/fbdev/Makefile1
-rw-r--r--drivers/video/fbdev/fb-puv3.c836
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_maven.c6
-rw-r--r--drivers/video/fbdev/pm3fb.c6
-rw-r--r--drivers/video/fbdev/riva/riva_hw.c3
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c266
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.h23
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c1
-rw-r--r--drivers/virtio/Kconfig2
-rw-r--r--drivers/virtio/virtio_balloon.c11
-rw-r--r--drivers/virtio/virtio_ring.c6
-rw-r--r--drivers/xen/Kconfig1
1105 files changed, 36247 insertions, 16514 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ce2730d61a8f..7540a5179a47 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -32,7 +32,7 @@ menuconfig ACPI
Linux support for ACPI is based on Intel Corporation's ACPI
Component Architecture (ACPI CA). For more information on the
ACPI CA, see:
- <http://acpica.org/>
+ <https://acpica.org/>
ACPI is an open industry specification originally co-developed by
Hewlett-Packard, Intel, Microsoft, Phoenix, and Toshiba. Currently,
@@ -40,8 +40,7 @@ menuconfig ACPI
the UEFI Forum and any UEFI member can join the ASWG and contribute
to the ACPI specification.
The specification is available at:
- <http://www.acpi.info>
- <http://www.uefi.org/acpi/specs>
+ <https://uefi.org/specifications>
if ACPI
@@ -99,23 +98,6 @@ config ACPI_SLEEP
depends on ACPI_SYSTEM_POWER_STATES_SUPPORT
default y
-config ACPI_PROCFS_POWER
- bool "Deprecated power /proc/acpi directories"
- depends on X86 && PROC_FS
- help
- For backwards compatibility, this option allows
- deprecated power /proc/acpi/ directories to exist, even when
- they have been replaced by functions in /sys.
- The deprecated directories (and their replacements) include:
- /proc/acpi/battery/* (/sys/class/power_supply/*) and
- /proc/acpi/ac_adapter/* (sys/class/power_supply/*).
- This option has no effect on /proc/acpi/ directories
- and functions which do not yet exist in /sys.
- This option, together with the proc directories, will be
- deleted in the future.
-
- Say N to delete power /proc/acpi/ directories that have moved to /sys.
-
config ACPI_REV_OVERRIDE_POSSIBLE
bool "Allow supported ACPI revision to be overridden"
depends on X86
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index e81e1ebbfb32..9a957544e357 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -55,7 +55,6 @@ acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
acpi-$(CONFIG_X86) += x86/apple.o
acpi-$(CONFIG_X86) += x86/utils.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
-acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
acpi-y += acpi_lpat.o
acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 69d2db13886b..46a64e9fa716 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -13,10 +13,6 @@
#include <linux/types.h>
#include <linux/dmi.h>
#include <linux/delay.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/acpi.h>
@@ -66,12 +62,6 @@ static int acpi_ac_resume(struct device *dev);
#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-#endif
-
-
static int ac_sleep_before_get_state_ms;
static int ac_check_pmic = 1;
@@ -150,77 +140,6 @@ static enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-
-static struct proc_dir_entry *acpi_ac_dir;
-
-static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_ac *ac = seq->private;
-
-
- if (!ac)
- return 0;
-
- if (acpi_ac_get_state(ac)) {
- seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
- return 0;
- }
-
- seq_puts(seq, "state: ");
- switch (ac->state) {
- case ACPI_AC_STATUS_OFFLINE:
- seq_puts(seq, "off-line\n");
- break;
- case ACPI_AC_STATUS_ONLINE:
- seq_puts(seq, "on-line\n");
- break;
- default:
- seq_puts(seq, "unknown\n");
- break;
- }
-
- return 0;
-}
-
-static int acpi_ac_add_fs(struct acpi_ac *ac)
-{
- struct proc_dir_entry *entry = NULL;
-
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
- if (!acpi_device_dir(ac->device)) {
- acpi_device_dir(ac->device) =
- proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir);
- if (!acpi_device_dir(ac->device))
- return -ENODEV;
- }
-
- /* 'state' [R] */
- entry = proc_create_single_data(ACPI_AC_FILE_STATE, S_IRUGO,
- acpi_device_dir(ac->device), acpi_ac_seq_show, ac);
- if (!entry)
- return -ENODEV;
- return 0;
-}
-
-static int acpi_ac_remove_fs(struct acpi_ac *ac)
-{
-
- if (acpi_device_dir(ac->device)) {
- remove_proc_entry(ACPI_AC_FILE_STATE,
- acpi_device_dir(ac->device));
- remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir);
- acpi_device_dir(ac->device) = NULL;
- }
-
- return 0;
-}
-#endif
-
/* --------------------------------------------------------------------------
Driver Model
-------------------------------------------------------------------------- */
@@ -236,7 +155,7 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
- /* fall through */
+ fallthrough;
case ACPI_AC_NOTIFY_STATUS:
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
@@ -348,11 +267,6 @@ static int acpi_ac_add(struct acpi_device *device)
psy_cfg.drv_data = ac;
ac->charger_desc.name = acpi_device_bid(device);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- result = acpi_ac_add_fs(ac);
- if (result)
- goto end;
-#endif
ac->charger_desc.type = POWER_SUPPLY_TYPE_MAINS;
ac->charger_desc.properties = ac_props;
ac->charger_desc.num_properties = ARRAY_SIZE(ac_props);
@@ -372,9 +286,6 @@ static int acpi_ac_add(struct acpi_device *device)
register_acpi_notifier(&ac->battery_nb);
end:
if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_remove_fs(ac);
-#endif
kfree(ac);
}
@@ -418,10 +329,6 @@ static int acpi_ac_remove(struct acpi_device *device)
power_supply_unregister(ac->charger);
unregister_acpi_notifier(&ac->battery_nb);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_remove_fs(ac);
-#endif
-
kfree(ac);
return 0;
@@ -447,18 +354,8 @@ static int __init acpi_ac_init(void)
}
}
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_dir = acpi_lock_ac_dir();
- if (!acpi_ac_dir)
- return -ENODEV;
-#endif
-
-
result = acpi_bus_register_driver(&acpi_ac_driver);
if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
return -ENODEV;
}
@@ -468,9 +365,6 @@ static int __init acpi_ac_init(void)
static void __exit acpi_ac_exit(void)
{
acpi_bus_unregister_driver(&acpi_ac_driver);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
}
module_init(acpi_ac_init);
module_exit(acpi_ac_exit);
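
The hunks above strip the deprecated /proc/acpi/ac_adapter interface from the AC driver, leaving the power_supply class as the only way to read adapter state. A minimal user-space sketch of the replacement path follows; the supply name "AC" is an assumption, since the actual name under /sys/class/power_supply/ varies by platform:

/* Hedged sketch: read the AC adapter state through the sysfs
 * power_supply class. The "AC" supply name is platform-dependent.
 */
#include <stdio.h>

int main(void)
{
	int online;
	/* "online" reads 1 when the adapter is plugged in, 0 otherwise */
	FILE *f = fopen("/sys/class/power_supply/AC/online", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &online) == 1)
		printf("AC adapter: %s\n", online ? "on-line" : "off-line");
	fclose(f);
	return 0;
}
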
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index e7dc0133f817..6cc4c92d9ff9 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -88,7 +88,7 @@ static void round_robin_cpu(unsigned int tsk_index)
cpumask_var_t tmp;
int cpu;
unsigned long min_weight = -1;
- unsigned long uninitialized_var(preferred_cpu);
+ unsigned long preferred_cpu;
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return;
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 5379bc3f275d..b51ddf3bb616 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -79,7 +79,7 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
* PIIX4 models.
*/
errata.piix4.throttle = 1;
- /* fall through*/
+ fallthrough;
case 2: /* PIIX4E */
case 3: /* PIIX4M */
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index aefc0145e583..89be3ccdad53 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -38,6 +38,7 @@ acpi_ev_system_memory_region_setup(acpi_handle handle,
union acpi_operand_object *region_desc =
(union acpi_operand_object *)handle;
struct acpi_mem_space_context *local_region_context;
+ struct acpi_mem_mapping *mm;
ACPI_FUNCTION_TRACE(ev_system_memory_region_setup);
@@ -46,13 +47,14 @@ acpi_ev_system_memory_region_setup(acpi_handle handle,
local_region_context =
(struct acpi_mem_space_context *)*region_context;
- /* Delete a cached mapping if present */
+ /* Delete memory mappings if present */
- if (local_region_context->mapped_length) {
- acpi_os_unmap_memory(local_region_context->
- mapped_logical_address,
- local_region_context->
- mapped_length);
+ while (local_region_context->first_mm) {
+ mm = local_region_context->first_mm;
+ local_region_context->first_mm = mm->next_mm;
+ acpi_os_unmap_memory(mm->logical_address,
+ mm->length);
+ ACPI_FREE(mm);
}
ACPI_FREE(local_region_context);
*region_context = NULL;
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index a4e306690a21..4a0f03157e08 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -473,10 +473,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
(u8)access_byte_width;
}
}
- /* An additional reference for the container */
-
- acpi_ut_add_reference(obj_desc->field.region_obj);
-
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
obj_desc->field.start_field_bit_offset,
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index d15a66de26c0..4914dbc44517 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -41,6 +41,7 @@ acpi_ex_system_memory_space_handler(u32 function,
acpi_status status = AE_OK;
void *logical_addr_ptr = NULL;
struct acpi_mem_space_context *mem_info = region_context;
+ struct acpi_mem_mapping *mm = mem_info->cur_mm;
u32 length;
acpi_size map_length;
acpi_size page_boundary_map_length;
@@ -96,20 +97,37 @@ acpi_ex_system_memory_space_handler(u32 function,
* Is 1) Address below the current mapping? OR
* 2) Address beyond the current mapping?
*/
- if ((address < mem_info->mapped_physical_address) ||
- (((u64) address + length) > ((u64)
- mem_info->mapped_physical_address +
- mem_info->mapped_length))) {
+ if (!mm || (address < mm->physical_address) ||
+ ((u64) address + length > (u64) mm->physical_address + mm->length)) {
/*
- * The request cannot be resolved by the current memory mapping;
- * Delete the existing mapping and create a new one.
+ * The request cannot be resolved by the current memory mapping.
+ *
+ * Look for an existing saved mapping covering the address range
+ * at hand. If found, save it as the current one and carry out
+ * the access.
*/
- if (mem_info->mapped_length) {
+ for (mm = mem_info->first_mm; mm; mm = mm->next_mm) {
+ if (mm == mem_info->cur_mm)
+ continue;
+
+ if (address < mm->physical_address)
+ continue;
- /* Valid mapping, delete it */
+ if ((u64) address + length >
+ (u64) mm->physical_address + mm->length)
+ continue;
- acpi_os_unmap_memory(mem_info->mapped_logical_address,
- mem_info->mapped_length);
+ mem_info->cur_mm = mm;
+ goto access;
+ }
+
+ /* Create a new mappings list entry */
+ mm = ACPI_ALLOCATE_ZEROED(sizeof(*mm));
+ if (!mm) {
+ ACPI_ERROR((AE_INFO,
+ "Unable to save memory mapping at 0x%8.8X%8.8X, size %u",
+ ACPI_FORMAT_UINT64(address), length));
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
/*
@@ -143,29 +161,39 @@ acpi_ex_system_memory_space_handler(u32 function,
/* Create a new mapping starting at the address given */
- mem_info->mapped_logical_address =
- acpi_os_map_memory(address, map_length);
- if (!mem_info->mapped_logical_address) {
+ logical_addr_ptr = acpi_os_map_memory(address, map_length);
+ if (!logical_addr_ptr) {
ACPI_ERROR((AE_INFO,
"Could not map memory at 0x%8.8X%8.8X, size %u",
ACPI_FORMAT_UINT64(address),
(u32)map_length));
- mem_info->mapped_length = 0;
+ ACPI_FREE(mm);
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Save the physical address and mapping size */
- mem_info->mapped_physical_address = address;
- mem_info->mapped_length = map_length;
+ mm->logical_address = logical_addr_ptr;
+ mm->physical_address = address;
+ mm->length = map_length;
+
+ /*
+	 * Add the new entry to the mappings list and save it as the
+ * current mapping.
+ */
+ mm->next_mm = mem_info->first_mm;
+ mem_info->first_mm = mm;
+
+ mem_info->cur_mm = mm;
}
+access:
/*
* Generate a logical pointer corresponding to the address we want to
* access
*/
- logical_addr_ptr = mem_info->mapped_logical_address +
- ((u64) address - (u64) mem_info->mapped_physical_address);
+ logical_addr_ptr = mm->logical_address +
+ ((u64) address - (u64) mm->physical_address);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index c365faf4e6cd..4c0d4e434196 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -568,11 +568,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
next_object = object->buffer_field.buffer_obj;
break;
- case ACPI_TYPE_LOCAL_REGION_FIELD:
-
- next_object = object->field.region_obj;
- break;
-
case ACPI_TYPE_LOCAL_BANK_FIELD:
next_object = object->bank_field.bank_obj;
@@ -613,6 +608,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
}
break;
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_REGION:
default:
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 3bb06935a2ad..3e68864ef242 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -263,8 +263,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
* 3) Size of the actual CID strings
*/
cid_list_size = sizeof(struct acpi_pnp_device_id_list) +
- ((count - 1) * sizeof(struct acpi_pnp_device_id)) +
- string_area_size;
+ (count * sizeof(struct acpi_pnp_device_id)) + string_area_size;
cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size);
if (!cid_list) {
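The new size accounts for count full ID elements after the list header instead of count - 1. A sketch of the corrected computation over a hypothetical mirror of the layout, assuming the header is followed by a flexible array of IDs and then the string storage:

struct pnp_id {
	unsigned int length;
	char *string;
};

struct pnp_id_list {
	unsigned int count;
	unsigned int list_size;
	struct pnp_id ids[];	/* count elements precede the strings */
};

static size_t cid_list_size(unsigned int count, size_t string_area_size)
{
	return sizeof(struct pnp_id_list) +
	       count * sizeof(struct pnp_id) + string_area_size;
}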
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 953a2fae8b15..6e980fe16772 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -227,7 +227,7 @@ __setup("hest_disable", setup_hest_disable);
void __init acpi_hest_init(void)
{
acpi_status status;
- int rc = -ENODEV;
+ int rc;
unsigned int ghes_count = 0;
if (hest_disable) {
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 28a6b387e80e..ec782e4a0fe4 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -264,15 +264,31 @@ static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
+ struct acpi_device *adev;
struct acpi_iort_named_component *ncomp;
+ struct device *nc_dev = dev;
+
+ /*
+ * Walk the device tree to find a device with an
+ * ACPI companion; there is no point in scanning
+ * IORT for a device matching a named component if
+ * the device does not have an ACPI companion to
+ * start with.
+ */
+ do {
+ adev = ACPI_COMPANION(nc_dev);
+ if (adev)
+ break;
+
+ nc_dev = nc_dev->parent;
+ } while (nc_dev);
if (!adev)
goto out;
status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
if (ACPI_FAILURE(status)) {
- dev_warn(dev, "Can't get device full path name\n");
+ dev_warn(nc_dev, "Can't get device full path name\n");
goto out;
}
@@ -534,7 +550,6 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
node = iort_get_iort_node(dev->fwnode);
if (node)
return node;
-
/*
* if not, then it should be a platform device defined in
* DSDT/SSDT (with Named Component node in IORT)
@@ -543,32 +558,29 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
iort_match_node_callback, dev);
}
- /* Find a PCI root bus */
pbus = to_pci_dev(dev)->bus;
- while (!pci_is_root_bus(pbus))
- pbus = pbus->parent;
return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
iort_match_node_callback, &pbus->dev);
}
/**
- * iort_msi_map_rid() - Map a MSI requester ID for a device
+ * iort_msi_map_id() - Map a MSI input ID for a device
* @dev: The device for which the mapping is to be done.
- * @req_id: The device requester ID.
+ * @input_id: The device input ID.
*
- * Returns: mapped MSI RID on success, input requester ID otherwise
+ * Returns: mapped MSI ID on success, input ID otherwise
*/
-u32 iort_msi_map_rid(struct device *dev, u32 req_id)
+u32 iort_msi_map_id(struct device *dev, u32 input_id)
{
struct acpi_iort_node *node;
u32 dev_id;
node = iort_find_dev_node(dev);
if (!node)
- return req_id;
+ return input_id;
- iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
+ iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
return dev_id;
}
@@ -625,13 +637,13 @@ static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
/**
* iort_dev_find_its_id() - Find the ITS identifier for a device
* @dev: The device.
- * @req_id: Device's requester ID
+ * @id: Device's ID
* @idx: Index of the ITS identifier list.
* @its_id: ITS identifier.
*
* Returns: 0 on success, appropriate error value otherwise
*/
-static int iort_dev_find_its_id(struct device *dev, u32 req_id,
+static int iort_dev_find_its_id(struct device *dev, u32 id,
unsigned int idx, int *its_id)
{
struct acpi_iort_its_group *its;
@@ -641,7 +653,7 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
if (!node)
return -ENXIO;
- node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
+ node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
if (!node)
return -ENXIO;
@@ -664,19 +676,20 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
*
* Returns: the MSI domain for this device, NULL otherwise
*/
-struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
+struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
+ enum irq_domain_bus_token bus_token)
{
struct fwnode_handle *handle;
int its_id;
- if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
+ if (iort_dev_find_its_id(dev, id, 0, &its_id))
return NULL;
handle = iort_find_domain_token(its_id);
if (!handle)
return NULL;
- return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
+ return irq_find_matching_fwnode(handle, bus_token);
}
static void iort_set_device_domain(struct device *dev,
@@ -965,19 +978,54 @@ static void iort_named_component_init(struct device *dev,
nc->node_flags);
}
+static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
+{
+ struct acpi_iort_node *parent;
+ int err = -ENODEV, i = 0;
+ u32 streamid = 0;
+
+ do {
+
+ parent = iort_node_map_platform_id(node, &streamid,
+ IORT_IOMMU_TYPE,
+ i++);
+
+ if (parent)
+ err = iort_iommu_xlate(dev, parent, streamid);
+ } while (parent && !err);
+
+ return err;
+}
+
+static int iort_nc_iommu_map_id(struct device *dev,
+ struct acpi_iort_node *node,
+ const u32 *in_id)
+{
+ struct acpi_iort_node *parent;
+ u32 streamid;
+
+ parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
+ if (parent)
+ return iort_iommu_xlate(dev, parent, streamid);
+
+ return -ENODEV;
+}
+
+
/**
- * iort_iommu_configure - Set-up IOMMU configuration for a device.
+ * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
*
* @dev: device to configure
+ * @id_in: optional input id const value pointer
*
* Returns: iommu_ops pointer on configuration success
* NULL on configuration failure
*/
-const struct iommu_ops *iort_iommu_configure(struct device *dev)
+const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
+ const u32 *id_in)
{
- struct acpi_iort_node *node, *parent;
+ struct acpi_iort_node *node;
const struct iommu_ops *ops;
- u32 streamid = 0;
int err = -ENODEV;
/*
@@ -1006,21 +1054,13 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (fwspec && iort_pci_rc_supports_ats(node))
fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
} else {
- int i = 0;
-
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
if (!node)
return NULL;
- do {
- parent = iort_node_map_platform_id(node, &streamid,
- IORT_IOMMU_TYPE,
- i++);
-
- if (parent)
- err = iort_iommu_xlate(dev, parent, streamid);
- } while (parent && !err);
+ err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
+ iort_nc_iommu_map(dev, node);
if (!err)
iort_named_component_init(dev, node);
@@ -1045,6 +1085,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
return ops;
}
+
#else
static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
{ return NULL; }
@@ -1053,7 +1094,8 @@ static inline int iort_add_device_replay(const struct iommu_ops *ops,
{ return 0; }
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
-const struct iommu_ops *iort_iommu_configure(struct device *dev)
+const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
+ const u32 *input_id)
{ return NULL; }
#endif
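The named-component path above no longer assumes the device itself carries the ACPI companion; it climbs dev->parent until one is found, which matters for platform children created below an ACPI-described parent. A minimal sketch of that walk, using only generic driver-core types:

#include <linux/acpi.h>
#include <linux/device.h>

static struct acpi_device *find_acpi_companion(struct device *dev)
{
	struct acpi_device *adev = NULL;

	while (dev) {
		adev = ACPI_COMPANION(dev);
		if (adev)
			break;		/* nearest ancestor with a companion */

		dev = dev->parent;
	}

	return adev;
}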
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 366c389175d8..cab4af532f36 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -24,12 +24,6 @@
#include <asm/unaligned.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#endif
-
#include <linux/acpi.h>
#include <linux/power_supply.h>
@@ -69,11 +63,6 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-#endif
-
static const struct acpi_device_id battery_device_ids[] = {
{"PNP0C0A", 0},
{"", 0},
@@ -1024,226 +1013,6 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
}
/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static struct proc_dir_entry *acpi_battery_dir;
-
-static const char *acpi_battery_units(const struct acpi_battery *battery)
-{
- return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
- "mA" : "mW";
-}
-
-static int acpi_battery_info_proc_show(struct seq_file *seq, void *offset)
-{
- struct acpi_battery *battery = seq->private;
- int result = acpi_battery_update(battery, false);
-
- if (result)
- goto end;
-
- seq_printf(seq, "present: %s\n",
- acpi_battery_present(battery) ? "yes" : "no");
- if (!acpi_battery_present(battery))
- goto end;
- if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "design capacity: unknown\n");
- else
- seq_printf(seq, "design capacity: %d %sh\n",
- battery->design_capacity,
- acpi_battery_units(battery));
-
- if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "last full capacity: unknown\n");
- else
- seq_printf(seq, "last full capacity: %d %sh\n",
- battery->full_charge_capacity,
- acpi_battery_units(battery));
-
- seq_printf(seq, "battery technology: %srechargeable\n",
- battery->technology ? "" : "non-");
-
- if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "design voltage: unknown\n");
- else
- seq_printf(seq, "design voltage: %d mV\n",
- battery->design_voltage);
- seq_printf(seq, "design capacity warning: %d %sh\n",
- battery->design_capacity_warning,
- acpi_battery_units(battery));
- seq_printf(seq, "design capacity low: %d %sh\n",
- battery->design_capacity_low,
- acpi_battery_units(battery));
- seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
- seq_printf(seq, "capacity granularity 1: %d %sh\n",
- battery->capacity_granularity_1,
- acpi_battery_units(battery));
- seq_printf(seq, "capacity granularity 2: %d %sh\n",
- battery->capacity_granularity_2,
- acpi_battery_units(battery));
- seq_printf(seq, "model number: %s\n", battery->model_number);
- seq_printf(seq, "serial number: %s\n", battery->serial_number);
- seq_printf(seq, "battery type: %s\n", battery->type);
- seq_printf(seq, "OEM info: %s\n", battery->oem_info);
- end:
- if (result)
- seq_printf(seq, "ERROR: Unable to read battery info\n");
- return result;
-}
-
-static int acpi_battery_state_proc_show(struct seq_file *seq, void *offset)
-{
- struct acpi_battery *battery = seq->private;
- int result = acpi_battery_update(battery, false);
-
- if (result)
- goto end;
-
- seq_printf(seq, "present: %s\n",
- acpi_battery_present(battery) ? "yes" : "no");
- if (!acpi_battery_present(battery))
- goto end;
-
- seq_printf(seq, "capacity state: %s\n",
- (battery->state & 0x04) ? "critical" : "ok");
- if ((battery->state & 0x01) && (battery->state & 0x02))
- seq_printf(seq,
- "charging state: charging/discharging\n");
- else if (battery->state & 0x01)
- seq_printf(seq, "charging state: discharging\n");
- else if (battery->state & 0x02)
- seq_printf(seq, "charging state: charging\n");
- else
- seq_printf(seq, "charging state: charged\n");
-
- if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "present rate: unknown\n");
- else
- seq_printf(seq, "present rate: %d %s\n",
- battery->rate_now, acpi_battery_units(battery));
-
- if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "remaining capacity: unknown\n");
- else
- seq_printf(seq, "remaining capacity: %d %sh\n",
- battery->capacity_now, acpi_battery_units(battery));
- if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "present voltage: unknown\n");
- else
- seq_printf(seq, "present voltage: %d mV\n",
- battery->voltage_now);
- end:
- if (result)
- seq_printf(seq, "ERROR: Unable to read battery state\n");
-
- return result;
-}
-
-static int acpi_battery_alarm_proc_show(struct seq_file *seq, void *offset)
-{
- struct acpi_battery *battery = seq->private;
- int result = acpi_battery_update(battery, false);
-
- if (result)
- goto end;
-
- if (!acpi_battery_present(battery)) {
- seq_printf(seq, "present: no\n");
- goto end;
- }
- seq_printf(seq, "alarm: ");
- if (battery->alarm) {
- seq_printf(seq, "%u %sh\n", battery->alarm,
- acpi_battery_units(battery));
- } else {
- seq_printf(seq, "unsupported\n");
- }
- end:
- if (result)
- seq_printf(seq, "ERROR: Unable to read battery alarm\n");
- return result;
-}
-
-static ssize_t acpi_battery_write_alarm(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- int result = 0;
- char alarm_string[12] = { '\0' };
- struct seq_file *m = file->private_data;
- struct acpi_battery *battery = m->private;
-
- if (!battery || (count > sizeof(alarm_string) - 1))
- return -EINVAL;
- if (!acpi_battery_present(battery)) {
- result = -ENODEV;
- goto end;
- }
- if (copy_from_user(alarm_string, buffer, count)) {
- result = -EFAULT;
- goto end;
- }
- alarm_string[count] = '\0';
- if (kstrtoint(alarm_string, 0, &battery->alarm)) {
- result = -EINVAL;
- goto end;
- }
- result = acpi_battery_set_alarm(battery);
- end:
- if (result)
- return result;
- return count;
-}
-
-static int acpi_battery_alarm_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_battery_alarm_proc_show, PDE_DATA(inode));
-}
-
-static const struct proc_ops acpi_battery_alarm_proc_ops = {
- .proc_open = acpi_battery_alarm_proc_open,
- .proc_read = seq_read,
- .proc_write = acpi_battery_write_alarm,
- .proc_lseek = seq_lseek,
- .proc_release = single_release,
-};
-
-static int acpi_battery_add_fs(struct acpi_device *device)
-{
- pr_warn(PREFIX "Deprecated procfs I/F for battery is loaded, please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_battery_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- if (!proc_create_single_data("info", S_IRUGO, acpi_device_dir(device),
- acpi_battery_info_proc_show, acpi_driver_data(device)))
- return -ENODEV;
- if (!proc_create_single_data("state", S_IRUGO, acpi_device_dir(device),
- acpi_battery_state_proc_show, acpi_driver_data(device)))
- return -ENODEV;
- if (!proc_create_data("alarm", S_IFREG | S_IRUGO | S_IWUSR,
- acpi_device_dir(device), &acpi_battery_alarm_proc_ops,
- acpi_driver_data(device)))
- return -ENODEV;
- return 0;
-}
-
-static void acpi_battery_remove_fs(struct acpi_device *device)
-{
- if (!acpi_device_dir(device))
- return;
- remove_proc_subtree(acpi_device_bid(device), acpi_battery_dir);
- acpi_device_dir(device) = NULL;
-}
-
-#endif
-
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -1432,14 +1201,6 @@ static int acpi_battery_add(struct acpi_device *device)
if (result)
goto fail;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- result = acpi_battery_add_fs(device);
- if (result) {
- acpi_battery_remove_fs(device);
- goto fail;
- }
-#endif
-
pr_info(PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
device->status.battery_present ? "present" : "absent");
@@ -1468,9 +1229,6 @@ static int acpi_battery_remove(struct acpi_device *device)
device_init_wakeup(&device->dev, 0);
battery = acpi_driver_data(device);
unregister_pm_notifier(&battery->pm_nb);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_battery_remove_fs(device);
-#endif
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
mutex_destroy(&battery->sysfs_lock);
@@ -1531,16 +1289,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
}
}
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_battery_dir = acpi_lock_battery_dir();
- if (!acpi_battery_dir)
- return;
-#endif
result = acpi_bus_register_driver(&acpi_battery_driver);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- if (result < 0)
- acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
battery_driver_registered = (result == 0);
}
@@ -1560,10 +1309,6 @@ static void __exit acpi_battery_exit(void)
acpi_bus_unregister_driver(&acpi_battery_driver);
battery_hook_exit();
}
-#ifdef CONFIG_ACPI_PROCFS_POWER
- if (acpi_battery_dir)
- acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
}
module_init(acpi_battery_init);
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 3c35e57dd854..a4eda7fe50d3 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -405,7 +405,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
switch (event) {
case ACPI_FIXED_HARDWARE_EVENT:
event = ACPI_BUTTON_NOTIFY_STATUS;
- /* fall through */
+ fallthrough;
case ACPI_BUTTON_NOTIFY_STATUS:
input = button->input;
if (button->type == ACPI_BUTTON_TYPE_LID) {
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
deleted file mode 100644
index 0ca9f82de8ba..000000000000
--- a/drivers/acpi/cm_sbs.c
+++ /dev/null
@@ -1,87 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
-#define PREFIX "ACPI: "
-
-ACPI_MODULE_NAME("cm_sbs");
-#define ACPI_AC_CLASS "ac_adapter"
-#define ACPI_BATTERY_CLASS "battery"
-#define _COMPONENT ACPI_SBS_COMPONENT
-static struct proc_dir_entry *acpi_ac_dir;
-static struct proc_dir_entry *acpi_battery_dir;
-
-static DEFINE_MUTEX(cm_sbs_mutex);
-
-static int lock_ac_dir_cnt;
-static int lock_battery_dir_cnt;
-
-struct proc_dir_entry *acpi_lock_ac_dir(void)
-{
- mutex_lock(&cm_sbs_mutex);
- if (!acpi_ac_dir)
- acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
- if (acpi_ac_dir) {
- lock_ac_dir_cnt++;
- } else {
- printk(KERN_ERR PREFIX
- "Cannot create %s\n", ACPI_AC_CLASS);
- }
- mutex_unlock(&cm_sbs_mutex);
- return acpi_ac_dir;
-}
-EXPORT_SYMBOL(acpi_lock_ac_dir);
-
-void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
-{
- mutex_lock(&cm_sbs_mutex);
- if (acpi_ac_dir_param)
- lock_ac_dir_cnt--;
- if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
- remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
- acpi_ac_dir = NULL;
- }
- mutex_unlock(&cm_sbs_mutex);
-}
-EXPORT_SYMBOL(acpi_unlock_ac_dir);
-
-struct proc_dir_entry *acpi_lock_battery_dir(void)
-{
- mutex_lock(&cm_sbs_mutex);
- if (!acpi_battery_dir) {
- acpi_battery_dir =
- proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
- }
- if (acpi_battery_dir) {
- lock_battery_dir_cnt++;
- } else {
- printk(KERN_ERR PREFIX
- "Cannot create %s\n", ACPI_BATTERY_CLASS);
- }
- mutex_unlock(&cm_sbs_mutex);
- return acpi_battery_dir;
-}
-EXPORT_SYMBOL(acpi_lock_battery_dir);
-
-void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
-{
- mutex_lock(&cm_sbs_mutex);
- if (acpi_battery_dir_param)
- lock_battery_dir_cnt--;
- if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
- && acpi_battery_dir) {
- remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
- acpi_battery_dir = NULL;
- }
- mutex_unlock(&cm_sbs_mutex);
- return;
-}
-EXPORT_SYMBOL(acpi_unlock_battery_dir);
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index e3414131bfca..9bd72c26ef46 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -469,7 +469,7 @@ int dock_notify(struct acpi_device *adev, u32 event)
surprise_removal = 1;
event = ACPI_NOTIFY_EJECT_REQUEST;
/* Fall back */
- /* fall through */
+ fallthrough;
case ACPI_NOTIFY_EJECT_REQUEST:
begin_undock(ds);
if ((immediate_undock && !(ds->flags & DOCK_IS_ATA))
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 04ce2b96c3da..fcddda3d6712 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2059,13 +2059,13 @@ static int param_get_event_clearing(char *buffer,
{
switch (ec_event_clearing) {
case ACPI_EC_EVT_TIMING_STATUS:
- return sprintf(buffer, "status");
+ return sprintf(buffer, "status\n");
case ACPI_EC_EVT_TIMING_QUERY:
- return sprintf(buffer, "query");
+ return sprintf(buffer, "query\n");
case ACPI_EC_EVT_TIMING_EVENT:
- return sprintf(buffer, "event");
+ return sprintf(buffer, "event\n");
default:
- return sprintf(buffer, "invalid");
+ return sprintf(buffer, "invalid\n");
}
return 0;
}
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index ccd900690b6f..b1a7f8d6965e 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -106,7 +106,7 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
break;
- /* fall through */
+ fallthrough;
default:
if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
break;
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index f5525f8bb770..a303f0123394 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -16,7 +16,7 @@
/* ACPI 6.1 */
#define UUID_NFIT_BUS "2f10e7a4-9e91-11e4-89d3-123b93f75cba"
-/* http://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */
+/* https://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */
#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
/* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 5be5a977da1b..15bbaab8500b 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -230,7 +230,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
pxm &= 0xff;
node = acpi_map_pxm_to_node(pxm);
- if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+ if (node == NUMA_NO_NODE) {
pr_err("SRAT: Too many proximity domains.\n");
goto out_err_bad_srat;
}
@@ -291,8 +291,6 @@ acpi_parse_x2apic_affinity(union acpi_subtable_headers *header,
struct acpi_srat_x2apic_cpu_affinity *processor_affinity;
processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header;
- if (!processor_affinity)
- return -EINVAL;
acpi_table_print_srat_entry(&header->common);
@@ -309,8 +307,6 @@ acpi_parse_processor_affinity(union acpi_subtable_headers *header,
struct acpi_srat_cpu_affinity *processor_affinity;
processor_affinity = (struct acpi_srat_cpu_affinity *)header;
- if (!processor_affinity)
- return -EINVAL;
acpi_table_print_srat_entry(&header->common);
@@ -327,8 +323,6 @@ acpi_parse_gicc_affinity(union acpi_subtable_headers *header,
struct acpi_srat_gicc_affinity *processor_affinity;
processor_affinity = (struct acpi_srat_gicc_affinity *)header;
- if (!processor_affinity)
- return -EINVAL;
acpi_table_print_srat_entry(&header->common);
@@ -347,8 +341,6 @@ acpi_parse_memory_affinity(union acpi_subtable_headers * header,
struct acpi_srat_mem_affinity *memory_affinity;
memory_affinity = (struct acpi_srat_mem_affinity *)header;
- if (!memory_affinity)
- return -EINVAL;
acpi_table_print_srat_entry(&header->common);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 762c5d50b8fe..6ad8cb05f672 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -77,7 +77,10 @@ struct acpi_ioremap {
void __iomem *virt;
acpi_physical_address phys;
acpi_size size;
- unsigned long refcount;
+ union {
+ unsigned long refcount;
+ struct rcu_work rwork;
+ } track;
};
static LIST_HEAD(acpi_ioremaps);
@@ -250,7 +253,7 @@ void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
map = acpi_map_lookup(phys, size);
if (map) {
virt = map->virt + (phys - map->phys);
- map->refcount++;
+ map->track.refcount++;
}
mutex_unlock(&acpi_ioremap_lock);
return virt;
@@ -335,7 +338,7 @@ void __iomem __ref
/* Check if there's a suitable mapping already. */
map = acpi_map_lookup(phys, size);
if (map) {
- map->refcount++;
+ map->track.refcount++;
goto out;
}
@@ -358,7 +361,7 @@ void __iomem __ref
map->virt = virt;
map->phys = pg_off;
map->size = pg_sz;
- map->refcount = 1;
+ map->track.refcount = 1;
list_add_tail_rcu(&map->list, &acpi_ioremaps);
@@ -374,21 +377,26 @@ void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
-/* Must be called with mutex_lock(&acpi_ioremap_lock) */
-static unsigned long acpi_os_drop_map_ref(struct acpi_ioremap *map)
+static void acpi_os_map_remove(struct work_struct *work)
{
- unsigned long refcount = --map->refcount;
+ struct acpi_ioremap *map = container_of(to_rcu_work(work),
+ struct acpi_ioremap,
+ track.rwork);
- if (!refcount)
- list_del_rcu(&map->list);
- return refcount;
+ acpi_unmap(map->phys, map->virt);
+ kfree(map);
}
-static void acpi_os_map_cleanup(struct acpi_ioremap *map)
+/* Must be called with mutex_lock(&acpi_ioremap_lock) */
+static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
- synchronize_rcu_expedited();
- acpi_unmap(map->phys, map->virt);
- kfree(map);
+ if (--map->track.refcount)
+ return;
+
+ list_del_rcu(&map->list);
+
+ INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
+ queue_rcu_work(system_wq, &map->track.rwork);
}
/**
@@ -397,8 +405,8 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
* @size: Size of the address range to drop a reference to.
*
* Look up the given virtual address range in the list of existing ACPI memory
- * mappings, drop a reference to it and unmap it if there are no more active
- * references to it.
+ * mappings, drop a reference to it and, if there are no more active
+ * references to it, queue it up for later removal.
*
* During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_unmap_table() to get the job done. Since
@@ -408,7 +416,6 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
- unsigned long refcount;
if (!acpi_permanent_mmap) {
__acpi_unmap_table(virt, size);
@@ -416,23 +423,27 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
}
mutex_lock(&acpi_ioremap_lock);
+
map = acpi_map_lookup_virt(virt, size);
if (!map) {
mutex_unlock(&acpi_ioremap_lock);
WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
return;
}
- refcount = acpi_os_drop_map_ref(map);
- mutex_unlock(&acpi_ioremap_lock);
+ acpi_os_drop_map_ref(map);
- if (!refcount)
- acpi_os_map_cleanup(map);
+ mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
+/**
+ * acpi_os_unmap_memory - Drop a memory mapping reference.
+ * @virt: Start of the address range to drop a reference to.
+ * @size: Size of the address range to drop a reference to.
+ */
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
- return acpi_os_unmap_iomem((void __iomem *)virt, size);
+ acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
@@ -461,7 +472,6 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
u64 addr;
struct acpi_ioremap *map;
- unsigned long refcount;
if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return;
@@ -472,16 +482,15 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
return;
mutex_lock(&acpi_ioremap_lock);
+
map = acpi_map_lookup(addr, gas->bit_width / 8);
if (!map) {
mutex_unlock(&acpi_ioremap_lock);
return;
}
- refcount = acpi_os_drop_map_ref(map);
- mutex_unlock(&acpi_ioremap_lock);
+ acpi_os_drop_map_ref(map);
- if (!refcount)
- acpi_os_map_cleanup(map);
+ mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
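The unmap rework above drops the synchronous synchronize_rcu_expedited() wait from the caller's context: the refcount and the RCU work item can share one union because an entry is never simultaneously live on the list and queued for teardown, and the final unmap runs from a workqueue after the grace period. A generic sketch of the pattern, assuming a kmalloc'ed object on an RCU-protected list guarded by a mutex:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/list.h>

struct tracked_obj {
	struct list_head list;
	union {
		unsigned long refcount;		/* while reachable on the list */
		struct rcu_work rwork;		/* once unlinked */
	} track;
};

static void tracked_obj_free(struct work_struct *work)
{
	struct tracked_obj *obj = container_of(to_rcu_work(work),
					       struct tracked_obj,
					       track.rwork);

	kfree(obj);	/* no RCU reader can still observe obj here */
}

/* Must be called with the list mutex held. */
static void tracked_obj_put(struct tracked_obj *obj)
{
	if (--obj->track.refcount)
		return;

	list_del_rcu(&obj->list);
	INIT_RCU_WORK(&obj->track.rwork, tracked_obj_free);
	queue_rcu_work(system_wq, &obj->track.rwork);
}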
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 75534c5b5433..71a30b0d0f05 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -203,8 +203,7 @@ static void tsc_check_state(int state)
*/
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
return;
-
- /*FALL THROUGH*/
+ fallthrough;
default:
/* TSC could halt in idle, so notify users */
if (state > ACPI_STATE_C1)
@@ -655,8 +654,8 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
return index;
}
-static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@@ -664,16 +663,18 @@ static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
struct acpi_processor *pr = __this_cpu_read(processors);
if (unlikely(!pr))
- return;
+ return 0;
if (pr->flags.bm_check) {
acpi_idle_enter_bm(pr, cx, false);
- return;
+ return 0;
} else {
ACPI_FLUSH_CPU_CACHE();
}
}
acpi_idle_do_entry(cx);
+
+ return 0;
}
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e601c4511a8b..d04de10a63e4 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -45,6 +45,9 @@ static const guid_t prp_guids[] = {
/* Thunderbolt GUID for WAKE_SUPPORTED: 6c501103-c189-4296-ba72-9bf5a26ebe5d */
GUID_INIT(0x6c501103, 0xc189, 0x4296,
0xba, 0x72, 0x9b, 0xf5, 0xa2, 0x6e, 0xbe, 0x5d),
+ /* Storage device needs D3 GUID: 5025030f-842f-4ab4-a561-99a5189762d0 */
+ GUID_INIT(0x5025030f, 0x842f, 0x4ab4,
+ 0xa5, 0x61, 0x99, 0xa5, 0x18, 0x97, 0x62, 0xd0),
};
/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
@@ -606,13 +609,7 @@ static struct fwnode_handle *
acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname)
{
- char name[ACPI_PATH_SEGMENT_LENGTH];
struct fwnode_handle *child;
- struct acpi_buffer path;
- acpi_status status;
-
- path.length = sizeof(name);
- path.pointer = name;
fwnode_for_each_child_node(fwnode, child) {
if (is_acpi_data_node(child)) {
@@ -621,12 +618,8 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
continue;
}
- status = acpi_get_name(ACPI_HANDLE_FWNODE(child),
- ACPI_SINGLE_NAME, &path);
- if (ACPI_FAILURE(status))
- break;
-
- if (!strncmp(name, childname, ACPI_NAMESEG_SIZE))
+ if (!strncmp(acpi_device_bid(to_acpi_device_node(child)),
+ childname, ACPI_NAMESEG_SIZE))
return child;
}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 3b4448972374..ad04824ca3ba 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -373,7 +373,7 @@ unsigned int acpi_dev_get_irq_type(int triggering, int polarity)
case ACPI_ACTIVE_BOTH:
if (triggering == ACPI_EDGE_SENSITIVE)
return IRQ_TYPE_EDGE_BOTH;
- /* fall through */
+ fallthrough;
default:
return IRQ_TYPE_NONE;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 8777faced51a..2142f1554761 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1457,8 +1457,10 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
* acpi_dma_configure - Set-up DMA configuration for the device.
* @dev: The pointer to the device
* @attr: device dma attributes
+ * @input_id: input device id const value pointer
*/
-int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
+int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
+ const u32 *input_id)
{
const struct iommu_ops *iommu;
u64 dma_addr = 0, size = 0;
@@ -1470,7 +1472,7 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
iort_dma_setup(dev, &dma_addr, &size);
- iommu = iort_iommu_configure(dev);
+ iommu = iort_iommu_configure_id(dev, input_id);
if (PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
@@ -1479,7 +1481,7 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
return 0;
}
-EXPORT_SYMBOL_GPL(acpi_dma_configure);
+EXPORT_SYMBOL_GPL(acpi_dma_configure_id);
static void acpi_init_coherency(struct acpi_device *adev)
{
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index d73b4535e79d..88460bacd5ae 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -111,7 +111,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
table->serial_port.access_width))) {
default:
pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
- /* fall through */
+ fallthrough;
case 8:
iotype = "mmio";
break;
@@ -128,7 +128,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
switch (table->interface_type) {
case ACPI_DBG2_ARM_SBSA_32BIT:
iotype = "mmio32";
- /* fall through */
+ fallthrough;
case ACPI_DBG2_ARM_PL011:
case ACPI_DBG2_ARM_SBSA_GENERIC:
case ACPI_DBG2_BCM2835:
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 76c668c05fa0..a5cc4f3bb1e3 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -214,7 +214,7 @@ static int param_set_trace_method_name(const char *val,
static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
{
- return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", acpi_gbl_trace_method_name);
}
static const struct kernel_param_ops param_ops_trace_method = {
@@ -271,15 +271,15 @@ static int param_set_trace_state(const char *val,
static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
{
if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
- return sprintf(buffer, "disable");
+ return sprintf(buffer, "disable\n");
else {
if (acpi_gbl_trace_method_name) {
if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
- return sprintf(buffer, "method-once");
+ return sprintf(buffer, "method-once\n");
else
- return sprintf(buffer, "method");
+ return sprintf(buffer, "method\n");
} else
- return sprintf(buffer, "enable");
+ return sprintf(buffer, "enable\n");
}
return 0;
}
@@ -302,7 +302,7 @@ static int param_get_acpica_version(char *buffer,
{
int result;
- result = sprintf(buffer, "%x", ACPI_CA_VERSION);
+ result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);
return result;
}
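These getters are read back through sysfs, where output is expected to be newline-terminated so that cat produces a complete line; the series adds the missing \n across the ACPI parameter handlers. A minimal sketch of the convention for a hypothetical parameter:

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static bool example_enabled;	/* hypothetical backing store */

static int param_get_example(char *buffer, const struct kernel_param *kp)
{
	/* sysfs reads expect newline-terminated output */
	return sprintf(buffer, "%s\n", example_enabled ? "on" : "off");
}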
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 0e905c3d1645..e48690a006a4 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -292,20 +292,6 @@ static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
int errs = 0;
int i;
- if (acpi_disabled)
- return -ENODEV;
-
- if (!id)
- return -EINVAL;
-
- if (!table_size)
- return -EINVAL;
-
- if (!table_header) {
- pr_warn("%4.4s not present\n", id);
- return -ENODEV;
- }
-
table_end = (unsigned long)table_header + table_header->length;
/* Parse all entries looking for a match. */
@@ -371,6 +357,9 @@ int __init acpi_table_parse_entries_array(char *id,
if (!id)
return -EINVAL;
+ if (!table_size)
+ return -EINVAL;
+
if (!strncmp(id, ACPI_SIG_MADT, 4))
instance = acpi_apic_instance;
@@ -490,7 +479,7 @@ static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
}
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
-static const char * const table_sigs[] = {
+static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
ACPI_SIG_BERT, ACPI_SIG_BGRT, ACPI_SIG_CPEP, ACPI_SIG_ECDT,
ACPI_SIG_EINJ, ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT,
ACPI_SIG_MSCT, ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT,
@@ -501,7 +490,7 @@ static const char * const table_sigs[] = {
ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT,
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
- ACPI_SIG_NHLT, NULL };
+ ACPI_SIG_NHLT };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
@@ -548,11 +537,11 @@ void __init acpi_table_upgrade(void)
table = file.data;
- for (sig = 0; table_sigs[sig]; sig++)
+ for (sig = 0; sig < ARRAY_SIZE(table_sigs); sig++)
if (!memcmp(table->signature, table_sigs[sig], 4))
break;
- if (!table_sigs[sig]) {
+ if (sig >= ARRAY_SIZE(table_sigs)) {
pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
cpio_path, file.name);
continue;
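Switching table_sigs to a fixed-width char array removes both the pointer per entry and the NULL sentinel, so iteration is bounded by ARRAY_SIZE() and the table can live in __initconst. A condensed sketch with a sample subset of signatures:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/string.h>

/* 4-byte entries; the terminating NUL of each literal is intentionally dropped. */
static const char sigs[][4] = { "BERT", "BGRT", "MADT" };

static bool sig_known(const char *sig)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(sigs); i++)
		if (!memcmp(sig, sigs[i], 4))
			return true;

	return false;
}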
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f50c5f182bb5..f936530a19b0 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1969,9 +1969,8 @@ static void binder_send_failed_reply(struct binder_transaction *t,
binder_thread_dec_tmpref(target_thread);
binder_free_transaction(t);
return;
- } else {
- __release(&target_thread->proc->inner_lock);
}
+ __release(&target_thread->proc->inner_lock);
next = t->from_parent;
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2760,11 +2759,10 @@ static bool binder_proc_transaction(struct binder_transaction *t,
binder_node_lock(node);
if (oneway) {
BUG_ON(thread);
- if (node->has_async_transaction) {
+ if (node->has_async_transaction)
pending_async = true;
- } else {
+ else
node->has_async_transaction = true;
- }
}
binder_inner_proc_lock(proc);
@@ -2982,6 +2980,12 @@ static void binder_transaction(struct binder_proc *proc,
goto err_dead_binder;
}
e->to_node = target_node->debug_id;
+ if (WARN_ON(proc == target_proc)) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_invalid_target_handle;
+ }
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
@@ -3635,10 +3639,17 @@ static int binder_thread_write(struct binder_proc *proc,
struct binder_node *ctx_mgr_node;
mutex_lock(&context->context_mgr_node_lock);
ctx_mgr_node = context->binder_context_mgr_node;
- if (ctx_mgr_node)
+ if (ctx_mgr_node) {
+ if (ctx_mgr_node->proc == proc) {
+ binder_user_error("%d:%d context manager tried to acquire desc 0\n",
+ proc->pid, thread->pid);
+ mutex_unlock(&context->context_mgr_node_lock);
+ return -EINVAL;
+ }
ret = binder_inc_ref_for_node(
proc, ctx_mgr_node,
strong, NULL, &rdata);
+ }
mutex_unlock(&context->context_mgr_node_lock);
}
if (ret)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index cbe6aa77d50d..69609696a843 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -547,6 +547,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
{
struct binder_buffer *prev, *next = NULL;
bool to_free = true;
+
BUG_ON(alloc->buffers.next == &buffer->entry);
prev = binder_buffer_prev(buffer);
BUG_ON(!prev->free);
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 7cf566aafe1f..7b76fefde3f8 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler_types.h>
#include <linux/errno.h>
@@ -351,6 +351,7 @@ static const struct super_operations binderfs_super_ops = {
static inline bool is_binderfs_control_device(const struct dentry *dentry)
{
struct binderfs_info *info = dentry->d_sb->s_fs_info;
+
return info->control_dentry == dentry;
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 46336084b1a9..ec233208585b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -93,7 +93,7 @@ static ssize_t ata_scsi_park_show(struct device *device,
struct ata_link *link;
struct ata_device *dev;
unsigned long now;
- unsigned int uninitialized_var(msecs);
+ unsigned int msecs;
int rc = 0;
ap = ata_shost_to_port(sdev->host);
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index d9fd70280482..7f814da3c2d0 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf)
return -EMEDIUMTYPE;
}
dev_data = PRIV(dev);
- if (!dev_data->persist) return 0;
+ if (!dev_data->persist) {
+ atm_dev_put(dev);
+ return 0;
+ }
dev_data->persist = 0;
- if (PRIV(dev)->vcc) return 0;
+ if (PRIV(dev)->vcc) {
+ atm_dev_put(dev);
+ return 0;
+ }
kfree(dev_data);
atm_dev_put(dev);
atm_dev_deregister(dev);
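Both early returns in atmtcp_remove_persistent() previously leaked the reference taken by atm_dev_lookup(); the fix pairs each return with atm_dev_put(). An equivalent shape with a single exit label, sketched under the assumption that PRIV() yields the driver-private atmtcp_dev_data and no lock is held across the helper:

static int atmtcp_remove_persistent_sketch(struct atm_dev *dev)
{
	struct atmtcp_dev_data *dev_data = PRIV(dev);

	if (!dev_data->persist)
		goto out_put;		/* nothing to tear down */

	dev_data->persist = 0;
	if (dev_data->vcc)
		goto out_put;		/* still open; persist is now clear */

	kfree(dev_data);
	atm_dev_put(dev);		/* reference from atm_dev_lookup() */
	atm_dev_deregister(dev);
	return 0;

out_put:
	atm_dev_put(dev);
	return 0;
}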
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 57f97b95a453..165eebe06e39 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -940,7 +940,7 @@ static int open_tx_first(struct atm_vcc *vcc)
vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
else {
- int uninitialized_var(pcr);
+ int pcr;
if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 4d0a0038b476..75f72d684294 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -54,6 +54,17 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
per_cpu(cpu_scale, cpu) = capacity;
}
+DEFINE_PER_CPU(unsigned long, thermal_pressure);
+
+void topology_set_thermal_pressure(const struct cpumask *cpus,
+ unsigned long th_pressure)
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpus)
+ WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
+}
+
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
char *buf)
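topology_set_thermal_pressure() publishes the thermally capped capacity through a per-CPU variable with WRITE_ONCE() because consumers such as the scheduler sample it locklessly from hot paths. The matching reader side, sketched against the per-CPU declaration this file now provides:

#include <linux/percpu.h>

DECLARE_PER_CPU(unsigned long, thermal_pressure);

static unsigned long read_thermal_pressure(int cpu)
{
	/* pairs with the WRITE_ONCE() in topology_set_thermal_pressure() */
	return READ_ONCE(per_cpu(thermal_pressure, cpu));
}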
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0a01df608849..2cb5e04cf86c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -263,18 +263,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
/*
* Traverse all sub-domains within the domain. This can be
* done without any additional locking as the link->performance_state
- * field is protected by the master genpd->lock, which is already taken.
+ * field is protected by the parent genpd->lock, which is already taken.
*
* Also note that link->performance_state (subdomain's performance state
- * requirement to master domain) is different from
- * link->slave->performance_state (current performance state requirement
+ * requirement to parent domain) is different from
+ * link->child->performance_state (current performance state requirement
* of the devices/sub-domains of the subdomain) and so can have a
* different value.
*
* Note that we also take vote from powered-off sub-domains into account
* as the same is done for devices right now.
*/
- list_for_each_entry(link, &genpd->master_links, master_node) {
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
if (link->performance_state > state)
state = link->performance_state;
}
@@ -285,40 +285,40 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
unsigned int state, int depth)
{
- struct generic_pm_domain *master;
+ struct generic_pm_domain *parent;
struct gpd_link *link;
- int master_state, ret;
+ int parent_state, ret;
if (state == genpd->performance_state)
return 0;
- /* Propagate to masters of genpd */
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- master = link->master;
+ /* Propagate to parents of genpd */
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ parent = link->parent;
- if (!master->set_performance_state)
+ if (!parent->set_performance_state)
continue;
- /* Find master's performance state */
+ /* Find parent's performance state */
ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
- master->opp_table,
+ parent->opp_table,
state);
if (unlikely(ret < 0))
goto err;
- master_state = ret;
+ parent_state = ret;
- genpd_lock_nested(master, depth + 1);
+ genpd_lock_nested(parent, depth + 1);
link->prev_performance_state = link->performance_state;
- link->performance_state = master_state;
- master_state = _genpd_reeval_performance_state(master,
- master_state);
- ret = _genpd_set_performance_state(master, master_state, depth + 1);
+ link->performance_state = parent_state;
+ parent_state = _genpd_reeval_performance_state(parent,
+ parent_state);
+ ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
if (ret)
link->performance_state = link->prev_performance_state;
- genpd_unlock(master);
+ genpd_unlock(parent);
if (ret)
goto err;
@@ -333,26 +333,26 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
err:
/* Encountered an error, lets rollback */
- list_for_each_entry_continue_reverse(link, &genpd->slave_links,
- slave_node) {
- master = link->master;
+ list_for_each_entry_continue_reverse(link, &genpd->child_links,
+ child_node) {
+ parent = link->parent;
- if (!master->set_performance_state)
+ if (!parent->set_performance_state)
continue;
- genpd_lock_nested(master, depth + 1);
+ genpd_lock_nested(parent, depth + 1);
- master_state = link->prev_performance_state;
- link->performance_state = master_state;
+ parent_state = link->prev_performance_state;
+ link->performance_state = parent_state;
- master_state = _genpd_reeval_performance_state(master,
- master_state);
- if (_genpd_set_performance_state(master, master_state, depth + 1)) {
+ parent_state = _genpd_reeval_performance_state(parent,
+ parent_state);
+ if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
pr_err("%s: Failed to roll back to %d performance state\n",
- master->name, master_state);
+ parent->name, parent_state);
}
- genpd_unlock(master);
+ genpd_unlock(parent);
}
return ret;
@@ -552,7 +552,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
- * managed to call genpd_power_on() for the master yet after
+ * managed to call genpd_power_on() for the parent yet after
* incrementing it. In that case genpd_power_on() will wait
* for us to drop the lock, so we can call .power_off() and let
* the genpd_power_on() restore power for us (this shouldn't
@@ -566,22 +566,22 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
genpd->status = GPD_STATE_POWER_OFF;
genpd_update_accounting(genpd);
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
- genpd_lock_nested(link->master, depth + 1);
- genpd_power_off(link->master, false, depth + 1);
- genpd_unlock(link->master);
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_dec(link->parent);
+ genpd_lock_nested(link->parent, depth + 1);
+ genpd_power_off(link->parent, false, depth + 1);
+ genpd_unlock(link->parent);
}
return 0;
}
/**
- * genpd_power_on - Restore power to a given PM domain and its masters.
+ * genpd_power_on - Restore power to a given PM domain and its parents.
* @genpd: PM domain to power up.
* @depth: nesting count for lockdep.
*
- * Restore power to @genpd and all of its masters so that it is possible to
+ * Restore power to @genpd and all of its parents so that it is possible to
* resume a device belonging to it.
*/
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
@@ -594,20 +594,20 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
/*
* The list is guaranteed not to change while the loop below is being
- * executed, unless one of the masters' .power_on() callbacks fiddles
+ * executed, unless one of the parents' .power_on() callbacks fiddles
* with it.
*/
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- struct generic_pm_domain *master = link->master;
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct generic_pm_domain *parent = link->parent;
- genpd_sd_counter_inc(master);
+ genpd_sd_counter_inc(parent);
- genpd_lock_nested(master, depth + 1);
- ret = genpd_power_on(master, depth + 1);
- genpd_unlock(master);
+ genpd_lock_nested(parent, depth + 1);
+ ret = genpd_power_on(parent, depth + 1);
+ genpd_unlock(parent);
if (ret) {
- genpd_sd_counter_dec(master);
+ genpd_sd_counter_dec(parent);
goto err;
}
}
@@ -623,12 +623,12 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
err:
list_for_each_entry_continue_reverse(link,
- &genpd->slave_links,
- slave_node) {
- genpd_sd_counter_dec(link->master);
- genpd_lock_nested(link->master, depth + 1);
- genpd_power_off(link->master, false, depth + 1);
- genpd_unlock(link->master);
+ &genpd->child_links,
+ child_node) {
+ genpd_sd_counter_dec(link->parent);
+ genpd_lock_nested(link->parent, depth + 1);
+ genpd_power_off(link->parent, false, depth + 1);
+ genpd_unlock(link->parent);
}
return ret;
@@ -932,13 +932,13 @@ late_initcall(genpd_power_off_unused);
#ifdef CONFIG_PM_SLEEP
/**
- * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
+ * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
* @genpd: PM domain to power off, if possible.
* @use_lock: use the lock.
* @depth: nesting count for lockdep.
*
* Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so. Also, in that case propagate to its masters.
+ * hibernation) and do that if so. Also, in that case propagate to its parents.
*
* This function is only called in "noirq" and "syscore" stages of system power
* transitions. The "noirq" callbacks may be executed asynchronously, thus in
@@ -963,21 +963,21 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
genpd->status = GPD_STATE_POWER_OFF;
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_dec(link->parent);
if (use_lock)
- genpd_lock_nested(link->master, depth + 1);
+ genpd_lock_nested(link->parent, depth + 1);
- genpd_sync_power_off(link->master, use_lock, depth + 1);
+ genpd_sync_power_off(link->parent, use_lock, depth + 1);
if (use_lock)
- genpd_unlock(link->master);
+ genpd_unlock(link->parent);
}
}
/**
- * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
+ * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
* @genpd: PM domain to power on.
* @use_lock: use the lock.
* @depth: nesting count for lockdep.
@@ -994,16 +994,16 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
if (genpd_status_on(genpd))
return;
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_inc(link->master);
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_inc(link->parent);
if (use_lock)
- genpd_lock_nested(link->master, depth + 1);
+ genpd_lock_nested(link->parent, depth + 1);
- genpd_sync_power_on(link->master, use_lock, depth + 1);
+ genpd_sync_power_on(link->parent, use_lock, depth + 1);
if (use_lock)
- genpd_unlock(link->master);
+ genpd_unlock(link->parent);
}
_genpd_power_on(genpd, false);
@@ -1443,12 +1443,12 @@ static void genpd_update_cpumask(struct generic_pm_domain *genpd,
if (!genpd_is_cpu_domain(genpd))
return;
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- struct generic_pm_domain *master = link->master;
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct generic_pm_domain *parent = link->parent;
- genpd_lock_nested(master, depth + 1);
- genpd_update_cpumask(master, cpu, set, depth + 1);
- genpd_unlock(master);
+ genpd_lock_nested(parent, depth + 1);
+ genpd_update_cpumask(parent, cpu, set, depth + 1);
+ genpd_unlock(parent);
}
if (set)
@@ -1636,17 +1636,17 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
goto out;
}
- list_for_each_entry(itr, &genpd->master_links, master_node) {
- if (itr->slave == subdomain && itr->master == genpd) {
+ list_for_each_entry(itr, &genpd->parent_links, parent_node) {
+ if (itr->child == subdomain && itr->parent == genpd) {
ret = -EINVAL;
goto out;
}
}
- link->master = genpd;
- list_add_tail(&link->master_node, &genpd->master_links);
- link->slave = subdomain;
- list_add_tail(&link->slave_node, &subdomain->slave_links);
+ link->parent = genpd;
+ list_add_tail(&link->parent_node, &genpd->parent_links);
+ link->child = subdomain;
+ list_add_tail(&link->child_node, &subdomain->child_links);
if (genpd_status_on(subdomain))
genpd_sd_counter_inc(genpd);
@@ -1660,7 +1660,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
- * @genpd: Master PM domain to add the subdomain to.
+ * @genpd: Leader PM domain to add the subdomain to.
* @subdomain: Subdomain to be added.
*/
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
@@ -1678,7 +1678,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
- * @genpd: Master PM domain to remove the subdomain from.
+ * @genpd: Leader PM domain to remove the subdomain from.
* @subdomain: Subdomain to be removed.
*/
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
@@ -1693,19 +1693,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
genpd_lock(subdomain);
genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
- if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
+ if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n",
genpd->name, subdomain->name);
ret = -EBUSY;
goto out;
}
- list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
- if (link->slave != subdomain)
+ list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
+ if (link->child != subdomain)
continue;
- list_del(&link->master_node);
- list_del(&link->slave_node);
+ list_del(&link->parent_node);
+ list_del(&link->child_node);
kfree(link);
if (genpd_status_on(subdomain))
genpd_sd_counter_dec(genpd);
@@ -1770,8 +1770,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd))
return -EINVAL;
- INIT_LIST_HEAD(&genpd->master_links);
- INIT_LIST_HEAD(&genpd->slave_links);
+ INIT_LIST_HEAD(&genpd->parent_links);
+ INIT_LIST_HEAD(&genpd->child_links);
INIT_LIST_HEAD(&genpd->dev_list);
genpd_lock_init(genpd);
genpd->gov = gov;
@@ -1848,15 +1848,15 @@ static int genpd_remove(struct generic_pm_domain *genpd)
return -EBUSY;
}
- if (!list_empty(&genpd->master_links) || genpd->device_count) {
+ if (!list_empty(&genpd->parent_links) || genpd->device_count) {
genpd_unlock(genpd);
pr_err("%s: unable to remove %s\n", __func__, genpd->name);
return -EBUSY;
}
- list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
- list_del(&link->master_node);
- list_del(&link->slave_node);
+ list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
+ list_del(&link->parent_node);
+ list_del(&link->child_node);
kfree(link);
}
@@ -2827,12 +2827,12 @@ static int genpd_summary_one(struct seq_file *s,
/*
* Modifications on the list require holding locks on both
- * master and slave, so we are safe.
+ * parent and child, so we are safe.
* Also genpd->name is immutable.
*/
- list_for_each_entry(link, &genpd->master_links, master_node) {
- seq_printf(s, "%s", link->slave->name);
- if (!list_is_last(&link->master_node, &genpd->master_links))
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ seq_printf(s, "%s", link->child->name);
+ if (!list_is_last(&link->parent_node, &genpd->parent_links))
seq_puts(s, ", ");
}
@@ -2860,7 +2860,7 @@ static int summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd;
int ret = 0;
- seq_puts(s, "domain status slaves\n");
+ seq_puts(s, "domain status children\n");
seq_puts(s, " /device runtime status\n");
seq_puts(s, "----------------------------------------------------------------------\n");
@@ -2915,8 +2915,8 @@ static int sub_domains_show(struct seq_file *s, void *data)
if (ret)
return -ERESTARTSYS;
- list_for_each_entry(link, &genpd->master_links, master_node)
- seq_printf(s, "%s\n", link->slave->name);
+ list_for_each_entry(link, &genpd->parent_links, parent_node)
+ seq_printf(s, "%s\n", link->child->name);
genpd_unlock(genpd);
return ret;
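
For orientation across these hunks, a minimal sketch of the renamed link structure, abbreviated to the fields the patch touches (the real struct gpd_link carries additional state):

    struct gpd_link {
            struct generic_pm_domain *parent;
            struct list_head parent_node;   /* entry on parent->parent_links */
            struct generic_pm_domain *child;
            struct list_head child_node;    /* entry on child->child_links */
    };

    /* walking upward from a domain to every parent it depends on;
     * handle_parent() is a placeholder, not a kernel function */
    list_for_each_entry(link, &genpd->child_links, child_node)
            handle_parent(link->parent);

A domain's child_links list thus names the links in which it is the child, i.e. the way up to its parents, mirroring the old slave_links/master pairing one-for-one.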
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index daa8c7689f7e..490ed7deb99a 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -135,8 +135,8 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
*
* All subdomains have been powered off already at this point.
*/
- list_for_each_entry(link, &genpd->master_links, master_node) {
- struct generic_pm_domain *sd = link->slave;
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *sd = link->child;
s64 sd_max_off_ns = sd->max_off_time_ns;
if (sd_max_off_ns < 0)
@@ -217,13 +217,13 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
}
/*
- * We have to invalidate the cached results for the masters, so
+ * We have to invalidate the cached results for the parents, so
* use the observation that default_power_down_ok() is not
- * going to be called for any master until this instance
+ * going to be called for any parent until this instance
* returns.
*/
- list_for_each_entry(link, &genpd->slave_links, slave_node)
- link->master->max_off_time_changed = true;
+ list_for_each_entry(link, &genpd->child_links, child_node)
+ link->parent->max_off_time_changed = true;
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = false;
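
A worked illustration of why every parent is marked dirty here (numbers invented for the example): if child A tolerates being off for 3 ms and child B for 5 ms, a parent's power-down budget is bounded by min(3, 5) = 3 ms less its own restore latency. Any change in either child can move that minimum, so the cached default_power_down_ok() answer in each parent is invalidated and lazily recomputed on the parent's next governor invocation:

    /* child A: 3 ms, child B: 5 ms => parent budget <= min(3, 5) = 3 ms;
     * a new 2 ms constraint on A shrinks that to 2 ms, hence the flag */
    link->parent->max_off_time_changed = true;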
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 24d25cf8ab14..c7b24812523c 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* sysfs entries for device PM */
#include <linux/device.h>
+#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
@@ -739,12 +740,18 @@ int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
int wakeup_sysfs_add(struct device *dev)
{
- return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+ int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+
+ if (!ret)
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+
+ return ret;
}
void wakeup_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
int pm_qos_sysfs_add_resume_latency(struct device *dev)
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4340e1d268b6..369a57e6f89d 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -541,9 +541,9 @@ static const struct irq_domain_ops regmap_domain_ops = {
};
/**
- * regmap_add_irq_chip_np() - Use standard regmap IRQ controller handling
+ * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
*
- * @np: The device_node where the IRQ domain should be added to.
+ * @fwnode: The firmware node to which the IRQ domain should be added.
* @map: The regmap for the device.
* @irq: The IRQ the device uses to signal interrupts.
* @irq_flags: The IRQF_ flags to use for the primary interrupt.
@@ -557,10 +557,11 @@ static const struct irq_domain_ops regmap_domain_ops = {
* register cache. The chip driver is responsible for restoring the
* register values used by the IRQ controller over suspend and resume.
*/
-int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq,
- int irq_flags, int irq_base,
- const struct regmap_irq_chip *chip,
- struct regmap_irq_chip_data **data)
+int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data *d;
int i;
@@ -771,10 +772,12 @@ int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq,
}
if (irq_base)
- d->domain = irq_domain_add_legacy(np, chip->num_irqs, irq_base,
+ d->domain = irq_domain_add_legacy(to_of_node(fwnode),
+ chip->num_irqs, irq_base,
0, &regmap_domain_ops, d);
else
- d->domain = irq_domain_add_linear(np, chip->num_irqs,
+ d->domain = irq_domain_add_linear(to_of_node(fwnode),
+ chip->num_irqs,
&regmap_domain_ops, d);
if (!d->domain) {
dev_err(map->dev, "Failed to create IRQ domain\n");
@@ -808,7 +811,7 @@ err_alloc:
kfree(d);
return ret;
}
-EXPORT_SYMBOL_GPL(regmap_add_irq_chip_np);
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
/**
* regmap_add_irq_chip() - Use standard regmap IRQ controller handling
@@ -822,15 +825,15 @@ EXPORT_SYMBOL_GPL(regmap_add_irq_chip_np);
*
* Returns 0 on success or an errno on failure.
*
- * This is the same as regmap_add_irq_chip_np, except that the device
+ * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
* node of the regmap is used.
*/
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int irq_base, const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data)
{
- return regmap_add_irq_chip_np(map->dev->of_node, map, irq, irq_flags,
- irq_base, chip, data);
+ return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
+ irq_flags, irq_base, chip, data);
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
@@ -899,10 +902,10 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
}
/**
- * devm_regmap_add_irq_chip_np() - Resource manager regmap_add_irq_chip_np()
+ * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
*
* @dev: The device pointer to which the irq_chip belongs.
- * @np: The device_node where the IRQ domain should be added to.
+ * @fwnode: The firmware node to which the IRQ domain should be added.
* @map: The regmap for the device.
* @irq: The IRQ the device uses to signal interrupts.
* @irq_flags: The IRQF_ flags to use for the primary interrupt.
@@ -915,11 +918,12 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
* The &regmap_irq_chip_data will be automatically released when the device is
* unbound.
*/
-int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np,
- struct regmap *map, int irq, int irq_flags,
- int irq_base,
- const struct regmap_irq_chip *chip,
- struct regmap_irq_chip_data **data)
+int devm_regmap_add_irq_chip_fwnode(struct device *dev,
+ struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data **ptr, *d;
int ret;
@@ -929,8 +933,8 @@ int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np,
if (!ptr)
return -ENOMEM;
- ret = regmap_add_irq_chip_np(np, map, irq, irq_flags, irq_base,
- chip, &d);
+ ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
+ chip, &d);
if (ret < 0) {
devres_free(ptr);
return ret;
@@ -941,7 +945,7 @@ int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np,
*data = d;
return 0;
}
-EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_np);
+EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
/**
* devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
@@ -964,8 +968,9 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data)
{
- return devm_regmap_add_irq_chip_np(dev, map->dev->of_node, map, irq,
- irq_flags, irq_base, chip, data);
+ return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
+ irq, irq_flags, irq_base, chip,
+ data);
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
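
A hedged usage sketch of the new entry point from a driver probe path; my_irq_chip, my_add_irqs and the flag choice are illustrative, only the regmap calls and their signatures come from this file:

    #include <linux/interrupt.h>
    #include <linux/property.h>
    #include <linux/regmap.h>

    static const struct regmap_irq_chip my_irq_chip = {
            .name = "my-chip",
            /* status/mask register layout elided for brevity */
    };

    static int my_add_irqs(struct device *dev, struct regmap *map, int irq)
    {
            struct regmap_irq_chip_data *irq_data;

            /* dev_fwnode() resolves for both DT- and ACPI-enumerated devices */
            return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev), map,
                                                   irq, IRQF_ONESHOT, 0,
                                                   &my_irq_chip, &irq_data);
    }

Drivers with no firmware-node dependence can keep calling devm_regmap_add_irq_chip(), which now forwards dev_fwnode(map->dev) itself.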
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 795a62a04022..e93700af7e6e 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -11,7 +11,7 @@
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -631,7 +631,7 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
const struct regmap_bus *bus,
const struct regmap_config *config)
{
- struct device_node *np;
+ struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
enum regmap_endian endian;
/* Retrieve the endianness specification from the regmap config */
@@ -641,22 +641,17 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
if (endian != REGMAP_ENDIAN_DEFAULT)
return endian;
- /* If the dev and dev->of_node exist try to get endianness from DT */
- if (dev && dev->of_node) {
- np = dev->of_node;
-
- /* Parse the device's DT node for an endianness specification */
- if (of_property_read_bool(np, "big-endian"))
- endian = REGMAP_ENDIAN_BIG;
- else if (of_property_read_bool(np, "little-endian"))
- endian = REGMAP_ENDIAN_LITTLE;
- else if (of_property_read_bool(np, "native-endian"))
- endian = REGMAP_ENDIAN_NATIVE;
-
- /* If the endianness was specified in DT, use that */
- if (endian != REGMAP_ENDIAN_DEFAULT)
- return endian;
- }
+ /* If the firmware node exists, try to get the endianness from it */
+ if (fwnode_property_read_bool(fwnode, "big-endian"))
+ endian = REGMAP_ENDIAN_BIG;
+ else if (fwnode_property_read_bool(fwnode, "little-endian"))
+ endian = REGMAP_ENDIAN_LITTLE;
+ else if (fwnode_property_read_bool(fwnode, "native-endian"))
+ endian = REGMAP_ENDIAN_NATIVE;
+
+ /* If the endianness was specified in fwnode, use that */
+ if (endian != REGMAP_ENDIAN_DEFAULT)
+ return endian;
/* Retrieve the endianness specification from the bus config */
if (bus && bus->val_format_endian_default)
@@ -2024,7 +2019,7 @@ EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
+int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force)
{
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 2fb25c348d53..2723a70eb855 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -282,7 +282,7 @@ out:
return err;
}
-static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_disk->private_data;
struct bio_vec bvec;
@@ -330,6 +330,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
+ .submit_bio = brd_submit_bio,
.rw_page = brd_rw_page,
};
@@ -381,7 +382,7 @@ static struct brd_device *brd_alloc(int i)
spin_lock_init(&brd->brd_lock);
INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
- brd->brd_queue = blk_alloc_queue(brd_make_request, NUMA_NO_NODE);
+ brd->brd_queue = blk_alloc_queue(NUMA_NO_NODE);
if (!brd->brd_queue)
goto out_free_dev;
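
brd is the first of many conversions below to the same shape, so a minimal sketch of the target pattern, assuming a hypothetical mydev driver (only blk_alloc_queue(), ->submit_bio and the bi_disk lookup reflect the patch):

    #include <linux/blkdev.h>

    struct mydev { /* driver private state */ };

    static blk_qc_t mydev_submit_bio(struct bio *bio)
    {
            /* private data now comes from the gendisk, not q->queuedata */
            struct mydev *dev = bio->bi_disk->private_data;

            blk_queue_split(&bio);  /* note: no request_queue argument */
            /* ... service the bio against dev ... */
            bio_endio(bio);
            return BLK_QC_T_NONE;
    }

    static const struct block_device_operations mydev_fops = {
            .owner      = THIS_MODULE,
            .submit_bio = mydev_submit_bio,
    };

    /* and at setup time, allocation takes only the NUMA node: */
    /* q = blk_alloc_queue(NUMA_NO_NODE); */

Moving the hook from the queue into the fops is what lets several drivers below delete their q->queuedata assignments.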
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index 52d885cdccb5..cbacddc55a1d 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -35,7 +35,7 @@ config BLK_DEV_DRBD
cache coherency.
For automatic failover you need a cluster manager (e.g. heartbeat).
- See also: http://www.drbd.org/, http://www.linux-ha.org
+ See also: https://www.drbd.org/, http://www.linux-ha.org
If unsure, say N.
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 33d0831c99b6..fe6cb99eb917 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1451,7 +1451,7 @@ extern void conn_free_crypto(struct drbd_connection *connection);
/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
-extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
+extern blk_qc_t drbd_submit_bio(struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
@@ -1576,12 +1576,12 @@ void drbd_set_my_capacity(struct drbd_device *device, sector_t size);
/*
* used to submit our private bio
*/
-static inline void drbd_generic_make_request(struct drbd_device *device,
+static inline void drbd_submit_bio_noacct(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
if (!bio->bi_disk) {
- drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
+ drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n");
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return;
@@ -1590,7 +1590,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
if (drbd_insert_fault(device, fault_type))
bio_io_error(bio);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
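
The generic_make_request() -> submit_bio_noacct() rename recurs through the drbd hunks and the rest of the series. As I read it, the _noacct suffix marks the resubmission path for stacking drivers: the bio was already accounted when first submitted, so the remapped resubmission must not be counted again. Typical shape, with remap_sector() a hypothetical helper:

    bio_set_dev(bio, backing_bdev);
    bio->bi_iter.bi_sector = remap_sector(bio->bi_iter.bi_sector);
    submit_bio_noacct(bio);     /* no second round of I/O accounting */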
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 45fbd526c453..cb687ccdbd96 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -132,9 +132,10 @@ wait_queue_head_t drbd_pp_wait;
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
- .owner = THIS_MODULE,
- .open = drbd_open,
- .release = drbd_release,
+ .owner = THIS_MODULE,
+ .submit_bio = drbd_submit_bio,
+ .open = drbd_open,
+ .release = drbd_release,
};
struct bio *bio_alloc_drbd(gfp_t gfp_mask)
@@ -2324,7 +2325,7 @@ static void do_retry(struct work_struct *ws)
* workqueues instead.
*/
- /* We are not just doing generic_make_request(),
+ /* We are not just doing submit_bio_noacct(),
* as we want to keep the start_time information. */
inc_ap_bio(device);
__drbd_make_request(device, bio, start_jif);
@@ -2414,62 +2415,6 @@ static void drbd_cleanup(void)
pr_info("module cleanup done.\n");
}
-/**
- * drbd_congested() - Callback for the flusher thread
- * @congested_data: User data
- * @bdi_bits: Bits the BDI flusher thread is currently interested in
- *
- * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
- */
-static int drbd_congested(void *congested_data, int bdi_bits)
-{
- struct drbd_device *device = congested_data;
- struct request_queue *q;
- char reason = '-';
- int r = 0;
-
- if (!may_inc_ap_bio(device)) {
- /* DRBD has frozen IO */
- r = bdi_bits;
- reason = 'd';
- goto out;
- }
-
- if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
- r |= (1 << WB_async_congested);
- /* Without good local data, we would need to read from remote,
- * and that would need the worker thread as well, which is
- * currently blocked waiting for that usermode helper to
- * finish.
- */
- if (!get_ldev_if_state(device, D_UP_TO_DATE))
- r |= (1 << WB_sync_congested);
- else
- put_ldev(device);
- r &= bdi_bits;
- reason = 'c';
- goto out;
- }
-
- if (get_ldev(device)) {
- q = bdev_get_queue(device->ldev->backing_bdev);
- r = bdi_congested(q->backing_dev_info, bdi_bits);
- put_ldev(device);
- if (r)
- reason = 'b';
- }
-
- if (bdi_bits & (1 << WB_async_congested) &&
- test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
- r |= (1 << WB_async_congested);
- reason = reason == 'b' ? 'a' : 'n';
- }
-
-out:
- device->congestion_reason = reason;
- return r;
-}
-
static void drbd_init_workqueue(struct drbd_work_queue* wq)
{
spin_lock_init(&wq->q_lock);
@@ -2801,11 +2746,10 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
drbd_init_set_defaults(device);
- q = blk_alloc_queue(drbd_make_request, NUMA_NO_NODE);
+ q = blk_alloc_queue(NUMA_NO_NODE);
if (!q)
goto out_no_q;
device->rq_queue = q;
- q->queuedata = device;
disk = alloc_disk(1);
if (!disk)
@@ -2825,9 +2769,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
/* we have no partitions. we contain only ourselves. */
device->this_bdev->bd_contains = device->this_bdev;
- q->backing_dev_info->congested_fn = drbd_congested;
- q->backing_dev_info->congested_data = device;
-
blk_queue_write_cache(q, true, true);
/* Setting max_hw_sectors to an odd value of 8 KiB here triggers a
max_bio_size message upon first attach or connect */
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index da4a3ebe04ef..28eb078f8b75 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1250,7 +1250,7 @@ static void fixup_discard_if_not_supported(struct request_queue *q)
static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
{
- /* Fixup max_write_zeroes_sectors after blk_queue_stack_limits():
+ /* Fixup max_write_zeroes_sectors after blk_stack_limits():
* if we can handle "zeroes" efficiently on the protocol,
* we want to do that, even if our backend does not announce
* max_write_zeroes_sectors itself. */
@@ -1361,7 +1361,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
decide_on_write_same_support(device, q, b, o, disable_write_same);
if (b) {
- blk_queue_stack_limits(q, b);
+ blk_stack_limits(&q->limits, &b->limits, 0);
if (q->backing_dev_info->ra_pages !=
b->backing_dev_info->ra_pages) {
@@ -3423,7 +3423,7 @@ int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *resource_filter;
struct drbd_resource *resource;
- struct drbd_device *uninitialized_var(device);
+ struct drbd_device *device;
int minor, err, retcode;
struct drbd_genlmsghdr *dh;
struct device_info device_info;
@@ -3512,7 +3512,7 @@ int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *resource_filter;
struct drbd_resource *resource = NULL, *next_resource;
- struct drbd_connection *uninitialized_var(connection);
+ struct drbd_connection *connection;
int err = 0, retcode;
struct drbd_genlmsghdr *dh;
struct connection_info connection_info;
@@ -3674,7 +3674,7 @@ int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlattr *resource_filter;
struct drbd_resource *resource;
- struct drbd_device *uninitialized_var(device);
+ struct drbd_device *device;
struct drbd_peer_device *peer_device = NULL;
int minor, err, retcode;
struct drbd_genlmsghdr *dh;
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 1c41cd9982a2..3c0193de2498 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -265,7 +265,6 @@ int drbd_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
/* reset device->congestion_reason */
- bdi_rw_congested(device->rq_queue->backing_dev_info);
nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 3a3f2b6a821f..c74f561b4eab 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1723,7 +1723,7 @@ next_bio:
bios = bios->bi_next;
bio->bi_next = NULL;
- drbd_generic_make_request(device, fault_type, bio);
+ drbd_submit_bio_noacct(device, fault_type, bio);
} while (bios);
return 0;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index c80a2f1c3c2a..674be09b2da9 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1164,7 +1164,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
else if (bio_op(bio) == REQ_OP_DISCARD)
drbd_process_discard_or_zeroes_req(req, EE_TRIM);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
put_ldev(device);
} else
bio_io_error(bio);
@@ -1593,12 +1593,12 @@ void do_submit(struct work_struct *ws)
}
}
-blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
+blk_qc_t drbd_submit_bio(struct bio *bio)
{
- struct drbd_device *device = (struct drbd_device *) q->queuedata;
+ struct drbd_device *device = bio->bi_disk->private_data;
unsigned long start_jif;
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
start_jif = jiffies;
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index eeaa3b49b264..0067d328f0b5 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1604,7 +1604,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
void (*last_func)(struct sk_buff *, unsigned int, void *,
enum drbd_notification_type) = NULL;
- void *uninitialized_var(last_arg);
+ void *last_arg = NULL;
#define HAS_CHANGED(state) ((state)[OLD] != (state)[NEW])
#define FINAL_STATE_CHANGE(type) \
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 2b89c9f2ca70..7c903de5c4e1 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1525,7 +1525,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
drbd_req_make_private_bio(req, req->master_bio);
bio_set_dev(req->private_bio, device->ldev->backing_bdev);
- generic_make_request(req->private_bio);
+ submit_bio_noacct(req->private_bio);
return 0;
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3e9db22db2a8..09079aee8dc4 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4205,7 +4205,6 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
struct bio_vec bio_vec;
struct page *page;
struct rb0_cbdata cbdata;
- size_t size;
page = alloc_page(GFP_NOIO);
if (!page) {
@@ -4213,15 +4212,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
return -ENOMEM;
}
- size = bdev->bd_block_size;
- if (!size)
- size = 1024;
-
cbdata.drive = drive;
bio_init(&bio, &bio_vec, 1);
bio_set_dev(&bio, bdev);
- bio_add_page(&bio, page, size, 0);
+ bio_add_page(&bio, page, block_size(bdev), 0);
bio.bi_iter.bi_sector = 0;
bio.bi_flags |= (1 << BIO_QUIET);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 475e1a738560..d18160146226 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -509,7 +509,8 @@ static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
return;
kfree(cmd->bvec);
cmd->bvec = NULL;
- blk_mq_complete_request(rq);
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
}
static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -1089,11 +1090,10 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
* here to avoid changing device under exclusive owner.
*/
if (!(mode & FMODE_EXCL)) {
- claimed_bdev = bd_start_claiming(bdev, loop_configure);
- if (IS_ERR(claimed_bdev)) {
- error = PTR_ERR(claimed_bdev);
+ claimed_bdev = bdev->bd_contains;
+ error = bd_prepare_to_claim(bdev, claimed_bdev, loop_configure);
+ if (error)
goto out_putf;
- }
}
error = mutex_lock_killable(&loop_ctl_mutex);
@@ -2048,7 +2048,8 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
cmd->ret = ret;
else
cmd->ret = ret ? -EIO : 0;
- blk_mq_complete_request(rq);
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
}
}
@@ -2402,6 +2403,8 @@ static void __exit loop_exit(void)
range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
+ mutex_lock(&loop_ctl_mutex);
+
idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
idr_destroy(&loop_index_idr);
@@ -2409,6 +2412,8 @@ static void __exit loop_exit(void)
unregister_blkdev(LOOP_MAJOR, "loop");
misc_deregister(&loop_misc);
+
+ mutex_unlock(&loop_ctl_mutex);
}
module_init(loop_init);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index f6bafa9a68b9..153e2cdecb4d 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -492,7 +492,8 @@ static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status)
struct request *req = blk_mq_rq_from_pdu(cmd);
cmd->status = status;
- blk_mq_complete_request(req);
+ if (likely(!blk_should_fake_timeout(req->q)))
+ blk_mq_complete_request(req);
}
/*
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ce7e9f223b20..3ff4054d6834 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -784,6 +784,7 @@ static void recv_work(struct work_struct *work)
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
struct nbd_cmd *cmd;
+ struct request *rq;
while (1) {
cmd = nbd_read_stat(nbd, args->index);
@@ -796,7 +797,9 @@ static void recv_work(struct work_struct *work)
break;
}
- blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
+ rq = blk_mq_rq_from_pdu(cmd);
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
}
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
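
loop, mtip32xx and nbd above (and several drivers below) all gain the same guard around completion. The point, as far as I can tell, is that blk_mq_complete_request() no longer filters fake timeouts internally, so drivers must drop the completion themselves when timeout fault injection (CONFIG_FAIL_IO_TIMEOUT) is armed on the queue:

    /* swallow the completion so the block layer's timeout handler,
     * and with it the driver's ->timeout path, gets exercised */
    if (likely(!blk_should_fake_timeout(rq->q)))
            blk_mq_complete_request(rq);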
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index 81b311c9d781..daed4a9c3436 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -49,6 +49,7 @@ struct nullb_device {
unsigned long completion_nsec; /* time in ns to complete a request */
unsigned long cache_size; /* disk cache size in MB */
unsigned long zone_size; /* zone size in MB if device is zoned */
+ unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
unsigned int zone_nr_conv; /* number of conventional zones */
unsigned int submit_queues; /* number of submission queues */
unsigned int home_node; /* home node for the device */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 87b31f9ca362..47a9dad880af 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -200,6 +200,10 @@ static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two. Default: 256");
+static unsigned long g_zone_capacity;
+module_param_named(zone_capacity, g_zone_capacity, ulong, 0444);
+MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size");
+
static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");
@@ -341,6 +345,7 @@ NULLB_DEVICE_ATTR(mbps, uint, NULL);
NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
NULLB_DEVICE_ATTR(zoned, bool, NULL);
NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
+NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
@@ -457,6 +462,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_badblocks,
&nullb_device_attr_zoned,
&nullb_device_attr_zone_size,
+ &nullb_device_attr_zone_capacity,
&nullb_device_attr_zone_nr_conv,
NULL,
};
@@ -510,7 +516,8 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_nr_conv\n");
+ return snprintf(page, PAGE_SIZE,
+ "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -571,6 +578,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->use_per_node_hctx = g_use_per_node_hctx;
dev->zoned = g_zoned;
dev->zone_size = g_zone_size;
+ dev->zone_capacity = g_zone_capacity;
dev->zone_nr_conv = g_zone_nr_conv;
return dev;
}
@@ -1283,7 +1291,8 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
case NULL_IRQ_SOFTIRQ:
switch (cmd->nq->dev->queue_mode) {
case NULL_Q_MQ:
- blk_mq_complete_request(cmd->rq);
+ if (likely(!blk_should_fake_timeout(cmd->rq->q)))
+ blk_mq_complete_request(cmd->rq);
break;
case NULL_Q_BIO:
/*
@@ -1387,11 +1396,11 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
return &nullb->queues[index];
}
-static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
+static blk_qc_t null_submit_bio(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
- struct nullb *nullb = q->queuedata;
+ struct nullb *nullb = bio->bi_disk->private_data;
struct nullb_queue *nq = nullb_to_queue(nullb);
struct nullb_cmd *cmd;
@@ -1423,7 +1432,7 @@ static bool should_requeue_request(struct request *rq)
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
pr_info("rq %p timed out\n", rq);
- blk_mq_force_complete_rq(rq);
+ blk_mq_complete_request(rq);
return BLK_EH_DONE;
}
@@ -1574,7 +1583,13 @@ static void null_config_discard(struct nullb *nullb)
blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}
-static const struct block_device_operations null_ops = {
+static const struct block_device_operations null_bio_ops = {
+ .owner = THIS_MODULE,
+ .submit_bio = null_submit_bio,
+ .report_zones = null_report_zones,
+};
+
+static const struct block_device_operations null_rq_ops = {
.owner = THIS_MODULE,
.report_zones = null_report_zones,
};
@@ -1646,7 +1661,10 @@ static int null_gendisk_register(struct nullb *nullb)
disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
disk->first_minor = nullb->index;
- disk->fops = &null_ops;
+ if (queue_is_mq(nullb->q))
+ disk->fops = &null_rq_ops;
+ else
+ disk->fops = &null_bio_ops;
disk->private_data = nullb;
disk->queue = nullb->q;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
@@ -1791,7 +1809,7 @@ static int null_add_dev(struct nullb_device *dev)
goto out_cleanup_tags;
}
} else if (dev->queue_mode == NULL_Q_BIO) {
- nullb->q = blk_alloc_queue(null_queue_bio, dev->home_node);
+ nullb->q = blk_alloc_queue(dev->home_node);
if (!nullb->q) {
rv = -ENOMEM;
goto out_cleanup_queues;
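
null_blk needs two ops tables because ->submit_bio now lives in the fops and only the bio-mode queue may advertise it; blk-mq queues keep dispatching through the tag set. The selection above could equally be written as a ternary (behaviorally identical sketch):

    disk->fops = queue_is_mq(nullb->q) ? &null_rq_ops : &null_bio_ops;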
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index cc47606d8ffe..3d25c9ad2383 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -28,6 +28,15 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
return -EINVAL;
}
+ if (!dev->zone_capacity)
+ dev->zone_capacity = dev->zone_size;
+
+ if (dev->zone_capacity > dev->zone_size) {
+ pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n",
+ dev->zone_capacity, dev->zone_size);
+ return -EINVAL;
+ }
+
dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
dev->nr_zones = dev_size >>
(SECTOR_SHIFT + ilog2(dev->zone_size_sects));
@@ -47,6 +56,7 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
zone->start = sector;
zone->len = dev->zone_size_sects;
+ zone->capacity = zone->len;
zone->wp = zone->start + zone->len;
zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
zone->cond = BLK_ZONE_COND_NOT_WP;
@@ -59,6 +69,7 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
zone->start = zone->wp = sector;
zone->len = dev->zone_size_sects;
+ zone->capacity = dev->zone_capacity << ZONE_SIZE_SHIFT;
zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
zone->cond = BLK_ZONE_COND_EMPTY;
@@ -185,6 +196,9 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
return BLK_STS_IOERR;
}
+ if (zone->wp + nr_sectors > zone->start + zone->capacity)
+ return BLK_STS_IOERR;
+
if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
@@ -193,7 +207,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
return ret;
zone->wp += nr_sectors;
- if (zone->wp == zone->start + zone->len)
+ if (zone->wp == zone->start + zone->capacity)
zone->cond = BLK_ZONE_COND_FULL;
return BLK_STS_OK;
default:
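
Worked numbers for the new capacity bound (values invented for the example): with zone_size=256 and zone_capacity=192, the last 64 MB of each 256 MB zone is an unwritable hole. A write is rejected once it would cross start + capacity, and the zone transitions to FULL at exactly wp == start + capacity instead of start + len:

    /* 1 MB = 2048 sectors of 512 bytes */
    sector_t len = 256 * 2048;  /* 524288 sectors per zone */
    sector_t cap = 192 * 2048;  /* 393216 writable sectors  */

    /* an 8-sector write at wp == start + cap - 4 now fails,
     * since wp + 8 > start + cap  =>  BLK_STS_IOERR */

This mirrors the zone-capacity concept that NVMe ZNS exposes, which appears to be what the emulation is for.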
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 27a33adc41e4..4becc1efe775 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -36,7 +36,7 @@
* block device, assembling the pieces to full packets and queuing them to the
* packet I/O scheduler.
*
- * At the top layer there is a custom make_request_fn function that forwards
+ * At the top layer there is a custom ->submit_bio function that forwards
* read requests directly to the iosched queue and puts write requests in the
* unaligned write queue. A kernel thread performs the necessary read
* gathering to convert the unaligned writes to aligned writes and then feeds
@@ -913,7 +913,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
}
atomic_inc(&pd->cdrw.pending_bios);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
@@ -2428,15 +2428,15 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
}
}
-static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t pkt_submit_bio(struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
struct bio *split;
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
- pd = q->queuedata;
+ pd = bio->bi_disk->queue->queuedata;
if (!pd) {
pr_err("%s incorrect request queue\n", bio_devname(bio, b));
goto end_io;
@@ -2480,7 +2480,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
split = bio;
}
- pkt_make_request_write(q, split);
+ pkt_make_request_write(bio->bi_disk->queue, split);
} while (split != bio);
return BLK_QC_T_NONE;
@@ -2685,6 +2685,7 @@ static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
static const struct block_device_operations pktcdvd_ops = {
.owner = THIS_MODULE,
+ .submit_bio = pkt_submit_bio,
.open = pkt_open,
.release = pkt_close,
.ioctl = pkt_ioctl,
@@ -2749,7 +2750,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
disk->flags = GENHD_FL_REMOVABLE;
strcpy(disk->disk_name, pd->name);
disk->private_data = pd;
- disk->queue = blk_alloc_queue(pkt_make_request, NUMA_NO_NODE);
+ disk->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!disk->queue)
goto out_mem2;
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 821d4d8b1d76..1088798c8dd0 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -90,12 +90,6 @@ struct ps3vram_priv {
static int ps3vram_major;
-
-static const struct block_device_operations ps3vram_fops = {
- .owner = THIS_MODULE,
-};
-
-
#define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */
#define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */
#define DMA_NOTIFIER_SIZE 0x40
@@ -585,15 +579,15 @@ out:
return next;
}
-static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t ps3vram_submit_bio(struct bio *bio)
{
- struct ps3_system_bus_device *dev = q->queuedata;
+ struct ps3_system_bus_device *dev = bio->bi_disk->private_data;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int busy;
dev_dbg(&dev->core, "%s\n", __func__);
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
spin_lock_irq(&priv->lock);
busy = !bio_list_empty(&priv->list);
@@ -610,6 +604,11 @@ static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
+static const struct block_device_operations ps3vram_fops = {
+ .owner = THIS_MODULE,
+ .submit_bio = ps3vram_submit_bio,
+};
+
static int ps3vram_probe(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv;
@@ -737,7 +736,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
ps3vram_proc_init(dev);
- queue = blk_alloc_queue(ps3vram_make_request, NUMA_NO_NODE);
+ queue = blk_alloc_queue(NUMA_NO_NODE);
if (!queue) {
dev_err(&dev->core, "blk_alloc_queue failed\n");
error = -ENOMEM;
@@ -745,7 +744,6 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
}
priv->queue = queue;
- queue->queuedata = dev;
blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4f61e9209461..d9c0e7d154f9 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1993,7 +1993,7 @@ static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
struct ceph_osd_data *osd_data;
u64 objno;
- u8 state, new_state, uninitialized_var(current_state);
+ u8 state, new_state, current_state;
bool has_current_state;
void *p;
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 10f6368117d8..01333af13bbb 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -562,13 +562,15 @@ static int rsxx_eeh_frozen(struct pci_dev *dev)
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
- pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
- card->ctrl[i].status.buf,
- card->ctrl[i].status.dma_addr);
+ dma_free_coherent(&card->dev->dev,
+ STATUS_BUFFER_SIZE8,
+ card->ctrl[i].status.buf,
+ card->ctrl[i].status.dma_addr);
if (card->ctrl[i].cmd.buf)
- pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
- card->ctrl[i].cmd.buf,
- card->ctrl[i].cmd.dma_addr);
+ dma_free_coherent(&card->dev->dev,
+ COMMAND_BUFFER_SIZE8,
+ card->ctrl[i].cmd.buf,
+ card->ctrl[i].cmd.dma_addr);
}
return 0;
@@ -711,15 +713,15 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
failed_hw_buffers_init:
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
- pci_free_consistent(card->dev,
- STATUS_BUFFER_SIZE8,
- card->ctrl[i].status.buf,
- card->ctrl[i].status.dma_addr);
+ dma_free_coherent(&card->dev->dev,
+ STATUS_BUFFER_SIZE8,
+ card->ctrl[i].status.buf,
+ card->ctrl[i].status.dma_addr);
if (card->ctrl[i].cmd.buf)
- pci_free_consistent(card->dev,
- COMMAND_BUFFER_SIZE8,
- card->ctrl[i].cmd.buf,
- card->ctrl[i].cmd.dma_addr);
+ dma_free_coherent(&card->dev->dev,
+ COMMAND_BUFFER_SIZE8,
+ card->ctrl[i].cmd.buf,
+ card->ctrl[i].cmd.dma_addr);
}
failed_hw_setup:
rsxx_eeh_failure(dev);
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 3ba07ab30c84..edacefff6e35 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -50,6 +50,8 @@ struct rsxx_bio_meta {
static struct kmem_cache *bio_meta_pool;
+static blk_qc_t rsxx_submit_bio(struct bio *bio);
+
/*----------------- Block Device Operations -----------------*/
static int rsxx_blkdev_ioctl(struct block_device *bdev,
fmode_t mode,
@@ -92,6 +94,7 @@ static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static const struct block_device_operations rsxx_fops = {
.owner = THIS_MODULE,
+ .submit_bio = rsxx_submit_bio,
.getgeo = rsxx_getgeo,
.ioctl = rsxx_blkdev_ioctl,
};
@@ -117,13 +120,13 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
}
}
-static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t rsxx_submit_bio(struct bio *bio)
{
- struct rsxx_cardinfo *card = q->queuedata;
+ struct rsxx_cardinfo *card = bio->bi_disk->private_data;
struct rsxx_bio_meta *bio_meta;
blk_status_t st = BLK_STS_IOERR;
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
might_sleep();
@@ -233,7 +236,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
return -ENOMEM;
}
- card->queue = blk_alloc_queue(rsxx_make_request, NUMA_NO_NODE);
+ card->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!card->queue) {
dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
unregister_blkdev(card->major, DRIVER_NAME);
@@ -267,8 +270,6 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
card->queue->limits.discard_alignment = RSXX_HW_BLK_SIZE;
}
- card->queue->queuedata = card;
-
snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
"rsxx%d", card->disk_id);
card->gendisk->major = card->major;
@@ -289,7 +290,6 @@ void rsxx_destroy_dev(struct rsxx_cardinfo *card)
card->gendisk = NULL;
blk_cleanup_queue(card->queue);
- card->queue->queuedata = NULL;
unregister_blkdev(card->major, DRIVER_NAME);
}
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 51569c199a6c..3a476dc1d14f 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -1417,7 +1417,8 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
case SKD_CHECK_STATUS_REPORT_GOOD:
case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
skreq->status = BLK_STS_OK;
- blk_mq_complete_request(req);
+ if (likely(!blk_should_fake_timeout(req->q)))
+ blk_mq_complete_request(req);
break;
case SKD_CHECK_STATUS_BUSY_IMMINENT:
@@ -1440,7 +1441,8 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
case SKD_CHECK_STATUS_REPORT_ERROR:
default:
skreq->status = BLK_STS_IOERR;
- blk_mq_complete_request(req);
+ if (likely(!blk_should_fake_timeout(req->q)))
+ blk_mq_complete_request(req);
break;
}
}
@@ -1560,7 +1562,8 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
*/
if (likely(cmp_status == SAM_STAT_GOOD)) {
skreq->status = BLK_STS_OK;
- blk_mq_complete_request(rq);
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
} else {
skd_resolve_req_exception(skdev, skreq, rq);
}
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 1e2aa5ae2796..2b95d7b33b91 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -519,14 +519,15 @@ static int mm_check_plugged(struct cardinfo *card)
return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
}
-static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t mm_submit_bio(struct bio *bio)
{
- struct cardinfo *card = q->queuedata;
+ struct cardinfo *card = bio->bi_disk->private_data;
+
pr_debug("mm_make_request %llu %u\n",
(unsigned long long)bio->bi_iter.bi_sector,
bio->bi_iter.bi_size);
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
spin_lock_irq(&card->lock);
*card->biotail = bio;
@@ -778,6 +779,7 @@ static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static const struct block_device_operations mm_fops = {
.owner = THIS_MODULE,
+ .submit_bio = mm_submit_bio,
.getgeo = mm_getgeo,
.revalidate_disk = mm_revalidate,
};
@@ -885,10 +887,9 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
card->biotail = &card->bio;
spin_lock_init(&card->lock);
- card->queue = blk_alloc_queue(mm_make_request, NUMA_NO_NODE);
+ card->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!card->queue)
goto failed_alloc;
- card->queue->queuedata = card;
tasklet_init(&card->tasklet, process_page, (unsigned long)card);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 980df853ee49..63b213e00b37 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -171,7 +171,8 @@ static void virtblk_done(struct virtqueue *vq)
while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
struct request *req = blk_mq_rq_from_pdu(vbr);
- blk_mq_complete_request(req);
+ if (likely(!blk_should_fake_timeout(req->q)))
+ blk_mq_complete_request(req);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3b889ea950c2..3bb3dd8da9b0 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1655,7 +1655,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
BUG();
}
- blk_mq_complete_request(req);
+ if (likely(!blk_should_fake_timeout(req->q)))
+ blk_mq_complete_request(req);
}
rinfo->ring.rsp_cons = i;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 270dd810be54..9100ac36670a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -793,9 +793,9 @@ static void zram_sync_read(struct work_struct *work)
}
/*
- * Block layer want one ->make_request_fn to be active at a time
- * so if we use chained IO with parent IO in same context,
- * it's a deadlock. To avoid, it, it uses worker thread context.
+ * The block layer wants one ->submit_bio to be active at a time, so if we
+ * use chained IO with the parent IO in the same context, it's a deadlock. To
+ * avoid that, use a worker thread context.
*/
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
unsigned long entry, struct bio *bio)
@@ -1584,9 +1584,9 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
/*
* Handler function for all zram I/O requests.
*/
-static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
+static blk_qc_t zram_submit_bio(struct bio *bio)
{
- struct zram *zram = queue->queuedata;
+ struct zram *zram = bio->bi_disk->private_data;
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
@@ -1813,6 +1813,7 @@ static int zram_open(struct block_device *bdev, fmode_t mode)
static const struct block_device_operations zram_devops = {
.open = zram_open,
+ .submit_bio = zram_submit_bio,
.swap_slot_free_notify = zram_slot_free_notify,
.rw_page = zram_rw_page,
.owner = THIS_MODULE
@@ -1891,7 +1892,7 @@ static int zram_add(void)
#ifdef CONFIG_ZRAM_WRITEBACK
spin_lock_init(&zram->wb_limit_lock);
#endif
- queue = blk_alloc_queue(zram_make_request, NUMA_NO_NODE);
+ queue = blk_alloc_queue(NUMA_NO_NODE);
if (!queue) {
pr_err("Error allocating disk queue for device %d\n",
device_id);
@@ -1912,7 +1913,6 @@ static int zram_add(void)
zram->disk->first_minor = device_id;
zram->disk->fops = &zram_devops;
zram->disk->queue = queue;
- zram->disk->queue->queuedata = zram;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index c8b1c3842c1a..2a473c09bc33 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -27,7 +27,16 @@ static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
{
return mc_dev->obj_desc.id == obj_desc->id &&
strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
+}
+static bool fsl_mc_obj_desc_is_allocatable(struct fsl_mc_obj_desc *obj)
+{
+ if (strcmp(obj->type, "dpmcp") == 0 ||
+ strcmp(obj->type, "dpcon") == 0 ||
+ strcmp(obj->type, "dpbp") == 0)
+ return true;
+ else
+ return false;
}
static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
@@ -150,6 +159,27 @@ static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
}
}
+static void fsl_mc_obj_device_add(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ int error;
+ struct fsl_mc_device *child_dev;
+
+ /*
+ * Check if device is already known to Linux:
+ */
+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
+ if (child_dev) {
+ check_plugged_state_change(child_dev, obj_desc);
+ put_device(&child_dev->dev);
+ } else {
+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
+ &child_dev);
+ if (error < 0)
+ return;
+ }
+}
+
/**
* dprc_add_new_devices - Adds devices to the logical bus for a DPRC
*
@@ -166,30 +196,23 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
struct fsl_mc_obj_desc *obj_desc_array,
int num_child_objects_in_mc)
{
- int error;
int i;
+ /* probe the allocatable objects first */
for (i = 0; i < num_child_objects_in_mc; i++) {
- struct fsl_mc_device *child_dev;
struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
- if (strlen(obj_desc->type) == 0)
- continue;
+ if (strlen(obj_desc->type) > 0 &&
+ fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
+ }
- /*
- * Check if device is already known to Linux:
- */
- child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
- if (child_dev) {
- check_plugged_state_change(child_dev, obj_desc);
- put_device(&child_dev->dev);
- continue;
- }
+ for (i = 0; i < num_child_objects_in_mc; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
- error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
- &child_dev);
- if (error < 0)
- continue;
+ if (strlen(obj_desc->type) > 0 &&
+ !fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
}
}
@@ -592,6 +615,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
bool mc_io_created = false;
bool msi_domain_set = false;
u16 major_ver, minor_ver;
+ struct irq_domain *mc_msi_domain;
if (!is_fsl_mc_bus_dprc(mc_dev))
return -EINVAL;
@@ -621,31 +645,15 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
return error;
mc_io_created = true;
+ }
- /*
- * Inherit parent MSI domain:
- */
- dev_set_msi_domain(&mc_dev->dev,
- dev_get_msi_domain(parent_dev));
- msi_domain_set = true;
+ mc_msi_domain = fsl_mc_find_msi_domain(&mc_dev->dev);
+ if (!mc_msi_domain) {
+ dev_warn(&mc_dev->dev,
+ "WARNING: MC bus without interrupt support\n");
} else {
- /*
- * This is a root DPRC
- */
- struct irq_domain *mc_msi_domain;
-
- if (dev_is_fsl_mc(parent_dev))
- return -EINVAL;
-
- error = fsl_mc_find_msi_domain(parent_dev,
- &mc_msi_domain);
- if (error < 0) {
- dev_warn(&mc_dev->dev,
- "WARNING: MC bus without interrupt support\n");
- } else {
- dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
- msi_domain_set = true;
- }
+ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
+ msi_domain_set = true;
}
error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 40526da5c6a6..b69794e7364d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -18,6 +18,8 @@
#include <linux/bitops.h>
#include <linux/msi.h>
#include <linux/dma-mapping.h>
+#include <linux/acpi.h>
+#include <linux/iommu.h>
#include "fsl-mc-private.h"
@@ -38,6 +40,7 @@ struct fsl_mc {
struct fsl_mc_device *root_mc_bus_dev;
u8 num_translation_ranges;
struct fsl_mc_addr_translation_range *translation_ranges;
+ void *fsl_mc_regs;
};
/**
@@ -56,6 +59,10 @@ struct fsl_mc_addr_translation_range {
phys_addr_t start_phys_addr;
};
+#define FSL_MC_FAPR 0x28
+#define MC_FAPR_PL BIT(18)
+#define MC_FAPR_BMT BIT(17)
+
/**
* fsl_mc_bus_match - device to driver matching callback
* @dev: the fsl-mc device to match against
@@ -118,11 +125,16 @@ static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
static int fsl_mc_dma_configure(struct device *dev)
{
struct device *dma_dev = dev;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ u32 input_id = mc_dev->icid;
while (dev_is_fsl_mc(dma_dev))
dma_dev = dma_dev->parent;
- return of_dma_configure(dev, dma_dev->of_node, 0);
+ if (dev_of_node(dma_dev))
+ return of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+
+ return acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
}
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
@@ -201,6 +213,31 @@ struct device_type fsl_mc_bus_dpseci_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
+struct device_type fsl_mc_bus_dpdmux_type = {
+ .name = "fsl_mc_bus_dpdmux"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
+
+struct device_type fsl_mc_bus_dpdcei_type = {
+ .name = "fsl_mc_bus_dpdcei"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
+
+struct device_type fsl_mc_bus_dpaiop_type = {
+ .name = "fsl_mc_bus_dpaiop"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
+
+struct device_type fsl_mc_bus_dpci_type = {
+ .name = "fsl_mc_bus_dpci"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
+
+struct device_type fsl_mc_bus_dpdmai_type = {
+ .name = "fsl_mc_bus_dpdmai"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
+
static struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
@@ -217,6 +254,11 @@ static struct device_type *fsl_mc_get_device_type(const char *type)
{ &fsl_mc_bus_dpmac_type, "dpmac" },
{ &fsl_mc_bus_dprtc_type, "dprtc" },
{ &fsl_mc_bus_dpseci_type, "dpseci" },
+ { &fsl_mc_bus_dpdmux_type, "dpdmux" },
+ { &fsl_mc_bus_dpdcei_type, "dpdcei" },
+ { &fsl_mc_bus_dpaiop_type, "dpaiop" },
+ { &fsl_mc_bus_dpci_type, "dpci" },
+ { &fsl_mc_bus_dpdmai_type, "dpdmai" },
{ NULL, NULL }
};
int i;
@@ -368,8 +410,8 @@ EXPORT_SYMBOL_GPL(fsl_mc_get_version);
/**
* fsl_mc_get_root_dprc - function to traverse to the root dprc
*/
-static void fsl_mc_get_root_dprc(struct device *dev,
- struct device **root_dprc_dev)
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev)
{
if (!dev) {
*root_dprc_dev = NULL;
@@ -863,8 +905,11 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
struct fsl_mc_io *mc_io = NULL;
int container_id;
phys_addr_t mc_portal_phys_addr;
- u32 mc_portal_size;
- struct resource res;
+ u32 mc_portal_size, mc_stream_id;
+ struct resource *plat_res;
+
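+ /* Defer until an IOMMU driver is bound, so DMA configuration can succeed */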
+ if (!iommu_present(&fsl_mc_bus_type))
+ return -EPROBE_DEFER;
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
@@ -872,19 +917,33 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mc);
+ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res);
+ if (IS_ERR(mc->fsl_mc_regs))
+ return PTR_ERR(mc->fsl_mc_regs);
+
+ if (IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) {
+ mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
+ /*
+ * HW ORs the PL and BMT bits, places the result in bit 14 (0x4000)
+ * of the StreamID and ORs in the ICID. Calculate it accordingly.
+ */
+ mc_stream_id = (mc_stream_id & 0xffff) |
+ ((mc_stream_id & (MC_FAPR_PL | MC_FAPR_BMT)) ?
+ 0x4000 : 0);
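+ /* e.g. FAPR = 0x0006000a (PL and BMT set, ICID 0xa) -> StreamID 0x400a */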
+ error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT,
+ &mc_stream_id);
+ if (error)
+ dev_warn(&pdev->dev, "failed to configure dma: %d.\n",
+ error);
+ }
+
/*
* Get physical address of MC portal for the root DPRC:
*/
- error = of_address_to_resource(pdev->dev.of_node, 0, &res);
- if (error < 0) {
- dev_err(&pdev->dev,
- "of_address_to_resource() failed for %pOF\n",
- pdev->dev.of_node);
- return error;
- }
-
- mc_portal_phys_addr = res.start;
- mc_portal_size = resource_size(&res);
+ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mc_portal_phys_addr = plat_res->start;
+ mc_portal_size = resource_size(plat_res);
error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
mc_portal_size, NULL,
FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
@@ -901,11 +960,13 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
mc_version.major, mc_version.minor, mc_version.revision);
- error = get_mc_addr_translation_ranges(&pdev->dev,
- &mc->translation_ranges,
- &mc->num_translation_ranges);
- if (error < 0)
- goto error_cleanup_mc_io;
+ if (dev_of_node(&pdev->dev)) {
+ error = get_mc_addr_translation_ranges(&pdev->dev,
+ &mc->translation_ranges,
+ &mc->num_translation_ranges);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+ }
error = dprc_get_container_id(mc_io, 0, &container_id);
if (error < 0) {
@@ -932,6 +993,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
goto error_cleanup_mc_io;
mc->root_mc_bus_dev = mc_bus_dev;
+ mc_bus_dev->dev.fwnode = pdev->dev.fwnode;
return 0;
error_cleanup_mc_io:
@@ -965,11 +1027,18 @@ static const struct of_device_id fsl_mc_bus_match_table[] = {
MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
+static const struct acpi_device_id fsl_mc_bus_acpi_match_table[] = {
+ {"NXP0008", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fsl_mc_bus_acpi_match_table);
+
static struct platform_driver fsl_mc_bus_driver = {
.driver = {
.name = "fsl_mc_bus",
.pm = NULL,
.of_match_table = fsl_mc_bus_match_table,
+ .acpi_match_table = fsl_mc_bus_acpi_match_table,
},
.probe = fsl_mc_bus_probe,
.remove = fsl_mc_bus_remove,
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
index 8b9c66d7c4ff..8edadf05cbb7 100644
--- a/drivers/bus/fsl-mc/fsl-mc-msi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -13,6 +13,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
+#include <linux/acpi_iort.h>
#include "fsl-mc-private.h"
@@ -177,23 +178,36 @@ struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
return domain;
}
-int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
- struct irq_domain **mc_msi_domain)
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev)
{
+ struct device *root_dprc_dev;
+ struct device *bus_dev;
struct irq_domain *msi_domain;
- struct device_node *mc_of_node = mc_platform_dev->of_node;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node,
- DOMAIN_BUS_FSL_MC_MSI);
- if (!msi_domain) {
- pr_err("Unable to find fsl-mc MSI domain for %pOF\n",
- mc_of_node);
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ bus_dev = root_dprc_dev->parent;
+
+ if (bus_dev->of_node) {
+ msi_domain = of_msi_map_get_device_domain(dev,
+ mc_dev->icid,
+ DOMAIN_BUS_FSL_MC_MSI);
- return -ENOENT;
+ /*
+ * If the msi-map property is missing, assume that all the
+ * child containers inherit the domain from the parent.
+ */
+ if (!msi_domain)
+ msi_domain = of_msi_get_domain(bus_dev,
+ bus_dev->of_node,
+ DOMAIN_BUS_FSL_MC_MSI);
+ } else {
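+ /* ACPI case: the IORT table supplies the MSI domain for this ICID */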
+ msi_domain = iort_get_device_domain(dev, mc_dev->icid,
+ DOMAIN_BUS_FSL_MC_MSI);
}
- *mc_msi_domain = msi_domain;
- return 0;
+ return msi_domain;
}
static void fsl_mc_msi_free_descs(struct device *dev)
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index 21ca8c756ee7..7a46a12eb747 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -595,8 +595,7 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
void fsl_mc_msi_domain_free_irqs(struct device *dev);
-int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
- struct irq_domain **mc_msi_domain);
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev);
int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
unsigned int irq_count);
@@ -613,6 +612,9 @@ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
bool fsl_mc_is_root_dprc(struct device *dev);
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev);
+
struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
struct fsl_mc_device *mc_bus_dev);
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 6ae48ad80409..a30b53f1d87d 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -82,7 +82,7 @@ int __must_check fsl_create_mc_io(struct device *dev,
mc_io->portal_phys_addr = mc_portal_phys_addr;
mc_io->portal_size = mc_portal_size;
if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_lock_init(&mc_io->spinlock);
+ raw_spin_lock_init(&mc_io->spinlock);
else
mutex_init(&mc_io->mutex);
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index 3221a7fbaf0a..85a0225db522 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -251,7 +251,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
return -EINVAL;
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_lock_irqsave(&mc_io->spinlock, irq_flags);
+ raw_spin_lock_irqsave(&mc_io->spinlock, irq_flags);
else
mutex_lock(&mc_io->mutex);
@@ -287,7 +287,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
error = 0;
common_exit:
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
+ raw_spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
else
mutex_unlock(&mc_io->mutex);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 191c97b84715..fb5a901fd89e 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1395,6 +1395,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
@@ -1473,8 +1477,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
- SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0),
- SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 0),
SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
#endif
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index d82b3b7658bd..0c271b9e3c5b 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -605,7 +605,7 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
disk->cdi = cdi;
ENSURE(cdo, drive_status, CDC_DRIVE_STATUS);
- if (cdo->check_events == NULL && cdo->media_changed == NULL)
+ if (cdo->check_events == NULL)
WARN_ON_ONCE(cdo->capability & (CDC_MEDIA_CHANGED | CDC_SELECT_DISC));
ENSURE(cdo, tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
ENSURE(cdo, lock_door, CDC_LOCK);
@@ -1419,8 +1419,6 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
if (cdi->ops->check_events)
cdi->ops->check_events(cdi, 0, slot);
- else
- cdi->ops->media_changed(cdi, slot);
if (slot == CDSL_NONE) {
/* set media changed bits, on both queues */
@@ -1517,13 +1515,10 @@ int media_changed(struct cdrom_device_info *cdi, int queue)
return ret;
/* changed since last call? */
- if (cdi->ops->check_events) {
- BUG_ON(!queue); /* shouldn't be called from VFS path */
- cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE);
- changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE;
- cdi->ioctl_events = 0;
- } else
- changed = cdi->ops->media_changed(cdi, CDSL_CURRENT);
+ BUG_ON(!queue); /* shouldn't be called from VFS path */
+ cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE);
+ changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE;
+ cdi->ioctl_events = 0;
if (changed) {
cdi->mc_flags = 0x3; /* set bit on both queues */
@@ -1535,18 +1530,6 @@ int media_changed(struct cdrom_device_info *cdi, int queue)
return ret;
}
-int cdrom_media_changed(struct cdrom_device_info *cdi)
-{
- /* This talks to the VFS, which doesn't like errors - just 1 or 0.
- * Returning "0" is always safe (media hasn't been changed). Do that
- * if the low-level cdrom driver dosn't support media changed. */
- if (cdi == NULL || cdi->ops->media_changed == NULL)
- return 0;
- if (!CDROM_CAN(CDC_MEDIA_CHANGED))
- return 0;
- return media_changed(cdi, 0);
-}
-
/* Requests to the low-level drivers will /always/ be done in the
following format convention:
@@ -3464,7 +3447,6 @@ EXPORT_SYMBOL(unregister_cdrom);
EXPORT_SYMBOL(cdrom_open);
EXPORT_SYMBOL(cdrom_release);
EXPORT_SYMBOL(cdrom_ioctl);
-EXPORT_SYMBOL(cdrom_media_changed);
EXPORT_SYMBOL(cdrom_number_of_slots);
EXPORT_SYMBOL(cdrom_mode_select);
EXPORT_SYMBOL(cdrom_mode_sense);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 98c3a5d8003e..b1bd336761b1 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -38,7 +38,7 @@ config PRINTER
box (as opposed to using a serial printer; if the connector at the
printer has 9 or 25 holes ["female"], then it's serial), say Y.
Also read the Printing-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ <https://www.tldp.org/docs.html#howto>.
It is possible to share one parallel port among several devices
(e.g. printer and ZIP drive) and it is safe to compile the
@@ -201,7 +201,7 @@ config DTLK
depends on ISA
help
This driver is for the DoubleTalk PC, a speech synthesizer
- manufactured by RC Systems (<http://www.rcsys.com/>). It is also
+ manufactured by RC Systems (<https://www.rcsys.com/>). It is also
called the `internal DoubleTalk'.
To compile this driver as a module, choose M here: the
@@ -237,7 +237,7 @@ config APPLICOM
This driver provides the kernel-side support for the intelligent
fieldbus cards made by Applicom International. More information
about these cards can be found on the WWW at the address
- <http://www.applicom-int.com/>, or by email from David Woodhouse
+ <https://www.applicom-int.com/>, or by email from David Woodhouse
<dwmw2@infradead.org>.
To compile this driver as a module, choose M here: the
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 0ad17efc96df..f976a49e1fb5 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -74,6 +74,16 @@ config HW_RANDOM_ATMEL
If unsure, say Y.
+config HW_RANDOM_BA431
+ tristate "Silex Insight BA431 Random Number Generator support"
+ depends on HAS_IOMEM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware based on the Silex Insight BA431 IP.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ba431-rng.
+
config HW_RANDOM_BCM2835
tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
@@ -245,7 +255,7 @@ config HW_RANDOM_MXC_RNGA
config HW_RANDOM_IMX_RNGC
tristate "Freescale i.MX RNGC Random Number Generator"
depends on HAS_IOMEM && HAVE_CLK
- depends on SOC_IMX25 || COMPILE_TEST
+ depends on SOC_IMX25 || SOC_IMX6SL || SOC_IMX6SLL || SOC_IMX6UL || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -257,6 +267,21 @@ config HW_RANDOM_IMX_RNGC
If unsure, say Y.
+config HW_RANDOM_INGENIC_RNG
+ tristate "Ingenic Random Number Generator support"
+ depends on HW_RANDOM
+ depends on MACH_JZ4780 || MACH_X1000
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found in the Ingenic JZ4780 and X1000 SoCs. The
+ MIPS Creator CI20 uses the JZ4780 SoC, and the YSH & ATIL CU1000-Neo
+ uses the X1000 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ingenic-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 2c6724735345..26ae06844f09 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
+obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
n2-rng-y := n2-drv.o n2-asm.o
@@ -22,6 +23,7 @@ obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
+obj-$(CONFIG_HW_RANDOM_INGENIC_RNG) += ingenic-rng.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c
new file mode 100644
index 000000000000..410b50b05e21
--- /dev/null
+++ b/drivers/char/hw_random/ba431-rng.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Silex Insight
+
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#define BA431_RESET_DELAY 1 /* usec */
+#define BA431_RESET_READ_STATUS_TIMEOUT 1000 /* usec */
+#define BA431_RESET_READ_STATUS_INTERVAL 10 /* usec */
+#define BA431_READ_RETRY_INTERVAL 1 /* usec */
+
+#define BA431_REG_CTRL 0x00
+#define BA431_REG_FIFO_LEVEL 0x04
+#define BA431_REG_STATUS 0x30
+#define BA431_REG_FIFODATA 0x80
+
+#define BA431_CTRL_ENABLE BIT(0)
+#define BA431_CTRL_SOFTRESET BIT(8)
+
+#define BA431_STATUS_STATE_MASK (BIT(1) | BIT(2) | BIT(3))
+#define BA431_STATUS_STATE_OFFSET 1
+
+enum ba431_state {
+ BA431_STATE_RESET,
+ BA431_STATE_STARTUP,
+ BA431_STATE_FIFOFULLON,
+ BA431_STATE_FIFOFULLOFF,
+ BA431_STATE_RUNNING,
+ BA431_STATE_ERROR
+};
+
+struct ba431_trng {
+ struct device *dev;
+ void __iomem *base;
+ struct hwrng rng;
+ atomic_t reset_pending;
+ struct work_struct reset_work;
+};
+
+static inline u32 ba431_trng_read_reg(struct ba431_trng *ba431, u32 reg)
+{
+ return ioread32(ba431->base + reg);
+}
+
+static inline void ba431_trng_write_reg(struct ba431_trng *ba431, u32 reg,
+ u32 val)
+{
+ iowrite32(val, ba431->base + reg);
+}
+
+static inline enum ba431_state ba431_trng_get_state(struct ba431_trng *ba431)
+{
+ u32 status = ba431_trng_read_reg(ba431, BA431_REG_STATUS);
+
+ return (status & BA431_STATUS_STATE_MASK) >> BA431_STATUS_STATE_OFFSET;
+}
+
+static int ba431_trng_is_in_error(struct ba431_trng *ba431)
+{
+ enum ba431_state state = ba431_trng_get_state(ba431);
+
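+ /* Anything before STARTUP (still resetting) or at/after ERROR yields no entropy */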
+ if ((state < BA431_STATE_STARTUP) ||
+ (state >= BA431_STATE_ERROR))
+ return 1;
+
+ return 0;
+}
+
+static int ba431_trng_reset(struct ba431_trng *ba431)
+{
+ int ret;
+
+ /* Disable interrupts, random generation and enable the softreset */
+ ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_SOFTRESET);
+ udelay(BA431_RESET_DELAY);
+ ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_ENABLE);
+
+ /* Wait until the state changed */
+ if (readx_poll_timeout(ba431_trng_is_in_error, ba431, ret, !ret,
+ BA431_RESET_READ_STATUS_INTERVAL,
+ BA431_RESET_READ_STATUS_TIMEOUT)) {
+ dev_err(ba431->dev, "reset failed (state: %d)\n",
+ ba431_trng_get_state(ba431));
+ return -ETIMEDOUT;
+ }
+
+ dev_info(ba431->dev, "reset done\n");
+
+ return 0;
+}
+
+static void ba431_trng_reset_work(struct work_struct *work)
+{
+ struct ba431_trng *ba431 = container_of(work, struct ba431_trng,
+ reset_work);
+ ba431_trng_reset(ba431);
+ atomic_set(&ba431->reset_pending, 0);
+}
+
+static void ba431_trng_schedule_reset(struct ba431_trng *ba431)
+{
+ if (atomic_cmpxchg(&ba431->reset_pending, 0, 1))
+ return;
+
+ schedule_work(&ba431->reset_work);
+}
+
+static int ba431_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng);
+ u32 *data = buf;
+ unsigned int level, i;
+ int n = 0;
+
+ while (max > 0) {
+ level = ba431_trng_read_reg(ba431, BA431_REG_FIFO_LEVEL);
+ if (!level) {
+ if (ba431_trng_is_in_error(ba431)) {
+ ba431_trng_schedule_reset(ba431);
+ break;
+ }
+
+ if (!wait)
+ break;
+
+ udelay(BA431_READ_RETRY_INTERVAL);
+ continue;
+ }
+
+ i = level;
+ do {
+ data[n++] = ba431_trng_read_reg(ba431,
+ BA431_REG_FIFODATA);
+ max -= sizeof(*data);
+ } while (--i && (max > 0));
+
+ if (ba431_trng_is_in_error(ba431)) {
+ n -= (level - i);
+ ba431_trng_schedule_reset(ba431);
+ break;
+ }
+ }
+
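+ /* n counted 32-bit words; report bytes to the hwrng core */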
+ n *= sizeof(*data);
+ return (n || !wait) ? n : -EIO;
+}
+
+static void ba431_trng_cleanup(struct hwrng *rng)
+{
+ struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng);
+
+ ba431_trng_write_reg(ba431, BA431_REG_CTRL, 0);
+ cancel_work_sync(&ba431->reset_work);
+}
+
+static int ba431_trng_init(struct hwrng *rng)
+{
+ struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng);
+
+ return ba431_trng_reset(ba431);
+}
+
+static int ba431_trng_probe(struct platform_device *pdev)
+{
+ struct ba431_trng *ba431;
+ struct resource *res;
+ int ret;
+
+ ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL);
+ if (!ba431)
+ return -ENOMEM;
+
+ ba431->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ba431->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ba431->base))
+ return PTR_ERR(ba431->base);
+
+ atomic_set(&ba431->reset_pending, 0);
+ INIT_WORK(&ba431->reset_work, ba431_trng_reset_work);
+ ba431->rng.name = pdev->name;
+ ba431->rng.init = ba431_trng_init;
+ ba431->rng.cleanup = ba431_trng_cleanup;
+ ba431->rng.read = ba431_trng_read;
+
+ platform_set_drvdata(pdev, ba431);
+
+ ret = hwrng_register(&ba431->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "BA431 TRNG registered\n");
+
+ return 0;
+}
+
+static int ba431_trng_remove(struct platform_device *pdev)
+{
+ struct ba431_trng *ba431 = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&ba431->rng);
+
+ return 0;
+}
+
+static const struct of_device_id ba431_trng_dt_ids[] = {
+ { .compatible = "silex-insight,ba431-rng", .data = NULL },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ba431_trng_dt_ids);
+
+static struct platform_driver ba431_trng_driver = {
+ .driver = {
+ .name = "ba431-rng",
+ .of_match_table = ba431_trng_dt_ids,
+ },
+ .probe = ba431_trng_probe,
+ .remove = ba431_trng_remove,
+};
+
+module_platform_driver(ba431_trng_driver);
+
+MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
+MODULE_DESCRIPTION("TRNG driver for Silex Insight BA431");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index cbf5eaea662c..1a7c43b43c6b 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -139,7 +139,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
{
const struct bcm2835_rng_of_data *of_data;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
const struct of_device_id *rng_id;
struct bcm2835_rng_priv *priv;
int err;
@@ -166,7 +165,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
priv->rng.cleanup = bcm2835_rng_cleanup;
if (dev_of_node(dev)) {
- rng_id = of_match_node(bcm2835_rng_of_match, np);
+ rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node);
if (!rng_id)
return -EINVAL;
@@ -188,7 +187,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
-static struct platform_device_id bcm2835_rng_devtype[] = {
+static const struct platform_device_id bcm2835_rng_devtype[] = {
{ .name = "bcm2835-rng" },
{ .name = "bcm63xx-rng" },
{ /* sentinel */ }
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index d2d7a42d7e0d..8c1c47dd9f46 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -611,7 +611,7 @@ EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
static int __init hwrng_modinit(void)
{
- int ret = -ENOMEM;
+ int ret;
/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c
index 6815e17a9834..96438f85cafa 100644
--- a/drivers/char/hw_random/hisi-rng.c
+++ b/drivers/char/hw_random/hisi-rng.c
@@ -99,7 +99,7 @@ static int hisi_rng_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id hisi_rng_dt_ids[] = {
+static const struct of_device_id hisi_rng_dt_ids[] __maybe_unused = {
{ .compatible = "hisilicon,hip04-rng" },
{ .compatible = "hisilicon,hip05-rng" },
{ }
diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c
new file mode 100644
index 000000000000..d704cef64b64
--- /dev/null
+++ b/drivers/char/hw_random/ingenic-rng.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic Random Number Generator driver
+ * Copyright (c) 2017 PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+ * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* RNG register offsets */
+#define RNG_REG_ERNG_OFFSET 0x0
+#define RNG_REG_RNG_OFFSET 0x4
+
+/* bits within the ERND register */
+#define ERNG_READY BIT(31)
+#define ERNG_ENABLE BIT(0)
+
+enum ingenic_rng_version {
+ ID_JZ4780,
+ ID_X1000,
+};
+
+/* Device associated memory */
+struct ingenic_rng {
+ enum ingenic_rng_version version;
+
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int ingenic_rng_init(struct hwrng *rng)
+{
+ struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng);
+
+ writel(ERNG_ENABLE, priv->base + RNG_REG_ERNG_OFFSET);
+
+ return 0;
+}
+
+static void ingenic_rng_cleanup(struct hwrng *rng)
+{
+ struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng);
+
+ writel(0, priv->base + RNG_REG_ERNG_OFFSET);
+}
+
+static int ingenic_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng);
+ u32 *data = buf;
+ u32 status;
+ int ret;
+
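+ /* X1000 and newer expose a data-ready flag that can be polled */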
+ if (priv->version >= ID_X1000) {
+ ret = readl_poll_timeout(priv->base + RNG_REG_ERNG_OFFSET, status,
+ status & ERNG_READY, 10, 1000);
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: Timed out waiting for RNG data ready\n", __func__);
+ return ret;
+ }
+ } else {
+ /*
+ * A delay is required so that the current RNG data is not a
+ * bit-shifted version of the previous RNG data, which could happen
+ * if random data is read continuously from this device.
+ udelay(20);
+ }
+
+ *data = readl(priv->base + RNG_REG_RNG_OFFSET);
+
+ return 4;
+}
+
+static int ingenic_rng_probe(struct platform_device *pdev)
+{
+ struct ingenic_rng *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ pr_err("%s: Failed to map RNG registers\n", __func__);
+ /* priv is devm-allocated, so there is nothing to free on error */
+ return PTR_ERR(priv->base);
+ }
+
+ priv->version = (enum ingenic_rng_version)(unsigned long)of_device_get_match_data(&pdev->dev);
+
+ priv->rng.name = pdev->name;
+ priv->rng.init = ingenic_rng_init;
+ priv->rng.cleanup = ingenic_rng_cleanup;
+ priv->rng.read = ingenic_rng_read;
+
+ ret = hwrng_register(&priv->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register hwrng\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(&pdev->dev, "Ingenic RNG driver registered\n");
+ return 0;
+}
+
+static int ingenic_rng_remove(struct platform_device *pdev)
+{
+ struct ingenic_rng *priv = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&priv->rng);
+
+ writel(0, priv->base + RNG_REG_ERNG_OFFSET);
+
+ return 0;
+}
+
+static const struct of_device_id ingenic_rng_of_match[] = {
+ { .compatible = "ingenic,jz4780-rng", .data = (void *) ID_JZ4780 },
+ { .compatible = "ingenic,x1000-rng", .data = (void *) ID_X1000 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ingenic_rng_of_match);
+
+static struct platform_driver ingenic_rng_driver = {
+ .probe = ingenic_rng_probe,
+ .remove = ingenic_rng_remove,
+ .driver = {
+ .name = "ingenic-rng",
+ .of_match_table = ingenic_rng_of_match,
+ },
+};
+
+module_platform_driver(ingenic_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>");
+MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
+MODULE_DESCRIPTION("Ingenic Random Number Generator driver");
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index 001617033d6a..8f1d47ff9799 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -2,7 +2,7 @@
/*
* Random Number Generator driver for the Keystone SOC
*
- * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Sandeep Nair
* Vitaly Andrianov
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 74ed29f42e4f..b0ded41eb865 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -76,7 +76,7 @@ static int nmk_rng_remove(struct amba_device *dev)
return 0;
}
-static struct amba_id nmk_rng_ids[] = {
+static const struct amba_id nmk_rng_ids[] = {
{
.id = 0x000805e1,
.mask = 0x000fffff, /* top bits are rev and cfg: accept all */
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index 01d04404d8c0..5d0d13f891b7 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -161,7 +161,7 @@ static const struct dev_pm_ops npcm_rng_pm_ops = {
pm_runtime_force_resume)
};
-static const struct of_device_id rng_dt_id[] = {
+static const struct of_device_id rng_dt_id[] __maybe_unused = {
{ .compatible = "nuvoton,npcm750-rng", },
{},
};
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index 7be8067ac4e8..8561a09b4681 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -33,7 +33,7 @@ static int octeon_rng_init(struct hwrng *rng)
ctl.u64 = 0;
ctl.s.ent_en = 1; /* Enable the entropy source. */
ctl.s.rng_en = 1; /* Enable the RNG hardware. */
- cvmx_write_csr((u64)p->control_status, ctl.u64);
+ cvmx_write_csr((__force u64)p->control_status, ctl.u64);
return 0;
}
@@ -44,14 +44,14 @@ static void octeon_rng_cleanup(struct hwrng *rng)
ctl.u64 = 0;
/* Disable everything. */
- cvmx_write_csr((u64)p->control_status, ctl.u64);
+ cvmx_write_csr((__force u64)p->control_status, ctl.u64);
}
static int octeon_rng_data_read(struct hwrng *rng, u32 *data)
{
struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
- *data = cvmx_read64_uint32((u64)p->result);
+ *data = cvmx_read64_uint32((__force u64)p->result);
return sizeof(u32);
}
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 7290c603fcb8..5cc5fc504968 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
+#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
@@ -243,7 +244,6 @@ static struct omap_rng_pdata omap2_rng_pdata = {
.cleanup = omap2_rng_cleanup,
};
-#if defined(CONFIG_OF)
static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
{
return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
@@ -358,7 +358,7 @@ static struct omap_rng_pdata eip76_rng_pdata = {
.cleanup = omap4_rng_cleanup,
};
-static const struct of_device_id omap_rng_of_match[] = {
+static const struct of_device_id omap_rng_of_match[] __maybe_unused = {
{
.compatible = "ti,omap2-rng",
.data = &omap2_rng_pdata,
@@ -418,13 +418,6 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
}
return 0;
}
-#else
-static int of_get_omap_rng_device_details(struct omap_rng_dev *omap_rng,
- struct platform_device *pdev)
-{
- return -EINVAL;
-}
-#endif
static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng)
{
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index 81080cb2294e..e8210c1715cf 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -119,7 +119,7 @@ static int pic32_rng_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id pic32_rng_of_match[] = {
+static const struct of_device_id pic32_rng_of_match[] __maybe_unused = {
{ .compatible = "microchip,pic32mzda-rng", },
{ /* sentinel */ }
};
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
index 783c24e3f8b7..15ba1e6fae4d 100644
--- a/drivers/char/hw_random/st-rng.c
+++ b/drivers/char/hw_random/st-rng.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -121,7 +122,7 @@ static int st_rng_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id st_rng_match[] = {
+static const struct of_device_id st_rng_match[] __maybe_unused = {
{ .compatible = "st,rng" },
{},
};
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 79a6e47b5fbc..a90001e02bf7 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -195,7 +195,7 @@ static int virtrng_restore(struct virtio_device *vdev)
}
#endif
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID },
{ 0 },
};
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
index 691f5898bb32..f8d79d393b69 100644
--- a/drivers/char/mwave/smapi.c
+++ b/drivers/char/mwave/smapi.c
@@ -126,7 +126,7 @@ static int smapi_request(unsigned short inBX, unsigned short inCX,
int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
{
- int bRC = -EIO;
+ int bRC;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
static const unsigned short ausDspBases[] = {
0x0030, 0x4E30, 0x8E30, 0xCE30,
@@ -497,7 +497,7 @@ exit_smapi_request_error:
int smapi_set_DSP_power_state(bool bOn)
{
- int bRC = -EIO;
+ int bRC;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
unsigned short usPowerFunction;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2a41b21623ae..d20ba1b104ca 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1277,6 +1277,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
+ this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 3484e9145aea..380bf518338e 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -37,7 +37,7 @@ static struct raw_device_data *raw_devices;
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops; /* forward declaration */
-static int max_raw_minors = MAX_RAW_MINORS;
+static int max_raw_minors = CONFIG_MAX_RAW_DEVS;
module_param(max_raw_minors, int, 0);
MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
@@ -317,9 +317,9 @@ static int __init raw_init(void)
int ret;
if (max_raw_minors < 1 || max_raw_minors > 65536) {
- printk(KERN_WARNING "raw: invalid max_raw_minors (must be"
- " between 1 and 65536), using %d\n", MAX_RAW_MINORS);
- max_raw_minors = MAX_RAW_MINORS;
+ pr_warn("raw: invalid max_raw_minors (must be between 1 and 65536), using %d\n",
+ CONFIG_MAX_RAW_DEVS);
+ max_raw_minors = CONFIG_MAX_RAW_DEVS;
}
raw_devices = vzalloc(array_size(max_raw_minors,
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 63ada5e53f13..3633ed70f48f 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -49,9 +49,9 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
void __iomem *virt;
u64 len, start;
struct tpm_bios_log *log;
-
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return -ENODEV;
+ struct acpi_table_tpm2 *tbl;
+ struct acpi_tpm2_phy *tpm2_phy;
+ int format;
log = &chip->log;
@@ -61,23 +61,44 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
if (!chip->acpi_dev_handle)
return -ENODEV;
- /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
- status = acpi_get_table(ACPI_SIG_TCPA, 1,
- (struct acpi_table_header **)&buff);
-
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- switch(buff->platform_class) {
- case BIOS_SERVER:
- len = buff->server.log_max_len;
- start = buff->server.log_start_addr;
- break;
- case BIOS_CLIENT:
- default:
- len = buff->client.log_max_len;
- start = buff->client.log_start_addr;
- break;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ status = acpi_get_table("TPM2", 1,
+ (struct acpi_table_header **)&tbl);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (tbl->header.length <
+ sizeof(*tbl) + sizeof(struct acpi_tpm2_phy))
+ return -ENODEV;
+
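+ /* The TCG log fields (acpi_tpm2_phy) sit right after the fixed TPM2 table */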
+ tpm2_phy = (void *)tbl + sizeof(*tbl);
+ len = tpm2_phy->log_area_minimum_length;
+
+ start = tpm2_phy->log_area_start_address;
+ if (!start || !len)
+ return -ENODEV;
+
+ format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ } else {
+ /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
+ status = acpi_get_table(ACPI_SIG_TCPA, 1,
+ (struct acpi_table_header **)&buff);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ switch (buff->platform_class) {
+ case BIOS_SERVER:
+ len = buff->server.log_max_len;
+ start = buff->server.log_start_addr;
+ break;
+ case BIOS_CLIENT:
+ default:
+ len = buff->client.log_max_len;
+ start = buff->client.log_start_addr;
+ break;
+ }
+
+ format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
}
if (!len) {
dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
@@ -98,7 +119,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
memcpy_fromio(log->bios_event_log, virt, len);
acpi_os_unmap_iomem(virt, len);
- return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ return format;
err:
kfree(log->bios_event_log);
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 8c77e88012e9..ddaeceb7e109 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -386,13 +386,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
chip->cdev.owner = THIS_MODULE;
chip->cdevs.owner = THIS_MODULE;
- chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!chip->work_space.context_buf) {
- rc = -ENOMEM;
- goto out;
- }
- chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!chip->work_space.session_buf) {
+ rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
+ if (rc) {
rc = -ENOMEM;
goto out;
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 0fbcede241ea..947d1db0a5cc 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -59,6 +59,9 @@ enum tpm_addr {
#define TPM_TAG_RQU_COMMAND 193
+/* TPM2 specific constants. */
+#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */
+
struct stclear_flags_t {
__be16 tag;
u8 deactivated;
@@ -228,7 +231,7 @@ unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
int tpm2_probe(struct tpm_chip *chip);
int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
-int tpm2_init_space(struct tpm_space *space);
+int tpm2_init_space(struct tpm_space *space, unsigned int buf_size);
void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
void tpm2_flush_space(struct tpm_chip *chip);
int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 982d341d8837..784b8b3cb903 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -38,18 +38,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
}
}
-int tpm2_init_space(struct tpm_space *space)
+int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
{
- space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ space->context_buf = kzalloc(buf_size, GFP_KERNEL);
if (!space->context_buf)
return -ENOMEM;
- space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ space->session_buf = kzalloc(buf_size, GFP_KERNEL);
if (space->session_buf == NULL) {
kfree(space->context_buf);
+ /* Prevent caller getting a dangling pointer. */
+ space->context_buf = NULL;
return -ENOMEM;
}
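+ /* Record the size so prepare/commit copy exactly what was allocated */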
+ space->buf_size = buf_size;
return 0;
}
@@ -311,8 +314,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
sizeof(space->context_tbl));
memcpy(&chip->work_space.session_tbl, &space->session_tbl,
sizeof(space->session_tbl));
- memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE);
- memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE);
+ memcpy(chip->work_space.context_buf, space->context_buf,
+ space->buf_size);
+ memcpy(chip->work_space.session_buf, space->session_buf,
+ space->buf_size);
rc = tpm2_load_space(chip);
if (rc) {
@@ -492,7 +497,7 @@ static int tpm2_save_space(struct tpm_chip *chip)
continue;
rc = tpm2_save_context(chip, space->context_tbl[i],
- space->context_buf, PAGE_SIZE,
+ space->context_buf, space->buf_size,
&offset);
if (rc == -ENOENT) {
space->context_tbl[i] = 0;
@@ -509,9 +514,8 @@ static int tpm2_save_space(struct tpm_chip *chip)
continue;
rc = tpm2_save_context(chip, space->session_tbl[i],
- space->session_buf, PAGE_SIZE,
+ space->session_buf, space->buf_size,
&offset);
-
if (rc == -ENOENT) {
/* handle error saving session, just forget it */
space->session_tbl[i] = 0;
@@ -557,8 +561,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
sizeof(space->context_tbl));
memcpy(&space->session_tbl, &chip->work_space.session_tbl,
sizeof(space->session_tbl));
- memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE);
- memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE);
+ memcpy(space->context_buf, chip->work_space.context_buf,
+ space->buf_size);
+ memcpy(space->session_buf, chip->work_space.session_buf,
+ space->buf_size);
return 0;
out:
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 2491a2cb54a2..2ccdf8ac6994 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -214,11 +214,10 @@ static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data)
* Return:
* On success, 0. On failure, -errno.
*/
-static int ftpm_tee_probe(struct platform_device *pdev)
+static int ftpm_tee_probe(struct device *dev)
{
int rc;
struct tpm_chip *chip;
- struct device *dev = &pdev->dev;
struct ftpm_tee_private *pvt_data = NULL;
struct tee_ioctl_open_session_arg sess_arg;
@@ -297,6 +296,13 @@ out_tee_session:
return rc;
}
+static int ftpm_plat_tee_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return ftpm_tee_probe(dev);
+}
+
/**
* ftpm_tee_remove() - remove the TPM device
- * @pdev: the platform_device description.
+ * @dev: the device description.
@@ -304,9 +310,9 @@ out_tee_session:
* Return:
* 0 always.
*/
-static int ftpm_tee_remove(struct platform_device *pdev)
+static int ftpm_tee_remove(struct device *dev)
{
- struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(dev);
/* Release the chip */
tpm_chip_unregister(pvt_data->chip);
@@ -328,11 +334,18 @@ static int ftpm_tee_remove(struct platform_device *pdev)
return 0;
}
+static int ftpm_plat_tee_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return ftpm_tee_remove(dev);
+}
+
/**
- * ftpm_tee_shutdown() - shutdown the TPM device
+ * ftpm_plat_tee_shutdown() - shutdown the TPM device
* @pdev: the platform_device description.
*/
-static void ftpm_tee_shutdown(struct platform_device *pdev)
+static void ftpm_plat_tee_shutdown(struct platform_device *pdev)
{
struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
@@ -347,17 +360,54 @@ static const struct of_device_id of_ftpm_tee_ids[] = {
};
MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids);
-static struct platform_driver ftpm_tee_driver = {
+static struct platform_driver ftpm_tee_plat_driver = {
.driver = {
.name = "ftpm-tee",
.of_match_table = of_match_ptr(of_ftpm_tee_ids),
},
- .probe = ftpm_tee_probe,
- .remove = ftpm_tee_remove,
- .shutdown = ftpm_tee_shutdown,
+ .shutdown = ftpm_plat_tee_shutdown,
+ .probe = ftpm_plat_tee_probe,
+ .remove = ftpm_plat_tee_remove,
+};
+
+/* UUID of the fTPM TA */
+static const struct tee_client_device_id optee_ftpm_id_table[] = {
+ {UUID_INIT(0xbc50d971, 0xd4c9, 0x42c4,
+ 0x82, 0xcb, 0x34, 0x3f, 0xb7, 0xf3, 0x78, 0x96)},
+ {}
};
-module_platform_driver(ftpm_tee_driver);
+MODULE_DEVICE_TABLE(tee, optee_ftpm_id_table);
+
+static struct tee_client_driver ftpm_tee_driver = {
+ .id_table = optee_ftpm_id_table,
+ .driver = {
+ .name = "optee-ftpm",
+ .bus = &tee_bus_type,
+ .probe = ftpm_tee_probe,
+ .remove = ftpm_tee_remove,
+ },
+};
+
+static int __init ftpm_mod_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&ftpm_tee_plat_driver);
+ if (rc)
+ return rc;
+
+ rc = driver_register(&ftpm_tee_driver.driver);
+ if (rc) {
+ /* don't leave the platform driver registered on failure */
+ platform_driver_unregister(&ftpm_tee_plat_driver);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void __exit ftpm_mod_exit(void)
+{
+ platform_driver_unregister(&ftpm_tee_plat_driver);
+ driver_unregister(&ftpm_tee_driver.driver);
+}
+
+module_init(ftpm_mod_init);
+module_exit(ftpm_mod_exit);
MODULE_AUTHOR("Thirupathaiah Annapureddy <thiruan@microsoft.com>");
MODULE_DESCRIPTION("TPM Driver for fTPM TA in TEE");
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
index 7a0a7051a06f..eef0fb06ea83 100644
--- a/drivers/char/tpm/tpmrm-dev.c
+++ b/drivers/char/tpm/tpmrm-dev.c
@@ -21,7 +21,7 @@ static int tpmrm_open(struct inode *inode, struct file *file)
if (priv == NULL)
return -ENOMEM;
- rc = tpm2_init_space(&priv->space);
+ rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE);
if (rc) {
kfree(priv);
return -ENOMEM;
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 56db949a7b70..6a0059e508e3 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -172,7 +172,7 @@ static struct tty_driver *ttyprintk_driver;
static int __init ttyprintk_init(void)
{
- int ret = -ENOMEM;
+ int ret;
spin_lock_init(&tpk_port.spinlock);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ca691bce9791..a2da8f768b94 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2112,18 +2112,18 @@ fail:
return err;
}
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);
-static unsigned int features[] = {
+static const unsigned int features[] = {
VIRTIO_CONSOLE_F_SIZE,
VIRTIO_CONSOLE_F_MULTIPORT,
};
-static struct virtio_device_id rproc_serial_id_table[] = {
+static const struct virtio_device_id rproc_serial_id_table[] = {
#if IS_ENABLED(CONFIG_REMOTEPROC)
{ VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
#endif
@@ -2131,7 +2131,7 @@ static struct virtio_device_id rproc_serial_id_table[] = {
};
MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table);
-static unsigned int rproc_serial_features[] = {
+static const unsigned int rproc_serial_features[] = {
};
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 2ca1f2ac38a6..070dc47e95a1 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -56,7 +56,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
struct clk_gate *gate = to_clk_gate(hw);
int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
u32 reg;
set ^= enable;
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index c491f5de0f3f..c754dfbb73fd 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -103,6 +103,8 @@ static const struct clk_ops scmi_clk_ops = {
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
{
int ret;
+ unsigned long min_rate, max_rate;
+
struct clk_init_data init = {
.flags = CLK_GET_RATE_NOCACHE,
.num_parents = 0,
@@ -112,9 +114,23 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
sclk->hw.init = &init;
ret = devm_clk_hw_register(dev, &sclk->hw);
- if (!ret)
- clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate,
- sclk->info->range.max_rate);
+ if (ret)
+ return ret;
+
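+ /*
+ * For discrete rates, bound the range by the first and last
+ * entries, assuming firmware reports the list in ascending order.
+ */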
+ if (sclk->info->rate_discrete) {
+ int num_rates = sclk->info->list.num_rates;
+
+ if (num_rates <= 0)
+ return -EINVAL;
+
+ min_rate = sclk->info->list.rates[0];
+ max_rate = sclk->info->list.rates[num_rates - 1];
+ } else {
+ min_rate = sclk->info->range.min_rate;
+ max_rate = sclk->info->range.max_rate;
+ }
+
+ clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
return ret;
}
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index b4d9db9d5bf1..ca747712400f 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -680,6 +680,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", ccm_base + 0x4180, 0);
hws[IMX8MP_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", ccm_base + 0x4190, 0);
hws[IMX8MP_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", ccm_base + 0x41a0, 0);
+ hws[IMX8MP_CLK_MU_ROOT] = imx_clk_hw_gate4("mu_root_clk", "ipg_root", ccm_base + 0x4210, 0);
hws[IMX8MP_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", ccm_base + 0x4220, 0);
hws[IMX8MP_CLK_PCIE_ROOT] = imx_clk_hw_gate4("pcie_root_clk", "pcie_aux", ccm_base + 0x4250, 0);
hws[IMX8MP_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", ccm_base + 0x4280, 0);
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index cd04e7dc1878..5129ef8e1d6e 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -438,6 +438,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_SNVS] = imx_clk_gate2("snvs-rtc", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(7));
clk[VF610_CLK_DAP] = imx_clk_gate("dap", "platform_bus", CCM_CCSR, 24);
clk[VF610_CLK_OCOTP] = imx_clk_gate("ocotp", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(5));
+ clk[VF610_CLK_CAAM] = imx_clk_gate2("caam", "ipg_bus", CCM_CCGR11, CCM_CCGRx_CGn(0));
imx_check_clocks(clk, ARRAY_SIZE(clk));
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index c08dec30bfa6..fed194169666 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -147,7 +147,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
struct clk_pll *pll = to_clk_pll(hw);
struct pll_rate_tbl *rtbl = pll->vco->rtbl;
unsigned long flags = 0, val;
- int uninitialized_var(i);
+ int i = 0;
clk_pll_round_rate_index(hw, drate, NULL, &i);
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index a156bd0c6af7..f1adc858b590 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -790,7 +790,6 @@ static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate,
struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
struct stm_fs params;
long hwrate;
- int uninitialized_var(i);
if (!rate || !parent_rate)
return -EINVAL;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 91418381fcd4..2ed8b4361d95 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -616,8 +616,9 @@ config CLKSRC_IMX_GPT
config CLKSRC_IMX_TPM
bool "Clocksource using i.MX TPM" if COMPILE_TEST
- depends on ARM && CLKDEV_LOOKUP
+ depends on (ARM || ARM64) && CLKDEV_LOOKUP
select CLKSRC_MMIO
+ select TIMER_OF
help
Enable this option to use IMX Timer/PWM Module (TPM) timer as
clocksource.
@@ -696,8 +697,18 @@ config INGENIC_TIMER
help
Support for the timer/counter unit of the Ingenic JZ SoCs.
+config INGENIC_SYSOST
+ bool "Clocksource/timer using the SYSOST in Ingenic X SoCs"
+ depends on MIPS || COMPILE_TEST
+ depends on COMMON_CLK
+ select MFD_SYSCON
+ select TIMER_OF
+ select IRQ_DOMAIN
+ help
+ Support for the SYSOST of the Ingenic X Series SoCs.
+
config INGENIC_OST
- bool "Clocksource for Ingenic OS Timer"
+ bool "Clocksource using the OST in Ingenic JZ SoCs"
depends on MIPS || COMPILE_TEST
depends on COMMON_CLK
select MFD_SYSCON
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index bdda1a2e4097..3994e221e262 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_H8300_TMR8) += h8300_timer8.o
obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o
obj-$(CONFIG_H8300_TPU) += h8300_tpu.o
obj-$(CONFIG_INGENIC_OST) += ingenic-ost.o
+obj-$(CONFIG_INGENIC_SYSOST) += ingenic-sysost.o
obj-$(CONFIG_INGENIC_TIMER) += ingenic-timer.o
obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o
obj-$(CONFIG_X86_NUMACHIP) += numachip.o
diff --git a/drivers/clocksource/ingenic-sysost.c b/drivers/clocksource/ingenic-sysost.c
new file mode 100644
index 000000000000..e77d58449005
--- /dev/null
+++ b/drivers/clocksource/ingenic-sysost.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic XBurst SoCs SYSOST clocks driver
+ * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/clock/ingenic,sysost.h>
+
+/* OST register offsets */
+#define OST_REG_OSTCCR 0x00
+#define OST_REG_OSTCR 0x08
+#define OST_REG_OSTFR 0x0c
+#define OST_REG_OSTMR 0x10
+#define OST_REG_OST1DFR 0x14
+#define OST_REG_OST1CNT 0x18
+#define OST_REG_OST2CNTL 0x20
+#define OST_REG_OSTCNT2HBUF 0x24
+#define OST_REG_OSTESR 0x34
+#define OST_REG_OSTECR 0x38
+
+/* bits within the OSTCCR register */
+#define OSTCCR_PRESCALE1_MASK 0x3
+#define OSTCCR_PRESCALE2_MASK 0xc
+#define OSTCCR_PRESCALE1_LSB 0
+#define OSTCCR_PRESCALE2_LSB 2
+
+/* bits within the OSTCR register */
+#define OSTCR_OST1CLR BIT(0)
+#define OSTCR_OST2CLR BIT(1)
+
+/* bits within the OSTFR register */
+#define OSTFR_FFLAG BIT(0)
+
+/* bits within the OSTMR register */
+#define OSTMR_FMASK BIT(0)
+
+/* bits within the OSTESR register */
+#define OSTESR_OST1ENS BIT(0)
+#define OSTESR_OST2ENS BIT(1)
+
+/* bits within the OSTECR register */
+#define OSTECR_OST1ENC BIT(0)
+#define OSTECR_OST2ENC BIT(1)
+
+struct ingenic_soc_info {
+ unsigned int num_channels;
+};
+
+struct ingenic_ost_clk_info {
+ struct clk_init_data init_data;
+ u8 ostccr_reg;
+};
+
+struct ingenic_ost_clk {
+ struct clk_hw hw;
+ unsigned int idx;
+ struct ingenic_ost *ost;
+ const struct ingenic_ost_clk_info *info;
+};
+
+struct ingenic_ost {
+ void __iomem *base;
+ const struct ingenic_soc_info *soc_info;
+ struct clk *clk, *percpu_timer_clk, *global_timer_clk;
+ struct clock_event_device cevt;
+ struct clocksource cs;
+ char name[20];
+
+ struct clk_hw_onecell_data *clocks;
+};
+
+static struct ingenic_ost *ingenic_ost;
+
+static inline struct ingenic_ost_clk *to_ost_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct ingenic_ost_clk, hw);
+}
+
+static unsigned long ingenic_ost_percpu_timer_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ingenic_ost_clk *ost_clk = to_ost_clk(hw);
+ const struct ingenic_ost_clk_info *info = ost_clk->info;
+ unsigned int prescale;
+
+ prescale = readl(ost_clk->ost->base + info->ostccr_reg);
+
+ prescale = (prescale & OSTCCR_PRESCALE1_MASK) >> OSTCCR_PRESCALE1_LSB;
+
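+ /* Each prescale step divides by 4: 0 -> /1, 1 -> /4, 2 -> /16 */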
+ return parent_rate >> (prescale * 2);
+}
+
+static unsigned long ingenic_ost_global_timer_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ingenic_ost_clk *ost_clk = to_ost_clk(hw);
+ const struct ingenic_ost_clk_info *info = ost_clk->info;
+ unsigned int prescale;
+
+ prescale = readl(ost_clk->ost->base + info->ostccr_reg);
+
+ prescale = (prescale & OSTCCR_PRESCALE2_MASK) >> OSTCCR_PRESCALE2_LSB;
+
+ return parent_rate >> (prescale * 2);
+}
+
+static u8 ingenic_ost_get_prescale(unsigned long rate, unsigned long req_rate)
+{
+ u8 prescale;
+
+ for (prescale = 0; prescale < 2; prescale++)
+ if ((rate >> (prescale * 2)) <= req_rate)
+ return prescale;
+
+ return 2; /* /16 divider */
+}
+
+static long ingenic_ost_round_rate(struct clk_hw *hw, unsigned long req_rate,
+ unsigned long *parent_rate)
+{
+ unsigned long rate = *parent_rate;
+ u8 prescale;
+
+ if (req_rate > rate)
+ return rate;
+
+ prescale = ingenic_ost_get_prescale(rate, req_rate);
+
+ return rate >> (prescale * 2);
+}
+
+static int ingenic_ost_percpu_timer_set_rate(struct clk_hw *hw, unsigned long req_rate,
+ unsigned long parent_rate)
+{
+ struct ingenic_ost_clk *ost_clk = to_ost_clk(hw);
+ const struct ingenic_ost_clk_info *info = ost_clk->info;
+ u8 prescale = ingenic_ost_get_prescale(parent_rate, req_rate);
+ int val;
+
+ val = readl(ost_clk->ost->base + info->ostccr_reg);
+ val = (val & ~OSTCCR_PRESCALE1_MASK) | (prescale << OSTCCR_PRESCALE1_LSB);
+ writel(val, ost_clk->ost->base + info->ostccr_reg);
+
+ return 0;
+}
+
+static int ingenic_ost_global_timer_set_rate(struct clk_hw *hw, unsigned long req_rate,
+ unsigned long parent_rate)
+{
+ struct ingenic_ost_clk *ost_clk = to_ost_clk(hw);
+ const struct ingenic_ost_clk_info *info = ost_clk->info;
+ u8 prescale = ingenic_ost_get_prescale(parent_rate, req_rate);
+ int val;
+
+ val = readl(ost_clk->ost->base + info->ostccr_reg);
+ val = (val & ~OSTCCR_PRESCALE2_MASK) | (prescale << OSTCCR_PRESCALE2_LSB);
+ writel(val, ost_clk->ost->base + info->ostccr_reg);
+
+ return 0;
+}
+
+static const struct clk_ops ingenic_ost_percpu_timer_ops = {
+ .recalc_rate = ingenic_ost_percpu_timer_recalc_rate,
+ .round_rate = ingenic_ost_round_rate,
+ .set_rate = ingenic_ost_percpu_timer_set_rate,
+};
+
+static const struct clk_ops ingenic_ost_global_timer_ops = {
+ .recalc_rate = ingenic_ost_global_timer_recalc_rate,
+ .round_rate = ingenic_ost_round_rate,
+ .set_rate = ingenic_ost_global_timer_set_rate,
+};
+
+static const char * const ingenic_ost_clk_parents[] = { "ext" };
+
+static const struct ingenic_ost_clk_info ingenic_ost_clk_info[] = {
+ [OST_CLK_PERCPU_TIMER] = {
+ .init_data = {
+ .name = "percpu timer",
+ .parent_names = ingenic_ost_clk_parents,
+ .num_parents = ARRAY_SIZE(ingenic_ost_clk_parents),
+ .ops = &ingenic_ost_percpu_timer_ops,
+ .flags = CLK_SET_RATE_UNGATE,
+ },
+ .ostccr_reg = OST_REG_OSTCCR,
+ },
+
+ [OST_CLK_GLOBAL_TIMER] = {
+ .init_data = {
+ .name = "global timer",
+ .parent_names = ingenic_ost_clk_parents,
+ .num_parents = ARRAY_SIZE(ingenic_ost_clk_parents),
+ .ops = &ingenic_ost_global_timer_ops,
+ .flags = CLK_SET_RATE_UNGATE,
+ },
+ .ostccr_reg = OST_REG_OSTCCR,
+ },
+};
+
+static u64 notrace ingenic_ost_global_timer_read_cntl(void)
+{
+ struct ingenic_ost *ost = ingenic_ost;
+ unsigned int count;
+
+ count = readl(ost->base + OST_REG_OST2CNTL);
+
+ return count;
+}
+
+static u64 notrace ingenic_ost_clocksource_read(struct clocksource *cs)
+{
+ return ingenic_ost_global_timer_read_cntl();
+}
+
+static inline struct ingenic_ost *to_ingenic_ost(struct clock_event_device *evt)
+{
+ return container_of(evt, struct ingenic_ost, cevt);
+}
+
+static int ingenic_ost_cevt_set_state_shutdown(struct clock_event_device *evt)
+{
+ struct ingenic_ost *ost = to_ingenic_ost(evt);
+
+ writel(OSTECR_OST1ENC, ost->base + OST_REG_OSTECR);
+
+ return 0;
+}
+
+static int ingenic_ost_cevt_set_next(unsigned long next,
+ struct clock_event_device *evt)
+{
+ struct ingenic_ost *ost = to_ingenic_ost(evt);
+
+ writel((u32)~OSTFR_FFLAG, ost->base + OST_REG_OSTFR);
+ writel(next, ost->base + OST_REG_OST1DFR);
+ writel(OSTCR_OST1CLR, ost->base + OST_REG_OSTCR);
+ writel(OSTESR_OST1ENS, ost->base + OST_REG_OSTESR);
+ writel((u32)~OSTMR_FMASK, ost->base + OST_REG_OSTMR);
+
+ return 0;
+}
+
+static irqreturn_t ingenic_ost_cevt_cb(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ struct ingenic_ost *ost = to_ingenic_ost(evt);
+
+ writel(OSTECR_OST1ENC, ost->base + OST_REG_OSTECR);
+
+ if (evt->event_handler)
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static int __init ingenic_ost_register_clock(struct ingenic_ost *ost,
+ unsigned int idx, const struct ingenic_ost_clk_info *info,
+ struct clk_hw_onecell_data *clocks)
+{
+ struct ingenic_ost_clk *ost_clk;
+ int val, err;
+
+ ost_clk = kzalloc(sizeof(*ost_clk), GFP_KERNEL);
+ if (!ost_clk)
+ return -ENOMEM;
+
+ ost_clk->hw.init = &info->init_data;
+ ost_clk->idx = idx;
+ ost_clk->info = info;
+ ost_clk->ost = ost;
+
+ /* Reset clock divider */
+ val = readl(ost->base + info->ostccr_reg);
+ val &= ~(OSTCCR_PRESCALE1_MASK | OSTCCR_PRESCALE2_MASK);
+ writel(val, ost->base + info->ostccr_reg);
+
+ err = clk_hw_register(NULL, &ost_clk->hw);
+ if (err) {
+ kfree(ost_clk);
+ return err;
+ }
+
+ clocks->hws[idx] = &ost_clk->hw;
+
+ return 0;
+}
+
+static struct clk * __init ingenic_ost_get_clock(struct device_node *np, int id)
+{
+ struct of_phandle_args args;
+
+ args.np = np;
+ args.args_count = 1;
+ args.args[0] = id;
+
+ return of_clk_get_from_provider(&args);
+}
+
+static int __init ingenic_ost_percpu_timer_init(struct device_node *np,
+ struct ingenic_ost *ost)
+{
+ unsigned int timer_virq, channel = OST_CLK_PERCPU_TIMER;
+ unsigned long rate;
+ int err;
+
+ ost->percpu_timer_clk = ingenic_ost_get_clock(np, channel);
+ if (IS_ERR(ost->percpu_timer_clk))
+ return PTR_ERR(ost->percpu_timer_clk);
+
+ err = clk_prepare_enable(ost->percpu_timer_clk);
+ if (err)
+ goto err_clk_put;
+
+ rate = clk_get_rate(ost->percpu_timer_clk);
+ if (!rate) {
+ err = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ timer_virq = of_irq_get(np, 0);
+ if (!timer_virq) {
+ err = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ snprintf(ost->name, sizeof(ost->name), "OST percpu timer");
+
+ err = request_irq(timer_virq, ingenic_ost_cevt_cb, IRQF_TIMER,
+ ost->name, &ost->cevt);
+ if (err)
+ goto err_irq_dispose_mapping;
+
+ ost->cevt.cpumask = cpumask_of(smp_processor_id());
+ ost->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
+ ost->cevt.name = ost->name;
+ ost->cevt.rating = 400;
+ ost->cevt.set_state_shutdown = ingenic_ost_cevt_set_state_shutdown;
+ ost->cevt.set_next_event = ingenic_ost_cevt_set_next;
+
+ clockevents_config_and_register(&ost->cevt, rate, 4, 0xffffffff);
+
+ return 0;
+
+err_irq_dispose_mapping:
+ irq_dispose_mapping(timer_virq);
+err_clk_disable:
+ clk_disable_unprepare(ost->percpu_timer_clk);
+err_clk_put:
+ clk_put(ost->percpu_timer_clk);
+ return err;
+}
+
+static int __init ingenic_ost_global_timer_init(struct device_node *np,
+ struct ingenic_ost *ost)
+{
+ unsigned int channel = OST_CLK_GLOBAL_TIMER;
+ struct clocksource *cs = &ost->cs;
+ unsigned long rate;
+ int err;
+
+ ost->global_timer_clk = ingenic_ost_get_clock(np, channel);
+ if (IS_ERR(ost->global_timer_clk))
+ return PTR_ERR(ost->global_timer_clk);
+
+ err = clk_prepare_enable(ost->global_timer_clk);
+ if (err)
+ goto err_clk_put;
+
+ rate = clk_get_rate(ost->global_timer_clk);
+ if (!rate) {
+ err = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ /* Clear counter CNT registers */
+ writel(OSTCR_OST2CLR, ost->base + OST_REG_OSTCR);
+
+ /* Enable OST channel */
+ writel(OSTESR_OST2ENS, ost->base + OST_REG_OSTESR);
+
+ cs->name = "ingenic-ost";
+ cs->rating = 400;
+ cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ cs->mask = CLOCKSOURCE_MASK(32);
+ cs->read = ingenic_ost_clocksource_read;
+
+ err = clocksource_register_hz(cs, rate);
+ if (err)
+ goto err_clk_disable;
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(ost->global_timer_clk);
+err_clk_put:
+ clk_put(ost->global_timer_clk);
+ return err;
+}
+
+static const struct ingenic_soc_info x1000_soc_info = {
+ .num_channels = 2,
+};
+
+static const struct of_device_id __maybe_unused ingenic_ost_of_match[] __initconst = {
+ { .compatible = "ingenic,x1000-ost", .data = &x1000_soc_info, },
+ { /* sentinel */ }
+};
+
+static int __init ingenic_ost_probe(struct device_node *np)
+{
+ const struct of_device_id *id = of_match_node(ingenic_ost_of_match, np);
+ struct ingenic_ost *ost;
+ unsigned int i;
+ int ret;
+
+ ost = kzalloc(sizeof(*ost), GFP_KERNEL);
+ if (!ost)
+ return -ENOMEM;
+
+ ost->base = of_io_request_and_map(np, 0, of_node_full_name(np));
+ if (IS_ERR(ost->base)) {
+ pr_err("%s: Failed to map OST registers\n", __func__);
+ ret = PTR_ERR(ost->base);
+ goto err_free_ost;
+ }
+
+ ost->clk = of_clk_get_by_name(np, "ost");
+ if (IS_ERR(ost->clk)) {
+ ret = PTR_ERR(ost->clk);
+ pr_crit("%s: Cannot get OST clock\n", __func__);
+ goto err_free_ost;
+ }
+
+ ret = clk_prepare_enable(ost->clk);
+ if (ret) {
+ pr_crit("%s: Unable to enable OST clock\n", __func__);
+ goto err_put_clk;
+ }
+
+ ost->soc_info = id->data;
+
+ ost->clocks = kzalloc(struct_size(ost->clocks, hws, ost->soc_info->num_channels),
+ GFP_KERNEL);
+ if (!ost->clocks) {
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ ost->clocks->num = ost->soc_info->num_channels;
+
+ for (i = 0; i < ost->clocks->num; i++) {
+ ret = ingenic_ost_register_clock(ost, i, &ingenic_ost_clk_info[i], ost->clocks);
+ if (ret) {
+ pr_crit("%s: Cannot register clock %d\n", __func__, i);
+ goto err_unregister_ost_clocks;
+ }
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, ost->clocks);
+ if (ret) {
+ pr_crit("%s: Cannot add OF clock provider\n", __func__);
+ goto err_unregister_ost_clocks;
+ }
+
+ ingenic_ost = ost;
+
+ return 0;
+
+err_unregister_ost_clocks:
+ for (i = 0; i < ost->clocks->num; i++)
+ if (ost->clocks->hws[i])
+ clk_hw_unregister(ost->clocks->hws[i]);
+ kfree(ost->clocks);
+err_clk_disable:
+ clk_disable_unprepare(ost->clk);
+err_put_clk:
+ clk_put(ost->clk);
+err_free_ost:
+ kfree(ost);
+ return ret;
+}
+
+static int __init ingenic_ost_init(struct device_node *np)
+{
+ struct ingenic_ost *ost;
+ unsigned long rate;
+ int ret;
+
+ ret = ingenic_ost_probe(np);
+ if (ret) {
+ pr_crit("%s: Failed to initialize OST clocks: %d\n", __func__, ret);
+ return ret;
+ }
+
+ of_node_clear_flag(np, OF_POPULATED);
+
+ ost = ingenic_ost;
+ if (IS_ERR(ost))
+ return PTR_ERR(ost);
+
+ ret = ingenic_ost_global_timer_init(np, ost);
+ if (ret) {
+ pr_crit("%s: Unable to init global timer: %d\n", __func__, ret);
+ goto err_free_ingenic_ost;
+ }
+
+ ret = ingenic_ost_percpu_timer_init(np, ost);
+ if (ret)
+ goto err_ost_global_timer_cleanup;
+
+ /* Register the sched_clock at the end as there's no way to undo it */
+ rate = clk_get_rate(ost->global_timer_clk);
+ sched_clock_register(ingenic_ost_global_timer_read_cntl, 32, rate);
+
+ return 0;
+
+err_ost_global_timer_cleanup:
+ clocksource_unregister(&ost->cs);
+ clk_disable_unprepare(ost->global_timer_clk);
+ clk_put(ost->global_timer_clk);
+err_free_ingenic_ost:
+ kfree(ost);
+ return ret;
+}
+
+TIMER_OF_DECLARE(x1000_ost, "ingenic,x1000-ost", ingenic_ost_init);
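/*
 * Editor's note: TIMER_OF_DECLARE() drops the (compatible, init) pair into a
 * linker table that timer_probe() walks at early boot, calling the init
 * function for each matching device-tree node. A rough sketch of the effect
 * (an approximation; the in-tree macro goes through OF_DECLARE_1_RET()):
 */
static const struct of_device_id __of_table_x1000_ost_sketch
	__used __section("__timer_of_table") = {
	.compatible = "ingenic,x1000-ost",
	.data = (void *)ingenic_ost_init,	/* invoked as fn(np) at boot */
};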
diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c
index 496333650de2..58fd9189fab7 100644
--- a/drivers/clocksource/ingenic-timer.c
+++ b/drivers/clocksource/ingenic-timer.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * JZ47xx SoCs TCU IRQ driver
+ * Ingenic SoCs TCU IRQ driver
* Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net>
+ * Copyright (C) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
*/
#include <linux/bitops.h>
@@ -15,24 +16,35 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched_clock.h>
#include <dt-bindings/clock/ingenic,tcu.h>
+static DEFINE_PER_CPU(call_single_data_t, ingenic_cevt_csd);
+
struct ingenic_soc_info {
unsigned int num_channels;
};
+struct ingenic_tcu_timer {
+ unsigned int cpu;
+ unsigned int channel;
+ struct clock_event_device cevt;
+ struct clk *clk;
+ char name[8];
+};
+
struct ingenic_tcu {
struct regmap *map;
- struct clk *timer_clk, *cs_clk;
- unsigned int timer_channel, cs_channel;
- struct clock_event_device cevt;
+ struct device_node *np;
+ struct clk *cs_clk;
+ unsigned int cs_channel;
struct clocksource cs;
- char name[4];
unsigned long pwm_channels_mask;
+ struct ingenic_tcu_timer timers[];
};
static struct ingenic_tcu *ingenic_tcu;
@@ -52,16 +64,24 @@ static u64 notrace ingenic_tcu_timer_cs_read(struct clocksource *cs)
return ingenic_tcu_timer_read();
}
-static inline struct ingenic_tcu *to_ingenic_tcu(struct clock_event_device *evt)
+static inline struct ingenic_tcu *
+to_ingenic_tcu(struct ingenic_tcu_timer *timer)
+{
+ return container_of(timer, struct ingenic_tcu, timers[timer->cpu]);
+}
+
+static inline struct ingenic_tcu_timer *
+to_ingenic_tcu_timer(struct clock_event_device *evt)
{
- return container_of(evt, struct ingenic_tcu, cevt);
+ return container_of(evt, struct ingenic_tcu_timer, cevt);
}
static int ingenic_tcu_cevt_set_state_shutdown(struct clock_event_device *evt)
{
- struct ingenic_tcu *tcu = to_ingenic_tcu(evt);
+ struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
+ struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
- regmap_write(tcu->map, TCU_REG_TECR, BIT(tcu->timer_channel));
+ regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));
return 0;
}
@@ -69,27 +89,40 @@ static int ingenic_tcu_cevt_set_state_shutdown(struct clock_event_device *evt)
static int ingenic_tcu_cevt_set_next(unsigned long next,
struct clock_event_device *evt)
{
- struct ingenic_tcu *tcu = to_ingenic_tcu(evt);
+ struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
+ struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
if (next > 0xffff)
return -EINVAL;
- regmap_write(tcu->map, TCU_REG_TDFRc(tcu->timer_channel), next);
- regmap_write(tcu->map, TCU_REG_TCNTc(tcu->timer_channel), 0);
- regmap_write(tcu->map, TCU_REG_TESR, BIT(tcu->timer_channel));
+ regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next);
+ regmap_write(tcu->map, TCU_REG_TCNTc(timer->channel), 0);
+ regmap_write(tcu->map, TCU_REG_TESR, BIT(timer->channel));
return 0;
}
+static void ingenic_per_cpu_event_handler(void *info)
+{
+ struct clock_event_device *cevt = (struct clock_event_device *) info;
+
+ cevt->event_handler(cevt);
+}
+
static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id)
{
- struct clock_event_device *evt = dev_id;
- struct ingenic_tcu *tcu = to_ingenic_tcu(evt);
+ struct ingenic_tcu_timer *timer = dev_id;
+ struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
+ call_single_data_t *csd;
- regmap_write(tcu->map, TCU_REG_TECR, BIT(tcu->timer_channel));
+ regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));
- if (evt->event_handler)
- evt->event_handler(evt);
+ if (timer->cevt.event_handler) {
+ csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
+ csd->info = (void *) &timer->cevt;
+ csd->func = ingenic_per_cpu_event_handler;
+ smp_call_function_single_async(timer->cpu, csd);
+ }
return IRQ_HANDLED;
}
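/*
 * Editor's note: the hard IRQ for a TCU channel may be delivered on any CPU,
 * so the handler above bounces the clockevent callback to the CPU that owns
 * the timer through a per-CPU call_single_data_t. A minimal sketch of the
 * same pattern in isolation (all names here are illustrative):
 */
static DEFINE_PER_CPU(call_single_data_t, example_csd);

static void example_remote_fn(void *info)
{
	/* runs in IRQ context on the target CPU */
}

static void example_kick_cpu(unsigned int cpu)
{
	call_single_data_t *csd = &per_cpu(example_csd, cpu);

	csd->func = example_remote_fn;
	csd->info = NULL;
	smp_call_function_single_async(cpu, csd);
}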
@@ -105,64 +138,66 @@ static struct clk * __init ingenic_tcu_get_clock(struct device_node *np, int id)
return of_clk_get_from_provider(&args);
}
-static int __init ingenic_tcu_timer_init(struct device_node *np,
- struct ingenic_tcu *tcu)
+static int ingenic_tcu_setup_cevt(unsigned int cpu)
{
- unsigned int timer_virq, channel = tcu->timer_channel;
+ struct ingenic_tcu *tcu = ingenic_tcu;
+ struct ingenic_tcu_timer *timer = &tcu->timers[cpu];
+ unsigned int timer_virq;
struct irq_domain *domain;
unsigned long rate;
int err;
- tcu->timer_clk = ingenic_tcu_get_clock(np, channel);
- if (IS_ERR(tcu->timer_clk))
- return PTR_ERR(tcu->timer_clk);
+ timer->clk = ingenic_tcu_get_clock(tcu->np, timer->channel);
+ if (IS_ERR(timer->clk))
+ return PTR_ERR(timer->clk);
- err = clk_prepare_enable(tcu->timer_clk);
+ err = clk_prepare_enable(timer->clk);
if (err)
goto err_clk_put;
- rate = clk_get_rate(tcu->timer_clk);
+ rate = clk_get_rate(timer->clk);
if (!rate) {
err = -EINVAL;
goto err_clk_disable;
}
- domain = irq_find_host(np);
+ domain = irq_find_host(tcu->np);
if (!domain) {
err = -ENODEV;
goto err_clk_disable;
}
- timer_virq = irq_create_mapping(domain, channel);
+ timer_virq = irq_create_mapping(domain, timer->channel);
if (!timer_virq) {
err = -EINVAL;
goto err_clk_disable;
}
- snprintf(tcu->name, sizeof(tcu->name), "TCU");
+ snprintf(timer->name, sizeof(timer->name), "TCU%u", timer->channel);
err = request_irq(timer_virq, ingenic_tcu_cevt_cb, IRQF_TIMER,
- tcu->name, &tcu->cevt);
+ timer->name, timer);
if (err)
goto err_irq_dispose_mapping;
- tcu->cevt.cpumask = cpumask_of(smp_processor_id());
- tcu->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
- tcu->cevt.name = tcu->name;
- tcu->cevt.rating = 200;
- tcu->cevt.set_state_shutdown = ingenic_tcu_cevt_set_state_shutdown;
- tcu->cevt.set_next_event = ingenic_tcu_cevt_set_next;
+ timer->cpu = smp_processor_id();
+ timer->cevt.cpumask = cpumask_of(smp_processor_id());
+ timer->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
+ timer->cevt.name = timer->name;
+ timer->cevt.rating = 200;
+ timer->cevt.set_state_shutdown = ingenic_tcu_cevt_set_state_shutdown;
+ timer->cevt.set_next_event = ingenic_tcu_cevt_set_next;
- clockevents_config_and_register(&tcu->cevt, rate, 10, 0xffff);
+ clockevents_config_and_register(&timer->cevt, rate, 10, 0xffff);
return 0;
err_irq_dispose_mapping:
irq_dispose_mapping(timer_virq);
err_clk_disable:
- clk_disable_unprepare(tcu->timer_clk);
+ clk_disable_unprepare(timer->clk);
err_clk_put:
- clk_put(tcu->timer_clk);
+ clk_put(timer->clk);
return err;
}
@@ -238,10 +273,12 @@ static int __init ingenic_tcu_init(struct device_node *np)
{
const struct of_device_id *id = of_match_node(ingenic_tcu_of_match, np);
const struct ingenic_soc_info *soc_info = id->data;
+ struct ingenic_tcu_timer *timer;
struct ingenic_tcu *tcu;
struct regmap *map;
+ unsigned int cpu;
+ int ret, last_bit = -1;
long rate;
- int ret;
of_node_clear_flag(np, OF_POPULATED);
@@ -249,17 +286,23 @@ static int __init ingenic_tcu_init(struct device_node *np)
if (IS_ERR(map))
return PTR_ERR(map);
- tcu = kzalloc(sizeof(*tcu), GFP_KERNEL);
+ tcu = kzalloc(struct_size(tcu, timers, num_possible_cpus()),
+ GFP_KERNEL);
if (!tcu)
return -ENOMEM;
- /* Enable all TCU channels for PWM use by default except channels 0/1 */
- tcu->pwm_channels_mask = GENMASK(soc_info->num_channels - 1, 2);
+ /*
+ * Enable all TCU channels for PWM use by default except channels 0/1,
+ * and channel 2 if the target CPU is a JZ4780/X2000 and SMP is enabled.
+ */
+ tcu->pwm_channels_mask = GENMASK(soc_info->num_channels - 1,
+ num_possible_cpus() + 1);
of_property_read_u32(np, "ingenic,pwm-channels-mask",
(u32 *)&tcu->pwm_channels_mask);
- /* Verify that we have at least two free channels */
- if (hweight8(tcu->pwm_channels_mask) > soc_info->num_channels - 2) {
+ /* Verify that we have at least num_possible_cpus() + 1 free channels */
+ if (hweight8(tcu->pwm_channels_mask) >
+ soc_info->num_channels - (num_possible_cpus() + 1)) {
pr_crit("%s: Invalid PWM channel mask: 0x%02lx\n", __func__,
tcu->pwm_channels_mask);
ret = -EINVAL;
@@ -267,13 +310,22 @@ static int __init ingenic_tcu_init(struct device_node *np)
}
tcu->map = map;
+ tcu->np = np;
ingenic_tcu = tcu;
- tcu->timer_channel = find_first_zero_bit(&tcu->pwm_channels_mask,
- soc_info->num_channels);
+ for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+ timer = &tcu->timers[cpu];
+
+ timer->cpu = cpu;
+ timer->channel = find_next_zero_bit(&tcu->pwm_channels_mask,
+ soc_info->num_channels,
+ last_bit + 1);
+ last_bit = timer->channel;
+ }
+
tcu->cs_channel = find_next_zero_bit(&tcu->pwm_channels_mask,
soc_info->num_channels,
- tcu->timer_channel + 1);
+ last_bit + 1);
ret = ingenic_tcu_clocksource_init(np, tcu);
if (ret) {
@@ -281,9 +333,13 @@ static int __init ingenic_tcu_init(struct device_node *np)
goto err_free_ingenic_tcu;
}
- ret = ingenic_tcu_timer_init(np, tcu);
- if (ret)
+ /* Setup clock events on each CPU core */
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "Ingenic XBurst: online",
+ ingenic_tcu_setup_cevt, NULL);
+ if (ret < 0) {
+ pr_crit("%s: Unable to start CPU timers: %d\n", __func__, ret);
goto err_tcu_clocksource_cleanup;
+ }
/* Register the sched_clock at the end as there's no way to undo it */
rate = clk_get_rate(tcu->cs_clk);
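/*
 * Editor's note: cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, ...) runs the
 * "online" callback on every CPU that is already up and again on each CPU
 * that comes up later, which is what lets ingenic_tcu_setup_cevt() register
 * one clockevent per core. A minimal sketch of the pattern (illustrative
 * names; no teardown callback, as in the patch above):
 */
static int example_cpu_online(unsigned int cpu)
{
	/* per-CPU setup; called on the CPU that is coming online */
	return 0;
}

static int example_register(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_online, NULL);
	/* ret < 0 is an error; >= 0 is the dynamically allocated state id */
	return ret < 0 ? ret : 0;
}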
@@ -315,28 +371,38 @@ static int __init ingenic_tcu_probe(struct platform_device *pdev)
static int __maybe_unused ingenic_tcu_suspend(struct device *dev)
{
struct ingenic_tcu *tcu = dev_get_drvdata(dev);
+ unsigned int cpu;
clk_disable(tcu->cs_clk);
- clk_disable(tcu->timer_clk);
+
+ for (cpu = 0; cpu < num_online_cpus(); cpu++)
+ clk_disable(tcu->timers[cpu].clk);
+
return 0;
}
static int __maybe_unused ingenic_tcu_resume(struct device *dev)
{
struct ingenic_tcu *tcu = dev_get_drvdata(dev);
+ unsigned int cpu;
int ret;
- ret = clk_enable(tcu->timer_clk);
- if (ret)
- return ret;
+ for (cpu = 0; cpu < num_online_cpus(); cpu++) {
+ ret = clk_enable(tcu->timers[cpu].clk);
+ if (ret)
+ goto err_timer_clk_disable;
+ }
ret = clk_enable(tcu->cs_clk);
- if (ret) {
- clk_disable(tcu->timer_clk);
- return ret;
- }
+ if (ret)
+ goto err_timer_clk_disable;
return 0;
+
+err_timer_clk_disable:
+ for (; cpu > 0; cpu--)
+ clk_disable(tcu->timers[cpu - 1].clk);
+ return ret;
}
static const struct dev_pm_ops __maybe_unused ingenic_tcu_pm_ops = {
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index f49a631d8f58..1cf3304652d6 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -186,6 +186,7 @@ static int __init nmdk_timer_init(void __iomem *base, int irq,
{
unsigned long rate;
int ret;
+ int min_ticks;
mtu_base = base;
@@ -194,7 +195,8 @@ static int __init nmdk_timer_init(void __iomem *base, int irq,
/*
* Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
- * for ux500.
+ * for ux500 and, in one specific ux500 case, 32768 Hz.
+ *
* Use a divide-by-16 counter if the tick rate is more than 32MHz.
* At 32 MHz, the timer (with 32 bit counter) can be programmed
* to wake up at a max 127s ahead in time. Dividing a 2.4 MHz timer
@@ -230,7 +232,12 @@ static int __init nmdk_timer_init(void __iomem *base, int irq,
pr_err("%s: request_irq() failed\n", "Nomadik Timer Tick");
nmdk_clkevt.cpumask = cpumask_of(0);
nmdk_clkevt.irq = irq;
- clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);
+ if (rate < 100000)
+ min_ticks = 5;
+ else
+ min_ticks = 2;
+ clockevents_config_and_register(&nmdk_clkevt, rate, min_ticks,
+ 0xffffffffU);
mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
mtu_delay_timer.freq = rate;
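/*
 * Editor's note on the min_ticks change above: the third argument of
 * clockevents_config_and_register() is the minimum programmable delta in
 * ticks. At 2.4 MHz, 2 ticks is under a microsecond, but at 32768 Hz, 2
 * ticks is already ~61 us, presumably too tight for safe reprogramming,
 * hence the larger floor. Roughly:
 *
 *	min_delta_ns = ticks * NSEC_PER_SEC / rate;
 *	2 * 10^9 / 32768  ~=  61035 ns
 *	5 * 10^9 / 32768  ~= 152588 ns
 */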
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 12ac75f7571f..760777458a90 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -349,7 +349,7 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
/*
* According to the sh73a0 user's manual, as CMCNT can be operated
- * only by the RCLK (Pseudo 32 KHz), there's one restriction on
+ * only by the RCLK (Pseudo 32 kHz), there's one restriction on
* modifying CMCNT register; two RCLK cycles are necessary before
* this register is either read or any modification of the value
* it holds is reflected in the LSI's actual operation.
diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
index 7427b07495a8..787dbebbb432 100644
--- a/drivers/clocksource/timer-atmel-tcb.c
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -27,9 +27,10 @@
* - Some chips support 32 bit counter. A single channel is used for
* this 32 bit free-running counter. the second channel is not used.
*
- * - The third channel may be used to provide a 16-bit clockevent
- * source, used in either periodic or oneshot mode. This runs
- * at 32 KiHZ, and can handle delays of up to two seconds.
+ * - The third channel may be used to provide a clockevent source, used in
+ * either periodic or oneshot mode. For a 16-bit counter, it runs at 32 KiHz
+ * and can handle delays of up to two seconds. For 32-bit counters, it runs at
+ * the same rate as the clocksource.
*
* REVISIT behavior during system suspend states... we should disable
* all clocks and save the power. Easily done for clockevent devices,
@@ -47,6 +48,8 @@ static struct
} tcb_cache[3];
static u32 bmr_cache;
+static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128 };
+
static u64 tc_get_cycles(struct clocksource *cs)
{
unsigned long flags;
@@ -143,6 +146,7 @@ static unsigned long notrace tc_delay_timer_read32(void)
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
+ u32 rate;
void __iomem *regs;
};
@@ -151,13 +155,6 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}
-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- * because using one of the divided clocks would usually mean the
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- *
- * A divided clock could be good for high resolution timers, since
- * 30.5 usec resolution can seem "low".
- */
static u32 timer_clock;
static int tc_shutdown(struct clock_event_device *d)
@@ -183,7 +180,7 @@ static int tc_set_oneshot(struct clock_event_device *d)
clk_enable(tcd->clk);
- /* slow clock, count up to RC, then irq and stop */
+ /* count up to RC, then irq and stop */
writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -205,10 +202,10 @@ static int tc_set_periodic(struct clock_event_device *d)
*/
clk_enable(tcd->clk);
- /* slow clock, count up to RC, then irq and restart */
+ /* count up to RC, then irq and restart */
writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
- writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+ writel((tcd->rate + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
/* Enable clock and interrupts on RC compare */
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -256,47 +253,55 @@ static irqreturn_t ch2_irq(int irq, void *handle)
return IRQ_NONE;
}
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
-
- ret = clk_prepare_enable(tc->slow_clk);
- if (ret)
- return ret;
+ int bits = tc->tcb_config->counter_width;
/* try to enable t2 clk to avoid future errors in mode change */
ret = clk_prepare_enable(t2_clk);
- if (ret) {
- clk_disable_unprepare(tc->slow_clk);
+ if (ret)
return ret;
- }
-
- clk_disable(t2_clk);
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
- timer_clock = clk32k_divisor_idx;
+ if (bits == 32) {
+ timer_clock = divisor_idx;
+ clkevt.rate = clk_get_rate(t2_clk) / atmel_tcb_divisors[divisor_idx];
+ } else {
+ ret = clk_prepare_enable(tc->slow_clk);
+ if (ret) {
+ clk_disable_unprepare(t2_clk);
+ return ret;
+ }
+
+ clkevt.rate = clk_get_rate(tc->slow_clk);
+ timer_clock = ATMEL_TC_TIMER_CLOCK5;
+ }
+
+ clk_disable(t2_clk);
clkevt.clkevt.cpumask = cpumask_of(0);
ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
if (ret) {
clk_unprepare(t2_clk);
- clk_disable_unprepare(tc->slow_clk);
+ if (bits != 32)
+ clk_disable_unprepare(tc->slow_clk);
return ret;
}
- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
+ clockevents_config_and_register(&clkevt.clkevt, clkevt.rate, 1, BIT(bits) - 1);
return ret;
}
#else /* !CONFIG_GENERIC_CLOCKEVENTS */
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
/* NOTHING */
return 0;
@@ -346,11 +351,23 @@ static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_id
writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}
-static const u8 atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0, };
+static struct atmel_tcb_config tcb_rm9200_config = {
+ .counter_width = 16,
+};
+
+static struct atmel_tcb_config tcb_sam9x5_config = {
+ .counter_width = 32,
+};
+
+static struct atmel_tcb_config tcb_sama5d2_config = {
+ .counter_width = 32,
+ .has_gclk = 1,
+};
static const struct of_device_id atmel_tcb_of_match[] = {
- { .compatible = "atmel,at91rm9200-tcb", .data = (void *)16, },
- { .compatible = "atmel,at91sam9x5-tcb", .data = (void *)32, },
+ { .compatible = "atmel,at91rm9200-tcb", .data = &tcb_rm9200_config, },
+ { .compatible = "atmel,at91sam9x5-tcb", .data = &tcb_sam9x5_config, },
+ { .compatible = "atmel,sama5d2-tcb", .data = &tcb_sama5d2_config, },
{ /* sentinel */ }
};
@@ -362,7 +379,6 @@ static int __init tcb_clksrc_init(struct device_node *node)
u64 (*tc_sched_clock)(void);
u32 rate, divided_rate = 0;
int best_divisor_idx = -1;
- int clk32k_divisor_idx = -1;
int bits;
int i;
int ret;
@@ -399,7 +415,11 @@ static int __init tcb_clksrc_init(struct device_node *node)
}
match = of_match_node(atmel_tcb_of_match, node->parent);
- bits = (uintptr_t)match->data;
+ if (!match)
+ return -ENODEV;
+
+ tc.tcb_config = match->data;
+ bits = tc.tcb_config->counter_width;
for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
@@ -412,22 +432,17 @@ static int __init tcb_clksrc_init(struct device_node *node)
/* How fast will we be counting? Pick something over 5 MHz. */
rate = (u32) clk_get_rate(t0_clk);
- for (i = 0; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
+ i = 0;
+ if (tc.tcb_config->has_gclk)
+ i = 1;
+ for (; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
unsigned divisor = atmel_tcb_divisors[i];
unsigned tmp;
- /* remember 32 KiHz clock for later */
- if (!divisor) {
- clk32k_divisor_idx = i;
- continue;
- }
-
tmp = rate / divisor;
pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
- if (best_divisor_idx > 0) {
- if (tmp < 5 * 1000 * 1000)
- continue;
- }
+ if ((best_divisor_idx >= 0) && (tmp < 5 * 1000 * 1000))
+ break;
divided_rate = tmp;
best_divisor_idx = i;
}
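/*
 * Editor's note: the rewritten loop above walks atmel_tcb_divisors ({2, 8,
 * 32, 128}) and keeps the largest divisor that still yields at least 5 MHz
 * (the first divisor is always accepted). Worked example, assuming a 132 MHz
 * MCK (the rate is illustrative):
 *
 *	132 MHz / 2  = 66 MHz     -> candidate
 *	132 MHz / 8  = 16.5 MHz   -> candidate (kept)
 *	132 MHz / 32 = 4.125 MHz  -> below 5 MHz, stop
 *
 * so the clocksource counts at 16.5 MHz with best_divisor_idx == 1.
 */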
@@ -467,7 +482,7 @@ static int __init tcb_clksrc_init(struct device_node *node)
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
- ret = setup_clkevents(&tc, clk32k_divisor_idx);
+ ret = setup_clkevents(&tc, best_divisor_idx);
if (ret)
goto err_unregister_clksrc;
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index ae12bbf3d68c..59b0be482f32 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -21,7 +21,7 @@
* Roughly modelled after the OMAP1 MPU timer code.
* Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com
*/
#include <linux/clk.h>
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 60aff087947a..33eeabf9c3d1 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -4,7 +4,7 @@
*
* OMAP Dual-Mode Timers
*
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
* Tarun Kanti DebBarma <tarun.kanti@ti.com>
* Thara Gopinath <thara@ti.com>
*
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index f6670c4abbb0..089938ead681 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -108,4 +108,3 @@ obj-$(CONFIG_LOONGSON1_CPUFREQ) += loongson1-cpufreq.o
obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
-obj-$(CONFIG_UNICORE32) += unicore2-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 429e5a36c08a..e4ff681faaaa 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -244,7 +244,7 @@ static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
{
- u32 val, dummy;
+ u32 val, dummy __always_unused;
rdmsr(MSR_IA32_PERF_CTL, val, dummy);
return val;
@@ -261,7 +261,7 @@ static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
{
- u32 val, dummy;
+ u32 val, dummy __always_unused;
rdmsr(MSR_AMD_PERF_CTL, val, dummy);
return val;
@@ -612,7 +612,7 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
/* Intel Xeon Processor 7100 Series Specification Update
- * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+ * https://www.intel.com/Assets/PDF/specupdate/314554.pdf
* AL30: A Machine Check Exception (MCE) Occurring during an
* Enhanced Intel SpeedStep Technology Ratio Change May Cause
* Both Processor Cores to Lock Up. */
@@ -993,14 +993,14 @@ MODULE_PARM_DESC(acpi_pstate_strict,
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
-static const struct x86_cpu_id acpi_cpufreq_ids[] = {
+static const struct x86_cpu_id __maybe_unused acpi_cpufreq_ids[] = {
X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL),
X86_MATCH_FEATURE(X86_FEATURE_HW_PSTATE, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
-static const struct acpi_device_id processor_device_ids[] = {
+static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
{ACPI_PROCESSOR_OBJECT_HID, },
{ACPI_PROCESSOR_DEVICE_HID, },
{},
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index f7c4206d4c90..d0b10baf039a 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -144,7 +144,7 @@ static void __exit amd_freq_sensitivity_exit(void)
}
module_exit(amd_freq_sensitivity_exit);
-static const struct x86_cpu_id amd_freq_sensitivity_ids[] = {
+static const struct x86_cpu_id __maybe_unused amd_freq_sensitivity_ids[] = {
X86_MATCH_FEATURE(X86_FEATURE_PROC_FEEDBACK, NULL),
{}
};
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 79742bbd221f..944d7b45afe9 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -279,7 +279,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
- dev_pm_opp_of_register_em(policy->cpus);
+ dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
return 0;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0128de3603df..17c1c3becd92 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -50,7 +50,9 @@ static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor) \
list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
-/**
+static char default_governor[CPUFREQ_NAME_LEN];
+
+/*
* The "cpufreq driver" - the arch- or hardware-dependent low
* level driver of CPUFreq support, and its spinlock. This lock
* also protects the cpufreq_cpu_data array.
@@ -78,7 +80,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol);
-/**
+/*
* Two notifier lists: the "policy" list is involved in the
* validation process for a new CPU frequency policy; the
* "transition" list for kernel code that needs to handle
@@ -298,7 +300,7 @@ struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
-/**
+/*
* adjust_jiffies - adjust the system "loops_per_jiffy"
*
* This function alters the system "loops_per_jiffy" for the clock
@@ -524,6 +526,7 @@ EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
/**
* cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
* one.
+ * @policy: associated policy to interrogate
* @target_freq: target frequency to resolve.
*
* The target to driver frequency mapping is cached in the policy.
@@ -621,6 +624,24 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
return NULL;
}
+static struct cpufreq_governor *get_governor(const char *str_governor)
+{
+ struct cpufreq_governor *t;
+
+ mutex_lock(&cpufreq_governor_mutex);
+ t = find_governor(str_governor);
+ if (!t)
+ goto unlock;
+
+ if (!try_module_get(t->owner))
+ t = NULL;
+
+unlock:
+ mutex_unlock(&cpufreq_governor_mutex);
+
+ return t;
+}
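/*
 * Editor's note: get_governor() pins the governor module while
 * cpufreq_governor_mutex is held, so the module cannot be unloaded between
 * the lookup and the try_module_get(). The caller then owns a reference and
 * must drop it when done, along these lines (illustrative sketch):
 */
static void example_use_governor(const char *name)
{
	struct cpufreq_governor *gov = get_governor(name);

	if (!gov)
		return;

	/* ... gov is safe to use here; its module cannot go away ... */

	module_put(gov->owner);
}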
+
static unsigned int cpufreq_parse_policy(char *str_governor)
{
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
@@ -640,31 +661,17 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
struct cpufreq_governor *t;
- mutex_lock(&cpufreq_governor_mutex);
-
- t = find_governor(str_governor);
- if (!t) {
- int ret;
-
- mutex_unlock(&cpufreq_governor_mutex);
-
- ret = request_module("cpufreq_%s", str_governor);
- if (ret)
- return NULL;
+ t = get_governor(str_governor);
+ if (t)
+ return t;
- mutex_lock(&cpufreq_governor_mutex);
-
- t = find_governor(str_governor);
- }
- if (t && !try_module_get(t->owner))
- t = NULL;
-
- mutex_unlock(&cpufreq_governor_mutex);
+ if (request_module("cpufreq_%s", str_governor))
+ return NULL;
- return t;
+ return get_governor(str_governor);
}
-/**
+/*
* cpufreq_per_cpu_attr_read() / show_##file_name() -
* print out cpufreq information
*
@@ -706,7 +713,7 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
return ret;
}
-/**
+/*
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
*/
#define store_one(file_name, object) \
@@ -727,7 +734,7 @@ static ssize_t store_##file_name \
store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
-/**
+/*
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
*/
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
@@ -741,7 +748,7 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
return sprintf(buf, "<unknown>\n");
}
-/**
+/*
* show_scaling_governor - show the current policy for the specified CPU
*/
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
@@ -756,7 +763,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
return -EINVAL;
}
-/**
+/*
* store_scaling_governor - store policy for the specified CPU
*/
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
@@ -793,7 +800,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
return ret ? ret : count;
}
-/**
+/*
* show_scaling_driver - show the cpufreq driver currently loaded
*/
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
@@ -801,7 +808,7 @@ static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
-/**
+/*
* show_scaling_available_governors - show the available CPUfreq governors
*/
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
@@ -815,12 +822,14 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
goto out;
}
+ mutex_lock(&cpufreq_governor_mutex);
for_each_governor(t) {
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
- (CPUFREQ_NAME_LEN + 2)))
- goto out;
+ break;
i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
}
+ mutex_unlock(&cpufreq_governor_mutex);
out:
i += sprintf(&buf[i], "\n");
return i;
@@ -843,7 +852,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
-/**
+/*
* show_related_cpus - show the CPUs affected by each transition even if
* hw coordination is in use
*/
@@ -852,7 +861,7 @@ static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
return cpufreq_show_cpus(policy->related_cpus, buf);
}
-/**
+/*
* show_affected_cpus - show the CPUs affected by each transition
*/
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
@@ -886,7 +895,7 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
return policy->governor->show_setspeed(policy, buf);
}
-/**
+/*
* show_bios_limit - show the current cpufreq HW/BIOS limitation
*/
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
@@ -1048,36 +1057,36 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return 0;
}
-__weak struct cpufreq_governor *cpufreq_default_governor(void)
-{
- return NULL;
-}
-
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
- struct cpufreq_governor *def_gov = cpufreq_default_governor();
struct cpufreq_governor *gov = NULL;
unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
+ int ret;
if (has_target()) {
/* Update policy governor to the one used before hotplug. */
- gov = find_governor(policy->last_governor);
+ gov = get_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
- policy->governor->name, policy->cpu);
- } else if (def_gov) {
- gov = def_gov;
+ gov->name, policy->cpu);
} else {
- return -ENODATA;
+ gov = get_governor(default_governor);
+ }
+
+ if (!gov) {
+ gov = cpufreq_default_governor();
+ __module_get(gov->owner);
}
+
} else {
+
/* Use the default policy if there is no last_policy. */
if (policy->last_policy) {
pol = policy->last_policy;
- } else if (def_gov) {
- pol = cpufreq_parse_policy(def_gov->name);
+ } else {
+ pol = cpufreq_parse_policy(default_governor);
/*
- * In case the default governor is neiter "performance"
+ * In case the default governor is neither "performance"
* nor "powersave", fall back to the initial policy
* value set by the driver.
*/
@@ -1089,7 +1098,11 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
return -ENODATA;
}
- return cpufreq_set_policy(policy, gov, pol);
+ ret = cpufreq_set_policy(policy, gov, pol);
+ if (gov)
+ module_put(gov->owner);
+
+ return ret;
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
@@ -1604,7 +1617,7 @@ unlock:
return 0;
}
-/**
+/*
* cpufreq_remove_dev - remove a CPU device
*
* Removes the cpufreq interface for a CPU device.
@@ -2361,6 +2374,7 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
* cpufreq_get_policy - get the current cpufreq_policy
* @policy: struct cpufreq_policy into which the current cpufreq_policy
* is written
+ * @cpu: CPU to find the policy for
*
* Reads the current cpufreq policy.
*/
@@ -2747,7 +2761,7 @@ out:
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
-/**
+/*
* cpufreq_unregister_driver - unregister the current CPUFreq driver
*
* Unregister the current CPUFreq driver. Only call this if you have
@@ -2783,13 +2797,19 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
+ struct cpufreq_governor *gov = cpufreq_default_governor();
+
if (cpufreq_disabled())
return -ENODEV;
cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
BUG_ON(!cpufreq_global_kobject);
+ if (!strlen(default_governor))
+ strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
+
return 0;
}
module_param(off, int, 0444);
+module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);
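/*
 * Editor's note: with the module_param_string() above, the default governor
 * can now be chosen on the kernel command line, e.g. (illustrative value):
 *
 *	cpufreq.default_governor=powersave
 *
 * If the parameter is left empty, cpufreq_core_init() falls back to the
 * built-in cpufreq_default_governor() as before.
 */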
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 737ff3b9c2c0..aa39ff31ec9f 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -322,17 +322,7 @@ static struct dbs_governor cs_governor = {
.start = cs_start,
};
-#define CPU_FREQ_GOV_CONSERVATIVE (&cs_governor.gov)
-
-static int __init cpufreq_gov_dbs_init(void)
-{
- return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE);
-}
-
-static void __exit cpufreq_gov_dbs_exit(void)
-{
- cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE);
-}
+#define CPU_FREQ_GOV_CONSERVATIVE (cs_governor.gov)
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
@@ -343,11 +333,9 @@ MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
- return CPU_FREQ_GOV_CONSERVATIVE;
+ return &CPU_FREQ_GOV_CONSERVATIVE;
}
-
-core_initcall(cpufreq_gov_dbs_init);
-#else
-module_init(cpufreq_gov_dbs_init);
#endif
-module_exit(cpufreq_gov_dbs_exit);
+
+cpufreq_governor_init(CPU_FREQ_GOV_CONSERVATIVE);
+cpufreq_governor_exit(CPU_FREQ_GOV_CONSERVATIVE);
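/*
 * Editor's note: cpufreq_governor_init()/cpufreq_governor_exit() replace the
 * per-governor init/exit boilerplate deleted above. A sketch of what the
 * init helper is expected to expand to (an approximation; the real macros
 * live in include/linux/cpufreq.h):
 */
#define example_governor_init(__governor)			\
static int __init __governor##_init(void)			\
{								\
	return cpufreq_register_governor(&__governor);		\
}								\
core_initcall(__governor##_init)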
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index f99ae45efaea..63f7c219062b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -26,7 +26,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
static DEFINE_MUTEX(gov_dbs_data_mutex);
/* Common sysfs tunables */
-/**
+/*
* store_sampling_rate - update sampling rate effective immediately if needed.
*
* If new rate is smaller than the old, simply updating
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 82a4d37ddecb..ac361a8b1d3b 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -408,7 +408,7 @@ static struct dbs_governor od_dbs_gov = {
.start = od_start,
};
-#define CPU_FREQ_GOV_ONDEMAND (&od_dbs_gov.gov)
+#define CPU_FREQ_GOV_ONDEMAND (od_dbs_gov.gov)
static void od_set_powersave_bias(unsigned int powersave_bias)
{
@@ -429,7 +429,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
continue;
policy = cpufreq_cpu_get_raw(cpu);
- if (!policy || policy->governor != CPU_FREQ_GOV_ONDEMAND)
+ if (!policy || policy->governor != &CPU_FREQ_GOV_ONDEMAND)
continue;
policy_dbs = policy->governor_data;
@@ -461,16 +461,6 @@ void od_unregister_powersave_bias_handler(void)
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
-static int __init cpufreq_gov_dbs_init(void)
-{
- return cpufreq_register_governor(CPU_FREQ_GOV_ONDEMAND);
-}
-
-static void __exit cpufreq_gov_dbs_exit(void)
-{
- cpufreq_unregister_governor(CPU_FREQ_GOV_ONDEMAND);
-}
-
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
@@ -480,11 +470,9 @@ MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
- return CPU_FREQ_GOV_ONDEMAND;
+ return &CPU_FREQ_GOV_ONDEMAND;
}
-
-core_initcall(cpufreq_gov_dbs_init);
-#else
-module_init(cpufreq_gov_dbs_init);
#endif
-module_exit(cpufreq_gov_dbs_exit);
+
+cpufreq_governor_init(CPU_FREQ_GOV_ONDEMAND);
+cpufreq_governor_exit(CPU_FREQ_GOV_ONDEMAND);
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index def9afe0f5b8..71c1d9aba772 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -23,16 +23,6 @@ static struct cpufreq_governor cpufreq_gov_performance = {
.limits = cpufreq_gov_performance_limits,
};
-static int __init cpufreq_gov_performance_init(void)
-{
- return cpufreq_register_governor(&cpufreq_gov_performance);
-}
-
-static void __exit cpufreq_gov_performance_exit(void)
-{
- cpufreq_unregister_governor(&cpufreq_gov_performance);
-}
-
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
struct cpufreq_governor *cpufreq_default_governor(void)
{
@@ -50,5 +40,5 @@ MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
MODULE_LICENSE("GPL");
-core_initcall(cpufreq_gov_performance_init);
-module_exit(cpufreq_gov_performance_exit);
+cpufreq_governor_init(cpufreq_gov_performance);
+cpufreq_governor_exit(cpufreq_gov_performance);
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 1ae66019eb83..7749522355b5 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -23,16 +23,6 @@ static struct cpufreq_governor cpufreq_gov_powersave = {
.owner = THIS_MODULE,
};
-static int __init cpufreq_gov_powersave_init(void)
-{
- return cpufreq_register_governor(&cpufreq_gov_powersave);
-}
-
-static void __exit cpufreq_gov_powersave_exit(void)
-{
- cpufreq_unregister_governor(&cpufreq_gov_powersave);
-}
-
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
MODULE_LICENSE("GPL");
@@ -42,9 +32,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
{
return &cpufreq_gov_powersave;
}
-
-core_initcall(cpufreq_gov_powersave_init);
-#else
-module_init(cpufreq_gov_powersave_init);
#endif
-module_exit(cpufreq_gov_powersave_exit);
+
+cpufreq_governor_init(cpufreq_gov_powersave);
+cpufreq_governor_exit(cpufreq_gov_powersave);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index b43e7cd502c5..50a4d7846580 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -126,16 +126,6 @@ static struct cpufreq_governor cpufreq_gov_userspace = {
.owner = THIS_MODULE,
};
-static int __init cpufreq_gov_userspace_init(void)
-{
- return cpufreq_register_governor(&cpufreq_gov_userspace);
-}
-
-static void __exit cpufreq_gov_userspace_exit(void)
-{
- cpufreq_unregister_governor(&cpufreq_gov_userspace);
-}
-
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
"Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
@@ -146,9 +136,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
{
return &cpufreq_gov_userspace;
}
-
-core_initcall(cpufreq_gov_userspace_init);
-#else
-module_init(cpufreq_gov_userspace_init);
#endif
-module_exit(cpufreq_gov_userspace_exit);
+
+cpufreq_governor_init(cpufreq_gov_userspace);
+cpufreq_governor_exit(cpufreq_gov_userspace);
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 297d23cad8b5..91f477a6cbc4 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -2,7 +2,7 @@
/*
* CPU frequency scaling for DaVinci
*
- * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/
*
* Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows:
*
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index e117b0059123..f839dc9852c0 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -221,7 +221,7 @@ int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
-/**
+/*
* show_available_freqs - show available frequencies for the specified CPU
*/
static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
@@ -260,7 +260,7 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
struct freq_attr cpufreq_freq_attr_##_name##_freqs = \
__ATTR_RO(_name##_frequencies)
-/**
+/*
* show_scaling_available_frequencies - show available normal frequencies for
* the specified CPU
*/
@@ -272,7 +272,7 @@ static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
cpufreq_attr_available_freq(scaling_available);
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
-/**
+/*
* show_available_boost_freqs - show available boost frequencies for
* the specified CPU
*/
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index fdb2ffffbd15..ef7b34c1fd2b 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -193,7 +193,7 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
policy->clk = clks[ARM].clk;
cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = max_freq;
- dev_pm_opp_of_register_em(policy->cpus);
+ dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
return 0;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7e0f7880b21a..7f5d81931483 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -201,9 +201,7 @@ struct global_params {
* @pstate: Stores P state limits for this CPU
* @vid: Stores VID limits for this CPU
* @last_sample_time: Last Sample time
- * @aperf_mperf_shift: Number of clock cycles after aperf, merf is incremented
- * This shift is a multiplier to mperf delta to
- * calculate CPU busy.
+ * @aperf_mperf_shift: APERF vs MPERF counting frequency difference
* @prev_aperf: Last APERF value read from APERF MSR
* @prev_mperf: Last MPERF value read from MPERF MSR
* @prev_tsc: Last timestamp counter (TSC) value
@@ -275,6 +273,7 @@ static struct cpudata **all_cpu_data;
* @get_min: Callback to get minimum P state
* @get_turbo: Callback to get turbo P state
* @get_scaling: Callback to get frequency scaling factor
+ * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
* @get_val: Callback to convert P state to actual MSR write value
* @get_vid: Callback to get VID data for Atom platforms
*
@@ -602,11 +601,12 @@ static const unsigned int epp_values[] = {
HWP_EPP_POWERSAVE
};
-static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
+static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
{
s16 epp;
int index = -EINVAL;
+ *raw_epp = 0;
epp = intel_pstate_get_epp(cpu_data, 0);
if (epp < 0)
return epp;
@@ -614,12 +614,14 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
if (epp == HWP_EPP_PERFORMANCE)
return 1;
- if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
+ if (epp == HWP_EPP_BALANCE_PERFORMANCE)
return 2;
- if (epp <= HWP_EPP_BALANCE_POWERSAVE)
+ if (epp == HWP_EPP_BALANCE_POWERSAVE)
return 3;
- else
+ if (epp == HWP_EPP_POWERSAVE)
return 4;
+ *raw_epp = epp;
+ return 0;
} else if (boot_cpu_has(X86_FEATURE_EPB)) {
/*
* Range:
@@ -638,7 +640,8 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
}
static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
- int pref_index)
+ int pref_index, bool use_raw,
+ u32 raw_epp)
{
int epp = -EINVAL;
int ret;
@@ -646,29 +649,34 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
if (!pref_index)
epp = cpu_data->epp_default;
- mutex_lock(&intel_pstate_limits_lock);
-
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
- u64 value;
-
- ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
- if (ret)
- goto return_pref;
+ /*
+ * Use the cached HWP Request MSR value, because the register
+ * itself may be updated by intel_pstate_hwp_boost_up() or
+ * intel_pstate_hwp_boost_down() at any time.
+ */
+ u64 value = READ_ONCE(cpu_data->hwp_req_cached);
value &= ~GENMASK_ULL(31, 24);
- if (epp == -EINVAL)
+ if (use_raw)
+ epp = raw_epp;
+ else if (epp == -EINVAL)
epp = epp_values[pref_index - 1];
value |= (u64)epp << 24;
+ /*
+ * The only other updater of hwp_req_cached in the active mode,
+ * intel_pstate_hwp_set(), is called under the same lock as this
+ * function, so it cannot run in parallel with the update below.
+ */
+ WRITE_ONCE(cpu_data->hwp_req_cached, value);
ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
} else {
if (epp == -EINVAL)
epp = (pref_index - 1) << 2;
ret = intel_pstate_set_epb(cpu_data->cpu, epp);
}
-return_pref:
- mutex_unlock(&intel_pstate_limits_lock);
return ret;
}
@@ -694,31 +702,54 @@ static ssize_t store_energy_performance_preference(
{
struct cpudata *cpu_data = all_cpu_data[policy->cpu];
char str_preference[21];
- int ret;
+ bool raw = false;
+ ssize_t ret;
+ u32 epp = 0;
ret = sscanf(buf, "%20s", str_preference);
if (ret != 1)
return -EINVAL;
ret = match_string(energy_perf_strings, -1, str_preference);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
+ return ret;
- intel_pstate_set_energy_pref_index(cpu_data, ret);
- return count;
+ ret = kstrtouint(buf, 10, &epp);
+ if (ret)
+ return ret;
+
+ if (epp > 255)
+ return -EINVAL;
+
+ raw = true;
+ }
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ ret = intel_pstate_set_energy_pref_index(cpu_data, ret, raw, epp);
+ if (!ret)
+ ret = count;
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ return ret;
}
static ssize_t show_energy_performance_preference(
struct cpufreq_policy *policy, char *buf)
{
struct cpudata *cpu_data = all_cpu_data[policy->cpu];
- int preference;
+ int preference, raw_epp;
- preference = intel_pstate_get_energy_pref_index(cpu_data);
+ preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
if (preference < 0)
return preference;
- return sprintf(buf, "%s\n", energy_perf_strings[preference]);
+ if (raw_epp)
+ return sprintf(buf, "%d\n", raw_epp);
+ else
+ return sprintf(buf, "%s\n", energy_perf_strings[preference]);
}
cpufreq_freq_attr_rw(energy_performance_preference);
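/*
 * Editor's note: with the changes above, energy_performance_preference
 * accepts either a named preference or, on HWP-EPP systems, a raw 0-255
 * value, and show() prints the raw number back whenever the current EPP does
 * not match one of the named levels. Illustrative shell usage:
 *
 *	# echo balance_performance > \
 *		/sys/devices/system/cpu/cpufreq/policy0/energy_performance_preference
 *	# echo 128 > \
 *		/sys/devices/system/cpu/cpufreq/policy0/energy_performance_preference
 */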
@@ -866,10 +897,39 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
return 0;
}
+#define POWER_CTL_EE_ENABLE 1
+#define POWER_CTL_EE_DISABLE 2
+
+static int power_ctl_ee_state;
+
+static void set_power_ctl_ee_state(bool input)
+{
+ u64 power_ctl;
+
+ mutex_lock(&intel_pstate_driver_lock);
+ rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ if (input) {
+ power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
+ power_ctl_ee_state = POWER_CTL_EE_ENABLE;
+ } else {
+ power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
+ power_ctl_ee_state = POWER_CTL_EE_DISABLE;
+ }
+ wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ mutex_unlock(&intel_pstate_driver_lock);
+}
+
static void intel_pstate_hwp_enable(struct cpudata *cpudata);
static int intel_pstate_resume(struct cpufreq_policy *policy)
{
+
+ /* Only restore if the system default is changed */
+ if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
+ set_power_ctl_ee_state(true);
+ else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
+ set_power_ctl_ee_state(false);
+
if (!hwp_active)
return 0;
@@ -1218,6 +1278,32 @@ static ssize_t store_hwp_dynamic_boost(struct kobject *a,
return count;
}
+static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ u64 power_ctl;
+ int enable;
+
+ rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
+ return sprintf(buf, "%d\n", !enable);
+}
+
+static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
+ const char *buf, size_t count)
+{
+ bool input;
+ int ret;
+
+ ret = kstrtobool(buf, &input);
+ if (ret)
+ return ret;
+
+ set_power_ctl_ee_state(input);
+
+ return count;
+}
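/*
 * Editor's note: the two handlers above back the new global
 * intel_pstate/energy_efficiency attribute, which is only created (further
 * down) for CPUs matching intel_pstate_cpu_ee_disable_ids[]. Reading it
 * reports whether the energy-efficiency optimization is on; writing flips
 * the POWER_CTL MSR bit. Illustrative usage:
 *
 *	# cat /sys/devices/system/cpu/intel_pstate/energy_efficiency
 *	0
 *	# echo 1 > /sys/devices/system/cpu/intel_pstate/energy_efficiency
 */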
+
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);
@@ -1228,6 +1314,7 @@ define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
define_one_global_rw(hwp_dynamic_boost);
+define_one_global_rw(energy_efficiency);
static struct attribute *intel_pstate_attributes[] = {
&status.attr,
@@ -1241,6 +1328,8 @@ static const struct attribute_group intel_pstate_attr_group = {
.attrs = intel_pstate_attributes,
};
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];
+
static void __init intel_pstate_sysfs_expose_params(void)
{
struct kobject *intel_pstate_kobject;
@@ -1273,6 +1362,11 @@ static void __init intel_pstate_sysfs_expose_params(void)
&hwp_dynamic_boost.attr);
WARN_ON(rc);
}
+
+ if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
+ rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
+ WARN_ON(rc);
+ }
}
/************************** sysfs end ************************/
@@ -1288,25 +1382,6 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}
-#define MSR_IA32_POWER_CTL_BIT_EE 19
-
-/* Disable energy efficiency optimization */
-static void intel_pstate_disable_ee(int cpu)
-{
- u64 power_ctl;
- int ret;
-
- ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
- if (ret)
- return;
-
- if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
- pr_info("Disabling energy efficiency optimization\n");
- power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
- wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
- }
-}
-
static int atom_get_min_pstate(void)
{
u64 value;
@@ -1982,10 +2057,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
if (hwp_active) {
const struct x86_cpu_id *id;
- id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
- if (id)
- intel_pstate_disable_ee(cpunum);
-
intel_pstate_hwp_enable(cpu);
id = x86_match_cpu(intel_pstate_hwp_boost_ids);
@@ -2754,7 +2825,12 @@ static int __init intel_pstate_init(void)
id = x86_match_cpu(hwp_support_ids);
if (id) {
copy_cpu_funcs(&core_funcs);
- if (!no_hwp) {
+ /*
+ * Avoid enabling HWP for processors without EPP support,
+	 * because that means an incomplete HWP implementation, which is a
+	 * corner case, and supporting it is generally problematic.
+ */
+ if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
@@ -2808,8 +2884,17 @@ hwp_cpu_matched:
if (rc)
return rc;
- if (hwp_active)
+ if (hwp_active) {
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
+ if (id) {
+ set_power_ctl_ee_state(false);
+ pr_info("Disabling energy efficiency optimization\n");
+ }
+
pr_info("HWP enabled\n");
+ }
return 0;
}
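
The EE disable is now done once at driver init, gated by an x86_match_cpu() deny-list rather than per-CPU. A hedged sketch of that gating pattern (the table name and model are illustrative, not the driver's actual list):

	#include <asm/cpu_device_id.h>
	#include <asm/intel-family.h>
	#include <linux/mod_devicetable.h>

	static const struct x86_cpu_id sketch_ee_disable_ids[] = {
		X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL),	/* illustrative */
		{}
	};

	static bool sketch_should_disable_ee(void)
	{
		/* true when the boot CPU is in the deny-list table */
		return x86_match_cpu(sketch_ee_disable_ids) != NULL;
	}
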
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 0c98dd08273d..7d1212c9b7c8 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -448,7 +448,7 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
policy->driver_data = info;
policy->clk = info->cpu_clk;
- dev_pm_opp_of_register_em(policy->cpus);
+ dev_pm_opp_of_register_em(info->cpu_dev, policy->cpus);
return 0;
}
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 8d14b42a8c6f..3694bb030df3 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -131,7 +131,7 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
/* FIXME: what's the actual transition time? */
cpufreq_generic_init(policy, freq_table, 300 * 1000);
- dev_pm_opp_of_register_em(policy->cpus);
+ dev_pm_opp_of_register_em(mpu_dev, policy->cpus);
return 0;
}
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index c66f566a854c..815645170c4d 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -22,6 +22,8 @@
#include <asm/time.h>
#include <asm/smp.h>
+#include <platforms/pasemi/pasemi.h>
+
#define SDCASR_REG 0x0100
#define SDCASR_REG_STRIDE 0x1000
#define SDCPWR_CFGA0_REG 0x0100
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 5789fe7a94bd..9f3fc7a073d0 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -616,7 +616,7 @@ static void __exit pcc_cpufreq_exit(void)
free_percpu(pcc_cpu_info);
}
-static const struct acpi_device_id processor_device_ids[] = {
+static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
{ACPI_PROCESSOR_OBJECT_HID, },
{ACPI_PROCESSOR_DEVICE_HID, },
{},
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 3984959eed1d..0acc9e241cd7 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -86,7 +86,7 @@ static u32 convert_fid_to_vco_fid(u32 fid)
*/
static int pending_bit_stuck(void)
{
- u32 lo, hi;
+ u32 lo, hi __always_unused;
rdmsr(MSR_FIDVID_STATUS, lo, hi);
return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
@@ -282,7 +282,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data,
{
u32 rvosteps = data->rvo;
u32 savefid = data->currfid;
- u32 maxvid, lo, rvomult = 1;
+ u32 maxvid, lo __always_unused, rvomult = 1;
pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
smp_processor_id(),
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 8646eb197cd9..a9af15e994cc 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -64,13 +64,14 @@
* highest_lpstate_idx
* @last_sampled_time: Time from boot in ms when global pstates were
* last set
- * @last_lpstate_idx, Last set value of local pstate and global
- * last_gpstate_idx pstate in terms of cpufreq table index
+ * @last_lpstate_idx: Last set value of local pstate and global
+ * @last_gpstate_idx: pstate in terms of cpufreq table index
* @timer: Is used for ramping down if cpu goes idle for
* a long time with global pstate held high
* @gpstate_lock: A spinlock to maintain synchronization between
* routines called by the timer handler and
 * governor's target_index calls
+ * @policy: Associated CPUFreq policy
*/
struct global_pstate_info {
int highest_lpstate_idx;
@@ -85,7 +86,7 @@ struct global_pstate_info {
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
-DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
+static DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
/**
* struct pstate_idx_revmap_data: Entry in the hashmap pstate_revmap
* indexed by a function of pstate id.
@@ -170,7 +171,7 @@ static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
/* Use following functions for conversions between pstate_id and index */
-/**
+/*
* idx_to_pstate : Returns the pstate id corresponding to the
* frequency in the cpufreq frequency table
* powernv_freqs indexed by @i.
@@ -188,7 +189,7 @@ static inline u8 idx_to_pstate(unsigned int i)
return powernv_freqs[i].driver_data;
}
-/**
+/*
 * pstate_to_idx : Returns the index in the cpufreq frequency table
* powernv_freqs for the frequency whose corresponding
* pstate id is @pstate.
@@ -380,7 +381,7 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
powernv_freqs[powernv_pstate_info.nominal].frequency);
}
-struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
+static struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
__ATTR_RO(cpuinfo_nominal_freq);
#define SCALING_BOOST_FREQS_ATTR_INDEX 2
@@ -660,13 +661,13 @@ static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
/**
* gpstate_timer_handler
*
- * @data: pointer to cpufreq_policy on which timer was queued
+ * @t: Timer context used to fetch global pstate info struct
*
 * This handler brings the global pstate down closer to the local pstate
 * according to a quadratic equation. Queues a new timer if it is still not
 * equal to the local pstate.
*/
-void gpstate_timer_handler(struct timer_list *t)
+static void gpstate_timer_handler(struct timer_list *t)
{
struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
struct cpufreq_policy *policy = gpstates->policy;
@@ -899,7 +900,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
.notifier_call = powernv_cpufreq_reboot_notifier,
};
-void powernv_cpufreq_work_fn(struct work_struct *work)
+static void powernv_cpufreq_work_fn(struct work_struct *work)
{
struct chip *chip = container_of(work, struct chip, throttle);
struct cpufreq_policy *policy;
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index fc92a8842e25..0a04b6f03b9a 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -238,7 +238,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
goto error;
}
- dev_pm_opp_of_register_em(policy->cpus);
+ dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
policy->fast_switch_possible = true;
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 61623e2ff149..fb42e3390377 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -103,17 +103,12 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
}
static int __maybe_unused
-scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu)
+scmi_get_cpu_power(unsigned long *power, unsigned long *KHz,
+ struct device *cpu_dev)
{
- struct device *cpu_dev = get_cpu_device(cpu);
unsigned long Hz;
int ret, domain;
- if (!cpu_dev) {
- pr_err("failed to get cpu%d device\n", cpu);
- return -ENODEV;
- }
-
domain = handle->perf_ops->device_domain_id(cpu_dev);
if (domain < 0)
return domain;
@@ -198,9 +193,10 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = latency;
- policy->fast_switch_possible = true;
+ policy->fast_switch_possible =
+ handle->perf_ops->fast_switch_possible(handle, cpu_dev);
- em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
+ em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus);
return 0;
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 20d1f85d5f5a..b0f5388b8854 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -167,7 +167,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = false;
- dev_pm_opp_of_register_em(policy->cpus);
+ dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
return 0;
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
deleted file mode 100644
index 98d392196df2..000000000000
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ /dev/null
@@ -1,76 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * clock scaling for the UniCore-II
- *
- * Code specific to PKUnity SoC and UniCore ISA
- *
- * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
- * Copyright (C) 2001-2010 Guan Xuetao
- */
-
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
-
-#include <mach/hardware.h>
-
-static struct cpufreq_driver ucv2_driver;
-
-/* make sure that only the "userspace" governor is run
- * -- anything else wouldn't make sense on this platform, anyway.
- */
-static int ucv2_verify_speed(struct cpufreq_policy_data *policy)
-{
- if (policy->cpu)
- return -EINVAL;
-
- cpufreq_verify_within_cpu_limits(policy);
- return 0;
-}
-
-static int ucv2_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- struct cpufreq_freqs freqs;
- int ret;
-
- freqs.old = policy->cur;
- freqs.new = target_freq;
-
- cpufreq_freq_transition_begin(policy, &freqs);
- ret = clk_set_rate(policy->clk, target_freq * 1000);
- cpufreq_freq_transition_end(policy, &freqs, ret);
-
- return ret;
-}
-
-static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
-{
- if (policy->cpu != 0)
- return -EINVAL;
-
- policy->min = policy->cpuinfo.min_freq = 250000;
- policy->max = policy->cpuinfo.max_freq = 1000000;
- policy->clk = clk_get(NULL, "MAIN_CLK");
- return PTR_ERR_OR_ZERO(policy->clk);
-}
-
-static struct cpufreq_driver ucv2_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
- .verify = ucv2_verify_speed,
- .target = ucv2_target,
- .get = cpufreq_generic_get,
- .init = ucv2_cpu_init,
- .name = "UniCore-II",
-};
-
-static int __init ucv2_cpufreq_init(void)
-{
- return cpufreq_register_driver(&ucv2_driver);
-}
-
-arch_initcall(ucv2_cpufreq_init);
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 83c85d3d67e3..4e8b1dee7c9a 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -450,7 +450,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
policy->freq_table = freq_table[cur_cluster];
policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
- dev_pm_opp_of_register_em(policy->cpus);
+ dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) =
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 51a7e89085c0..0844fadc4be8 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -23,6 +23,16 @@ config ARM_PSCI_CPUIDLE
It provides an idle driver that is capable of detecting and
managing idle states through the PSCI firmware interface.
+config ARM_PSCI_CPUIDLE_DOMAIN
+ bool "PSCI CPU idle Domain"
+ depends on ARM_PSCI_CPUIDLE
+ depends on PM_GENERIC_DOMAINS_OF
+ default y
+ help
+	  Select this to enable the PSCI-based CPUidle driver to use PM domains,
+	  which is needed to support the hierarchical DT-based layout of the
+ idle states.
+
config ARM_BIG_LITTLE_CPUIDLE
bool "Support for ARM big.LITTLE processors"
depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index f07800cbb43f..26bbc5e74123 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -21,9 +21,8 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o
-obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle_psci.o
-cpuidle_psci-y := cpuidle-psci.o
-cpuidle_psci-$(CONFIG_PM_GENERIC_DOMAINS_OF) += cpuidle-psci-domain.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle-psci.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE_DOMAIN) += cpuidle-psci-domain.o
obj-$(CONFIG_ARM_TEGRA_CPUIDLE) += cpuidle-tegra.o
obj-$(CONFIG_ARM_QCOM_SPM_CPUIDLE) += cpuidle-qcom-spm.o
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 423f03bbeb74..b6e9649ab0da 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -12,6 +12,7 @@
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/psci.h>
@@ -26,7 +27,7 @@ struct psci_pd_provider {
};
static LIST_HEAD(psci_pd_providers);
-static bool osi_mode_enabled __initdata;
+static bool psci_pd_allow_domain_state;
static int psci_pd_power_off(struct generic_pm_domain *pd)
{
@@ -36,6 +37,9 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
if (!state->data)
return 0;
+ if (!psci_pd_allow_domain_state)
+ return -EBUSY;
+
/* OSI mode is enabled, set the corresponding domain state. */
pd_state = state->data;
psci_set_domain_state(*pd_state);
@@ -43,8 +47,8 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
return 0;
}
-static int __init psci_pd_parse_state_nodes(struct genpd_power_state *states,
- int state_count)
+static int psci_pd_parse_state_nodes(struct genpd_power_state *states,
+ int state_count)
{
int i, ret;
u32 psci_state, *psci_state_buf;
@@ -73,7 +77,7 @@ free_state:
return ret;
}
-static int __init psci_pd_parse_states(struct device_node *np,
+static int psci_pd_parse_states(struct device_node *np,
struct genpd_power_state **states, int *state_count)
{
int ret;
@@ -101,7 +105,7 @@ static void psci_pd_free_states(struct genpd_power_state *states,
kfree(states);
}
-static int __init psci_pd_init(struct device_node *np)
+static int psci_pd_init(struct device_node *np)
{
struct generic_pm_domain *pd;
struct psci_pd_provider *pd_provider;
@@ -168,7 +172,7 @@ out:
return ret;
}
-static void __init psci_pd_remove(void)
+static void psci_pd_remove(void)
{
struct psci_pd_provider *pd_provider, *it;
struct generic_pm_domain *genpd;
@@ -186,7 +190,7 @@ static void __init psci_pd_remove(void)
}
}
-static int __init psci_pd_init_topology(struct device_node *np, bool add)
+static int psci_pd_init_topology(struct device_node *np, bool add)
{
struct device_node *node;
struct of_phandle_args child, parent;
@@ -212,24 +216,33 @@ static int __init psci_pd_init_topology(struct device_node *np, bool add)
return 0;
}
-static int __init psci_pd_add_topology(struct device_node *np)
+static int psci_pd_add_topology(struct device_node *np)
{
return psci_pd_init_topology(np, true);
}
-static void __init psci_pd_remove_topology(struct device_node *np)
+static void psci_pd_remove_topology(struct device_node *np)
{
psci_pd_init_topology(np, false);
}
-static const struct of_device_id psci_of_match[] __initconst = {
+static void psci_cpuidle_domain_sync_state(struct device *dev)
+{
+ /*
+ * All devices have now been attached/probed to the PM domain topology,
+ * hence it's fine to allow domain states to be picked.
+ */
+ psci_pd_allow_domain_state = true;
+}
+
+static const struct of_device_id psci_of_match[] = {
{ .compatible = "arm,psci-1.0" },
{}
};
-static int __init psci_idle_init_domains(void)
+static int psci_cpuidle_domain_probe(struct platform_device *pdev)
{
- struct device_node *np = of_find_matching_node(NULL, psci_of_match);
+ struct device_node *np = pdev->dev.of_node;
struct device_node *node;
int ret = 0, pd_count = 0;
@@ -238,7 +251,7 @@ static int __init psci_idle_init_domains(void)
	/* Currently, the hierarchical topology is limited to use in OSI mode. */
if (!psci_has_osi_support())
- goto out;
+ return 0;
/*
* Parse child nodes for the "#power-domain-cells" property and
@@ -257,7 +270,7 @@ static int __init psci_idle_init_domains(void)
/* Bail out if not using the hierarchical CPU topology. */
if (!pd_count)
- goto out;
+ return 0;
/* Link genpd masters/subdomains to model the CPU topology. */
ret = psci_pd_add_topology(np);
@@ -272,10 +285,8 @@ static int __init psci_idle_init_domains(void)
goto remove_pd;
}
- osi_mode_enabled = true;
- of_node_put(np);
pr_info("Initialized CPU PM domain topology\n");
- return pd_count;
+ return 0;
put_node:
of_node_put(node);
@@ -283,19 +294,28 @@ remove_pd:
if (pd_count)
psci_pd_remove();
pr_err("failed to create CPU PM domains ret=%d\n", ret);
-out:
- of_node_put(np);
return ret;
}
+
+static struct platform_driver psci_cpuidle_domain_driver = {
+ .probe = psci_cpuidle_domain_probe,
+ .driver = {
+ .name = "psci-cpuidle-domain",
+ .of_match_table = psci_of_match,
+ .sync_state = psci_cpuidle_domain_sync_state,
+ },
+};
+
+static int __init psci_idle_init_domains(void)
+{
+ return platform_driver_register(&psci_cpuidle_domain_driver);
+}
subsys_initcall(psci_idle_init_domains);
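
A minimal sketch of the sync_state pattern adopted here (all names hypothetical): the callback runs once every consumer described in the firmware node has probed, which is the point where a latch like psci_pd_allow_domain_state can safely be flipped.

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static bool sketch_allow_deep_states;

	static int sketch_probe(struct platform_device *pdev)
	{
		/* build the domain topology; deep states stay blocked */
		return 0;
	}

	static void sketch_sync_state(struct device *dev)
	{
		/* all consumers have probed; deeper states are now safe */
		sketch_allow_deep_states = true;
	}

	static const struct of_device_id sketch_of_match[] = {
		{ .compatible = "vendor,sketch-pd" },	/* hypothetical */
		{}
	};

	static struct platform_driver sketch_pd_driver = {
		.probe = sketch_probe,
		.driver = {
			.name = "sketch-pd",
			.of_match_table = sketch_of_match,
			.sync_state = sketch_sync_state,
		},
	};
	module_platform_driver(sketch_pd_driver);
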
-struct device __init *psci_dt_attach_cpu(int cpu)
+struct device *psci_dt_attach_cpu(int cpu)
{
struct device *dev;
- if (!osi_mode_enabled)
- return NULL;
-
dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
if (IS_ERR_OR_NULL(dev))
return dev;
@@ -306,3 +326,11 @@ struct device __init *psci_dt_attach_cpu(int cpu)
return dev;
}
+
+void psci_dt_detach_cpu(struct device *dev)
+{
+ if (IS_ERR_OR_NULL(dev))
+ return;
+
+ dev_pm_domain_detach(dev, false);
+}
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 3806f911b61c..74463841805f 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -17,9 +17,11 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/psci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <asm/cpuidle.h>
@@ -33,7 +35,7 @@ struct psci_cpuidle_data {
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
static DEFINE_PER_CPU(u32, domain_state);
-static bool psci_cpuidle_use_cpuhp __initdata;
+static bool psci_cpuidle_use_cpuhp;
void psci_set_domain_state(u32 state)
{
@@ -104,7 +106,7 @@ static int psci_idle_cpuhp_down(unsigned int cpu)
return 0;
}
-static void __init psci_idle_init_cpuhp(void)
+static void psci_idle_init_cpuhp(void)
{
int err;
@@ -127,30 +129,13 @@ static int psci_enter_idle_state(struct cpuidle_device *dev,
return psci_enter_state(idx, state[idx]);
}
-static struct cpuidle_driver psci_idle_driver __initdata = {
- .name = "psci_idle",
- .owner = THIS_MODULE,
- /*
- * PSCI idle states relies on architectural WFI to
- * be represented as state index 0.
- */
- .states[0] = {
- .enter = psci_enter_idle_state,
- .exit_latency = 1,
- .target_residency = 1,
- .power_usage = UINT_MAX,
- .name = "WFI",
- .desc = "ARM WFI",
- }
-};
-
-static const struct of_device_id psci_idle_state_match[] __initconst = {
+static const struct of_device_id psci_idle_state_match[] = {
{ .compatible = "arm,idle-state",
.data = psci_enter_idle_state },
{ },
};
-int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
+int psci_dt_parse_state_node(struct device_node *np, u32 *state)
{
int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
@@ -167,9 +152,9 @@ int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
return 0;
}
-static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
- struct psci_cpuidle_data *data,
- unsigned int state_count, int cpu)
+static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
+ struct psci_cpuidle_data *data,
+ unsigned int state_count, int cpu)
{
	/* Currently, the hierarchical topology is limited to use in OSI mode. */
if (!psci_has_osi_support())
@@ -190,9 +175,9 @@ static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
return 0;
}
-static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
- struct device_node *cpu_node,
- unsigned int state_count, int cpu)
+static int psci_dt_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
+ struct device_node *cpu_node,
+ unsigned int state_count, int cpu)
{
int i, ret = 0;
u32 *psci_states;
@@ -200,7 +185,8 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
state_count++; /* Add WFI state too */
- psci_states = kcalloc(state_count, sizeof(*psci_states), GFP_KERNEL);
+ psci_states = devm_kcalloc(dev, state_count, sizeof(*psci_states),
+ GFP_KERNEL);
if (!psci_states)
return -ENOMEM;
@@ -213,32 +199,26 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
of_node_put(state_node);
if (ret)
- goto free_mem;
+ return ret;
pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
}
- if (i != state_count) {
- ret = -ENODEV;
- goto free_mem;
- }
+ if (i != state_count)
+ return -ENODEV;
/* Initialize optional data, used for the hierarchical topology. */
ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
if (ret < 0)
- goto free_mem;
+ return ret;
/* Idle states parsed correctly, store them in the per-cpu struct. */
data->psci_states = psci_states;
return 0;
-
-free_mem:
- kfree(psci_states);
- return ret;
}
-static __init int psci_cpu_init_idle(struct cpuidle_driver *drv,
- unsigned int cpu, unsigned int state_count)
+static int psci_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
+ unsigned int cpu, unsigned int state_count)
{
struct device_node *cpu_node;
int ret;
@@ -254,14 +234,22 @@ static __init int psci_cpu_init_idle(struct cpuidle_driver *drv,
if (!cpu_node)
return -ENODEV;
- ret = psci_dt_cpu_init_idle(drv, cpu_node, state_count, cpu);
+ ret = psci_dt_cpu_init_idle(dev, drv, cpu_node, state_count, cpu);
of_node_put(cpu_node);
return ret;
}
-static int __init psci_idle_init_cpu(int cpu)
+static void psci_cpu_deinit_idle(int cpu)
+{
+ struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
+
+ psci_dt_detach_cpu(data->dev);
+ psci_cpuidle_use_cpuhp = false;
+}
+
+static int psci_idle_init_cpu(struct device *dev, int cpu)
{
struct cpuidle_driver *drv;
struct device_node *cpu_node;
@@ -284,17 +272,26 @@ static int __init psci_idle_init_cpu(int cpu)
if (ret)
return ret;
- drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL);
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
return -ENOMEM;
+ drv->name = "psci_idle";
+ drv->owner = THIS_MODULE;
drv->cpumask = (struct cpumask *)cpumask_of(cpu);
/*
- * Initialize idle states data, starting at index 1, since
- * by default idle state 0 is the quiescent state reached
- * by the cpu by executing the wfi instruction.
- *
+	 * PSCI idle states rely on architectural WFI to be represented as
+ * state index 0.
+ */
+ drv->states[0].enter = psci_enter_idle_state;
+ drv->states[0].exit_latency = 1;
+ drv->states[0].target_residency = 1;
+ drv->states[0].power_usage = UINT_MAX;
+ strcpy(drv->states[0].name, "WFI");
+ strcpy(drv->states[0].desc, "ARM WFI");
+
+ /*
* If no DT idle states are detected (ret == 0) let the driver
* initialization fail accordingly since there is no reason to
* initialize the idle driver if only wfi is supported, the
@@ -302,48 +299,45 @@ static int __init psci_idle_init_cpu(int cpu)
* on idle entry.
*/
ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
- if (ret <= 0) {
- ret = ret ? : -ENODEV;
- goto out_kfree_drv;
- }
+ if (ret <= 0)
+ return ret ? : -ENODEV;
/*
* Initialize PSCI idle states.
*/
- ret = psci_cpu_init_idle(drv, cpu, ret);
+ ret = psci_cpu_init_idle(dev, drv, cpu, ret);
if (ret) {
pr_err("CPU %d failed to PSCI idle\n", cpu);
- goto out_kfree_drv;
+ return ret;
}
ret = cpuidle_register(drv, NULL);
if (ret)
- goto out_kfree_drv;
+ goto deinit;
cpuidle_cooling_register(drv);
return 0;
-
-out_kfree_drv:
- kfree(drv);
+deinit:
+ psci_cpu_deinit_idle(cpu);
return ret;
}
/*
- * psci_idle_init - Initializes PSCI cpuidle driver
+ * psci_idle_probe - Initializes PSCI cpuidle driver
*
 * Initializes the PSCI cpuidle driver for all CPUs; if any CPU fails
 * to register its cpuidle driver, roll back and cancel the registration
 * of all CPUs.
*/
-static int __init psci_idle_init(void)
+static int psci_cpuidle_probe(struct platform_device *pdev)
{
int cpu, ret;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
for_each_possible_cpu(cpu) {
- ret = psci_idle_init_cpu(cpu);
+ ret = psci_idle_init_cpu(&pdev->dev, cpu);
if (ret)
goto out_fail;
}
@@ -356,9 +350,34 @@ out_fail:
dev = per_cpu(cpuidle_devices, cpu);
drv = cpuidle_get_cpu_driver(dev);
cpuidle_unregister(drv);
- kfree(drv);
+ psci_cpu_deinit_idle(cpu);
}
return ret;
}
+
+static struct platform_driver psci_cpuidle_driver = {
+ .probe = psci_cpuidle_probe,
+ .driver = {
+ .name = "psci-cpuidle",
+ },
+};
+
+static int __init psci_idle_init(void)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ ret = platform_driver_register(&psci_cpuidle_driver);
+ if (ret)
+ return ret;
+
+ pdev = platform_device_register_simple("psci-cpuidle", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&psci_cpuidle_driver);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
device_initcall(psci_idle_init);
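
Because no firmware node describes the cpuidle "device", the init path registers the driver and then creates a matching platform device by name so probe() runs. A hedged sketch of that pairing (names hypothetical):

	#include <linux/err.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int sketch_probe(struct platform_device *pdev)
	{
		dev_info(&pdev->dev, "probed\n");
		return 0;
	}

	static struct platform_driver sketch_driver = {
		.probe = sketch_probe,
		.driver = { .name = "sketch-idle" },
	};

	static int __init sketch_init(void)
	{
		struct platform_device *pdev;
		int ret = platform_driver_register(&sketch_driver);

		if (ret)
			return ret;

		pdev = platform_device_register_simple("sketch-idle", -1, NULL, 0);
		if (IS_ERR(pdev)) {
			/* roll the driver back if device creation fails */
			platform_driver_unregister(&sketch_driver);
			return PTR_ERR(pdev);
		}
		return 0;
	}
	device_initcall(sketch_init);
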
diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h
index 7299a04dd467..d8e925e84c27 100644
--- a/drivers/cpuidle/cpuidle-psci.h
+++ b/drivers/cpuidle/cpuidle-psci.h
@@ -3,15 +3,18 @@
#ifndef __CPUIDLE_PSCI_H
#define __CPUIDLE_PSCI_H
+struct device;
struct device_node;
void psci_set_domain_state(u32 state);
-int __init psci_dt_parse_state_node(struct device_node *np, u32 *state);
+int psci_dt_parse_state_node(struct device_node *np, u32 *state);
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
-struct device __init *psci_dt_attach_cpu(int cpu);
+#ifdef CONFIG_ARM_PSCI_CPUIDLE_DOMAIN
+struct device *psci_dt_attach_cpu(int cpu);
+void psci_dt_detach_cpu(struct device *dev);
#else
-static inline struct device __init *psci_dt_attach_cpu(int cpu) { return NULL; }
+static inline struct device *psci_dt_attach_cpu(int cpu) { return NULL; }
+static inline void psci_dt_detach_cpu(struct device *dev) { }
#endif
#endif /* __CPUIDLE_PSCI_H */
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index 150045849d78..a12fb141875a 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -253,11 +253,13 @@ static int tegra_cpuidle_enter(struct cpuidle_device *dev,
return err ? -1 : index;
}
-static void tegra114_enter_s2idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static int tegra114_enter_s2idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
tegra_cpuidle_enter(dev, drv, index);
+
+ return 0;
}
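
The enter_s2idle callback now returns an int to match the updated cpuidle_state prototype; a hedged sketch of the expected shape (name hypothetical):

	#include <linux/cpuidle.h>

	static int sketch_enter_s2idle(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int index)
	{
		/* enter the state; 0 reports success to the s2idle core */
		return 0;
	}
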
/*
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 802b9ada4e9e..aa3a4ed07a66 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -624,6 +624,8 @@ config CRYPTO_DEV_QCE_SKCIPHER
config CRYPTO_DEV_QCE_SHA
bool
depends on CRYPTO_DEV_QCE
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
choice
prompt "Algorithms enabled for QCE acceleration"
@@ -756,10 +758,9 @@ config CRYPTO_DEV_ZYNQMP_AES
config CRYPTO_DEV_MEDIATEK
tristate "MediaTek's EIP97 Cryptographic Engine driver"
depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
- select CRYPTO_AES
+ select CRYPTO_LIB_AES
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
- select CRYPTO_CTR
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
@@ -865,4 +866,18 @@ source "drivers/crypto/hisilicon/Kconfig"
source "drivers/crypto/amlogic/Kconfig"
+config CRYPTO_DEV_SA2UL
+ tristate "Support for TI security accelerator"
+ depends on ARCH_K3 || COMPILE_TEST
+ select ARM64_CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_AES_ARM64
+ select CRYPTO_ALGAPI
+ select HW_RANDOM
+ select SG_SPLIT
+ help
+ K3 devices include a security accelerator engine that may be
+ used for crypto offload. Select this if you want to use hardware
+ acceleration for cryptographic algorithms on these devices.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 944ed7226e37..53fc115cf459 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
+obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_ARCH_STM32) += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index 7f22d305178e..b72de8939497 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -122,19 +122,17 @@ static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_requ
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
int err;
- skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
- skcipher_request_set_callback(subreq, areq->base.flags, NULL,
- NULL);
- skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
areq->cryptlen, areq->iv);
if (ctx->mode & SS_DECRYPTION)
- err = crypto_skcipher_decrypt(subreq);
+ err = crypto_skcipher_decrypt(&ctx->fallback_req);
else
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
+ err = crypto_skcipher_encrypt(&ctx->fallback_req);
return err;
}
@@ -494,23 +492,25 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
alg.crypto.base);
op->ss = algt->ss;
- crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
- sizeof(struct sun4i_cipher_req_ctx));
-
- op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(op->fallback_tfm));
return PTR_ERR(op->fallback_tfm);
}
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct sun4i_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm));
+
+
err = pm_runtime_get_sync(op->ss->dev);
if (err < 0)
goto error_pm;
return 0;
error_pm:
- crypto_free_sync_skcipher(op->fallback_tfm);
+ crypto_free_skcipher(op->fallback_tfm);
return err;
}
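
The on-stack SYNC_SKCIPHER_REQUEST_ON_STACK is replaced by a fallback request embedded at the end of the per-request context, so the fallback can complete asynchronously through the original completion callback. A hedged sketch of the sizing idiom (names hypothetical):

	#include <crypto/skcipher.h>
	#include <crypto/internal/skcipher.h>
	#include <linux/err.h>

	struct sketch_tfm_ctx {
		struct crypto_skcipher *fallback_tfm;
	};

	struct sketch_req_ctx {
		u32 mode;
		struct skcipher_request fallback_req;	/* must stay last */
	};

	static int sketch_init_tfm(struct crypto_skcipher *tfm)
	{
		struct sketch_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

		ctx->fallback_tfm = crypto_alloc_skcipher("cbc(aes)", 0,
							  CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fallback_tfm))
			return PTR_ERR(ctx->fallback_tfm);

		/* reqsize covers our context plus the fallback's own request */
		crypto_skcipher_set_reqsize(tfm, sizeof(struct sketch_req_ctx) +
					    crypto_skcipher_reqsize(ctx->fallback_tfm));
		return 0;
	}
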
@@ -518,7 +518,7 @@ void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
- crypto_free_sync_skcipher(op->fallback_tfm);
+ crypto_free_skcipher(op->fallback_tfm);
pm_runtime_put(op->ss->dev);
}
@@ -546,10 +546,10 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
op->keylen = keylen;
memcpy(op->key, key, keylen);
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
/* check and set the DES key, prepare the mode to be used */
@@ -566,10 +566,10 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
op->keylen = keylen;
memcpy(op->key, key, keylen);
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
/* check and set the 3DES key, prepare the mode to be used */
@@ -586,9 +586,9 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
op->keylen = keylen;
memcpy(op->key, key, keylen);
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 2b4c6333eb67..163962f9e284 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -170,11 +170,12 @@ struct sun4i_tfm_ctx {
u32 keylen;
u32 keymode;
struct sun4i_ss_ctx *ss;
- struct crypto_sync_skcipher *fallback_tfm;
+ struct crypto_skcipher *fallback_tfm;
};
struct sun4i_cipher_req_ctx {
u32 mode;
+ struct skcipher_request fallback_req; // keep at the end
};
struct sun4i_req_ctx {
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index a6abb701bfc6..1e4f9a58bb24 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -58,23 +58,20 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ce_alg_template *algt;
-#endif
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
algt->stat_fb++;
#endif
- skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
- skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
- skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
areq->cryptlen, areq->iv);
if (rctx->op_dir & CE_DECRYPTION)
- err = crypto_skcipher_decrypt(subreq);
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
else
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
return err;
}
@@ -335,18 +332,20 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
op->ce = algt->ce;
- sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
-
- op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(op->fallback_tfm));
return PTR_ERR(op->fallback_tfm);
}
+ sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm);
+
+
dev_info(op->ce->dev, "Fallback for %s is %s\n",
crypto_tfm_alg_driver_name(&sktfm->base),
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
op->enginectx.op.prepare_request = NULL;
@@ -358,7 +357,8 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
return 0;
error_pm:
- crypto_free_sync_skcipher(op->fallback_tfm);
+ pm_runtime_put_noidle(op->ce->dev);
+ crypto_free_skcipher(op->fallback_tfm);
return err;
}
@@ -370,7 +370,7 @@ void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
memzero_explicit(op->key, op->keylen);
kfree(op->key);
}
- crypto_free_sync_skcipher(op->fallback_tfm);
+ crypto_free_skcipher(op->fallback_tfm);
pm_runtime_put_sync_suspend(op->ce->dev);
}
@@ -400,10 +400,10 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (!op->key)
return -ENOMEM;
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -425,8 +425,8 @@ int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (!op->key)
return -ENOMEM;
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index b957061424a1..138759dc8190 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -185,7 +185,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -211,7 +212,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -236,7 +238,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -262,7 +265,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 0e9eac397e1b..963645fe4adb 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -181,12 +181,14 @@ struct sun8i_ce_dev {
/*
* struct sun8i_cipher_req_ctx - context for a skcipher request
- * @op_dir: direction (encrypt vs decrypt) for this request
- * @flow: the flow to use for this request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ * @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
u32 op_dir;
int flow;
+ struct skcipher_request fallback_req; // keep at the end
};
/*
@@ -202,7 +204,7 @@ struct sun8i_cipher_tfm_ctx {
u32 *key;
u32 keylen;
struct sun8i_ce_dev *ce;
- struct crypto_sync_skcipher *fallback_tfm;
+ struct crypto_skcipher *fallback_tfm;
};
/*
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index c89cb2ee2496..7a131675a41c 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -73,7 +73,6 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
int err;
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ss_alg_template *algt;
@@ -81,15 +80,15 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
algt->stat_fb++;
#endif
- skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
- skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
- skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
areq->cryptlen, areq->iv);
if (rctx->op_dir & SS_DECRYPTION)
- err = crypto_skcipher_decrypt(subreq);
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
else
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
return err;
}
@@ -334,18 +333,20 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
op->ss = algt->ss;
- sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
-
- op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(op->fallback_tfm));
return PTR_ERR(op->fallback_tfm);
}
+ sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm);
+
+
dev_info(op->ss->dev, "Fallback for %s is %s\n",
crypto_tfm_alg_driver_name(&sktfm->base),
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
op->enginectx.op.prepare_request = NULL;
@@ -359,7 +360,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
return 0;
error_pm:
- crypto_free_sync_skcipher(op->fallback_tfm);
+ crypto_free_skcipher(op->fallback_tfm);
return err;
}
@@ -371,7 +372,7 @@ void sun8i_ss_cipher_exit(struct crypto_tfm *tfm)
memzero_explicit(op->key, op->keylen);
kfree(op->key);
}
- crypto_free_sync_skcipher(op->fallback_tfm);
+ crypto_free_skcipher(op->fallback_tfm);
pm_runtime_put_sync(op->ss->dev);
}
@@ -401,10 +402,10 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (!op->key)
return -ENOMEM;
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -427,8 +428,8 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (!op->key)
return -ENOMEM;
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 5d9d0fedcb06..9a23515783a6 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -169,7 +169,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -195,7 +196,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -220,7 +222,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -246,7 +249,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index 29c44f279112..0405767f1f7e 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -135,17 +135,18 @@ struct sun8i_ss_dev {
/*
* struct sun8i_cipher_req_ctx - context for a skcipher request
- * @t_src: list of mapped SGs with their size
- * @t_dst: list of mapped SGs with their size
- * @p_key: DMA address of the key
- * @p_iv: DMA address of the IV
- * @method: current algorithm for this request
- * @op_mode: op_mode for this request
- * @op_dir: direction (encrypt vs decrypt) for this request
- * @flow: the flow to use for this request
- * @ivlen: size of biv
- * @keylen: keylen for this request
- * @biv: buffer which contain the IV
+ * @t_src: list of mapped SGs with their size
+ * @t_dst: list of mapped SGs with their size
+ * @p_key: DMA address of the key
+ * @p_iv: DMA address of the IV
+ * @method: current algorithm for this request
+ * @op_mode: op_mode for this request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ * @ivlen: size of biv
+ * @keylen: keylen for this request
+ * @biv:	buffer which contains the IV
+ * @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
struct sginfo t_src[MAX_SG];
@@ -159,6 +160,7 @@ struct sun8i_cipher_req_ctx {
unsigned int ivlen;
unsigned int keylen;
void *biv;
+ struct skcipher_request fallback_req; // keep at the end
};
/*
@@ -174,7 +176,7 @@ struct sun8i_cipher_tfm_ctx {
u32 *key;
u32 keylen;
struct sun8i_ss_dev *ss;
- struct crypto_sync_skcipher *fallback_tfm;
+ struct crypto_skcipher *fallback_tfm;
};
/*
diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig
index cf9547602670..cf2c676a7093 100644
--- a/drivers/crypto/amlogic/Kconfig
+++ b/drivers/crypto/amlogic/Kconfig
@@ -1,7 +1,7 @@
config CRYPTO_DEV_AMLOGIC_GXL
tristate "Support for amlogic cryptographic offloader"
depends on HAS_IOMEM
- default y if ARCH_MESON
+ default m if ARCH_MESON
select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
select CRYPTO_ECB
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index 9819dd50fbad..5880b94dcb32 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -64,22 +64,20 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq)
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct meson_alg_template *algt;
-#endif
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm);
-#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
algt = container_of(alg, struct meson_alg_template, alg.skcipher);
algt->stat_fb++;
#endif
- skcipher_request_set_sync_tfm(req, op->fallback_tfm);
- skcipher_request_set_callback(req, areq->base.flags, NULL, NULL);
- skcipher_request_set_crypt(req, areq->src, areq->dst,
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
areq->cryptlen, areq->iv);
+
if (rctx->op_dir == MESON_DECRYPT)
- err = crypto_skcipher_decrypt(req);
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
else
- err = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
return err;
}
@@ -321,15 +319,16 @@ int meson_cipher_init(struct crypto_tfm *tfm)
algt = container_of(alg, struct meson_alg_template, alg.skcipher);
op->mc = algt->mc;
- sktfm->reqsize = sizeof(struct meson_cipher_req_ctx);
-
- op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(op->fallback_tfm));
return PTR_ERR(op->fallback_tfm);
}
+ sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm);
+
op->enginectx.op.do_one_request = meson_handle_cipher_request;
op->enginectx.op.prepare_request = NULL;
op->enginectx.op.unprepare_request = NULL;
@@ -345,7 +344,7 @@ void meson_cipher_exit(struct crypto_tfm *tfm)
memzero_explicit(op->key, op->keylen);
kfree(op->key);
}
- crypto_free_sync_skcipher(op->fallback_tfm);
+ crypto_free_skcipher(op->fallback_tfm);
}
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -377,5 +376,5 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (!op->key)
return -ENOMEM;
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 411857fad8ba..466552acbbbb 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -54,7 +54,8 @@ static struct meson_alg_template mc_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -79,7 +80,8 @@ static struct meson_alg_template mc_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h
index b7f2de91ab76..dc0f142324a3 100644
--- a/drivers/crypto/amlogic/amlogic-gxl.h
+++ b/drivers/crypto/amlogic/amlogic-gxl.h
@@ -109,6 +109,7 @@ struct meson_dev {
struct meson_cipher_req_ctx {
u32 op_dir;
int flow;
+ struct skcipher_request fallback_req; // keep at the end
};
/*
@@ -126,7 +127,7 @@ struct meson_cipher_tfm_ctx {
u32 keylen;
u32 keymode;
struct meson_dev *mc;
- struct crypto_sync_skcipher *fallback_tfm;
+ struct crypto_skcipher *fallback_tfm;
};
/*
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 62ba0325a618..1a46eeddf082 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2630,7 +2630,8 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha1",
.cra_driver_name = "artpec-sha1",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2653,7 +2654,8 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha256",
.cra_driver_name = "artpec-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2677,7 +2679,8 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "artpec-hmac-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2696,7 +2699,8 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "artpec6-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2717,6 +2721,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_driver_name = "artpec6-ctr-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
@@ -2738,7 +2743,8 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "artpec6-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2759,7 +2765,8 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "xts(aes)",
.cra_driver_name = "artpec6-xts-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2790,6 +2797,7 @@ static struct aead_alg aead_algos[] = {
.cra_driver_name = "artpec-gcm-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index a353217a0d33..8a7fa1ae1ade 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3233,7 +3233,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = AES_BLOCK_SIZE,
@@ -3256,7 +3258,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = AES_BLOCK_SIZE,
@@ -3279,7 +3283,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = AES_BLOCK_SIZE,
@@ -3302,7 +3308,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(md5),cbc(des))",
.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3325,7 +3333,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha1),cbc(des))",
.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3348,7 +3358,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha224),cbc(des))",
.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3371,7 +3383,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha256),cbc(des))",
.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3394,7 +3408,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha384),cbc(des))",
.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3417,7 +3433,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha512),cbc(des))",
.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3440,7 +3458,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3463,7 +3483,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3486,7 +3508,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3509,7 +3533,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3532,7 +3558,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3555,7 +3583,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3811,7 +3841,8 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-iproc",
.cra_blocksize = MD5_BLOCK_WORDS * 4,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.cipher_info = {
@@ -4508,7 +4539,9 @@ static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
crypto->base.cra_priority = cipher_pri;
crypto->base.cra_alignmask = 0;
crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
- crypto->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ crypto->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
crypto->init = skcipher_init_tfm;
crypto->exit = skcipher_exit_tfm;
@@ -4547,7 +4580,8 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg)
hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
hash->halg.base.cra_init = ahash_cra_init;
hash->halg.base.cra_exit = generic_cra_exit;
- hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
hash->halg.statesize = sizeof(struct spu_hash_export_s);
if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
@@ -4591,7 +4625,7 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg)
aead->base.cra_alignmask = 0;
aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
- aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
+ aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
/* setkey set in alg initialization */
aead->setauthsize = aead_setauthsize;
aead->encrypt = aead_encrypt;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b2f9882bc010..91feda5b63f6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -810,12 +810,6 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
-static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher,
- const u8 *key, unsigned int keylen)
-{
- return skcipher_setkey(skcipher, key, keylen, 0);
-}
-
static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
@@ -838,7 +832,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- dev_err(jrdev, "key size mismatch\n");
+ dev_dbg(jrdev, "key size mismatch\n");
return -EINVAL;
}
@@ -1967,21 +1961,6 @@ static struct caam_skcipher_alg driver_algs[] = {
},
.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
},
- {
- .skcipher = {
- .base = {
- .cra_name = "ecb(arc4)",
- .cra_driver_name = "ecb-arc4-caam",
- .cra_blocksize = ARC4_BLOCK_SIZE,
- },
- .setkey = arc4_skcipher_setkey,
- .encrypt = skcipher_encrypt,
- .decrypt = skcipher_decrypt,
- .min_keysize = ARC4_MIN_KEY_SIZE,
- .max_keysize = ARC4_MAX_KEY_SIZE,
- },
- .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
- },
};
static struct caam_aead_alg driver_aeads[] = {
@@ -3433,7 +3412,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_cra_init;
alg->exit = caam_cra_exit;
@@ -3446,7 +3426,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_aead_init;
alg->exit = caam_aead_exit;
@@ -3457,7 +3438,6 @@ int caam_algapi_init(struct device *ctrldev)
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
int i = 0, err = 0;
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
- u32 arc4_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false, gcm_support;
@@ -3477,8 +3457,6 @@ int caam_algapi_init(struct device *ctrldev)
CHA_ID_LS_DES_SHIFT;
aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
- arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
- CHA_ID_LS_ARC4_SHIFT;
ccha_inst = 0;
ptha_inst = 0;
@@ -3499,7 +3477,6 @@ int caam_algapi_init(struct device *ctrldev)
md_inst = mdha & CHA_VER_NUM_MASK;
ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
- arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
gcm_support = aesa & CHA_VER_MISC_AES_GCM;
}
@@ -3522,10 +3499,6 @@ int caam_algapi_init(struct device *ctrldev)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
- /* Skip ARC4 algorithms if not supported by device */
- if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
- continue;
-
/*
* Check support for AES modes not available
* on LP devices.
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 27e36bdf6163..bb1c0106a95c 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -728,7 +728,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
int ret = 0;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- dev_err(jrdev, "key size mismatch\n");
+ dev_dbg(jrdev, "key size mismatch\n");
return -EINVAL;
}
@@ -2502,7 +2502,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_cra_init;
alg->exit = caam_cra_exit;
@@ -2515,7 +2516,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_aead_init;
alg->exit = caam_aead_exit;
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 28669cbecf77..66ae1d581168 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -1058,7 +1058,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- dev_err(dev, "key size mismatch\n");
+ dev_dbg(dev, "key size mismatch\n");
return -EINVAL;
}
@@ -2912,7 +2912,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_cra_init_skcipher;
alg->exit = caam_cra_exit;
@@ -2925,7 +2926,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_cra_init_aead;
alg->exit = caam_cra_exit_aead;
@@ -4004,7 +4006,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
struct dpaa2_sg_entry *sg_table;
- int ret;
+ int ret = -ENOMEM;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
@@ -4017,7 +4019,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
DMA_TO_DEVICE);
if (!mapped_nents) {
dev_err(ctx->dev, "unable to DMA map source\n");
- return -ENOMEM;
+ return ret;
}
} else {
mapped_nents = 0;
@@ -4027,7 +4029,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
edesc = qi_cache_zalloc(GFP_DMA | flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
- return -ENOMEM;
+ return ret;
}
edesc->src_nents = src_nents;
@@ -4082,7 +4084,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
unmap:
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
- return -ENOMEM;
+ return ret;
}
static int ahash_update_first(struct ahash_request *req)
@@ -4498,7 +4500,11 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct caam_hash_state));
- return ahash_set_sh_desc(ahash);
+ /*
+ * For keyed hash algorithms, shared descriptors
+ * will be created later in the setkey() callback
+ */
+ return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@@ -4547,7 +4553,7 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_flags = CRYPTO_ALG_ASYNC;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
t_alg->alg_type = template->alg_type;
t_alg->dev = dev;
@@ -4697,6 +4703,13 @@ static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
struct device *dev = priv->dev;
struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ int err;
+
+ if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
+ err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err)
+ dev_err(dev, "dpseci_reset() failed\n");
+ }
dpaa2_dpseci_congestion_free(priv);
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
@@ -4894,6 +4907,14 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
+ if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
+ err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_reset() failed\n");
+ goto err_get_vers;
+ }
+ }
+
err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
&priv->dpseci_attr);
if (err) {
@@ -5221,7 +5242,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
dev_warn(dev, "%s hash alg allocation failed: %d\n",
- alg->driver_name, err);
+ alg->hmac_driver_name, err);
continue;
}
@@ -5384,6 +5405,7 @@ static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
},
{ .vendor = 0x0 }
};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
static struct fsl_mc_driver dpaa2_caam_driver = {
.driver = {
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 27ff4a3d037e..e8a6d8bc43b5 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1927,7 +1927,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_flags = CRYPTO_ALG_ASYNC;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
t_alg->alg_type = template->alg_type;
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 60e2a54c19f1..c3c22a8de4c0 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -43,7 +43,6 @@
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
-#include <crypto/arc4.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/rsa.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index f3d20b7645e0..94502f1d4b48 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -469,7 +469,7 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
* pipeline to a depth of 1 (from its default of 4) to preclude this situation
* from occurring.
*/
-static void handle_imx6_err005766(u32 *mcr)
+static void handle_imx6_err005766(u32 __iomem *mcr)
{
if (of_machine_is_compatible("fsl,imx6q") ||
of_machine_is_compatible("fsl,imx6dl") ||
@@ -527,11 +527,21 @@ static const struct caam_imx_data caam_imx6ul_data = {
.num_clks = ARRAY_SIZE(caam_imx6ul_clks),
};
+static const struct clk_bulk_data caam_vf610_clks[] = {
+ { .id = "ipg" },
+};
+
+static const struct caam_imx_data caam_vf610_data = {
+ .clks = caam_vf610_clks,
+ .num_clks = ARRAY_SIZE(caam_vf610_clks),
+};
+
static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
{ .soc_id = "i.MX6*", .data = &caam_imx6_data },
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
+ { .soc_id = "VF*", .data = &caam_vf610_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
};
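soc_device_match() walks this table in order, matching the strings as glob patterns, which is why the specific "i.MX6UL" entry must stay ahead of the "i.MX6*" wildcard; the new "VF*" entry picks up Vybrid parts such as the VF610. A sketch of how such a table is typically consumed (the actual lookup lives elsewhere in ctrl.c):

	const struct soc_device_attribute *match;
	const struct caam_imx_data *imx_data = NULL;

	match = soc_device_match(caam_imx_soc_table);
	if (match && match->data)
		imx_data = match->data;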
diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
index 8a68531ded0b..039df6c5790c 100644
--- a/drivers/crypto/caam/dpseci.c
+++ b/drivers/crypto/caam/dpseci.c
@@ -104,6 +104,24 @@ int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
}
/**
+ * dpseci_reset() - Reset the DPSECI, returning the object to its initial state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
* dpseci_is_enabled() - Check if the DPSECI is enabled.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
index 4550e134d166..6dcd9be8144b 100644
--- a/drivers/crypto/caam/dpseci.h
+++ b/drivers/crypto/caam/dpseci.h
@@ -59,6 +59,8 @@ int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
int *en);
diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
index 6ab77ead6e3d..71a007c85adb 100644
--- a/drivers/crypto/caam/dpseci_cmd.h
+++ b/drivers/crypto/caam/dpseci_cmd.h
@@ -33,6 +33,7 @@
#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
+#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
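The reset support lands in three pieces: the DPSECI_CMDID_RESET command ID above, the dpseci_reset() wrapper in dpseci.c, and version-gated call sites in caamalg_qi2.c that only issue the command when the reported DPSECI version implements it. Condensed from the qi2 hunks earlier in this diff:

	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err)
			dev_err(dev, "dpseci_reset() failed\n");
	}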
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 17c6108b6d41..72db90176b1a 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -212,6 +212,9 @@ static const char * const rng_err_id_list[] = {
"Prediction resistance and test request",
"Uninstantiate",
"Secure key generation",
+ "",
+ "Hardware error",
+ "Continuous check"
};
static int report_ccb_status(struct device *jrdev, const u32 status,
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 4af22e7ceb4f..bf6b03b17251 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -339,8 +339,7 @@ EXPORT_SYMBOL(caam_jr_free);
* caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS
* if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's
* descriptor.
- * @dev: device of the job ring to be used. This device should have
- * been assigned prior by caam_jr_register().
+ * @dev: struct device of the job ring to be used
* @desc: points to a job descriptor that executes our request. All
* descriptors (and all referenced data) must be in a DMAable
* region, and all data references must be physical addresses
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 0f810bc13b2b..af61f3a2c0d4 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -173,9 +173,14 @@ static inline u64 rd_reg64(void __iomem *reg)
static inline u64 cpu_to_caam_dma64(dma_addr_t value)
{
- if (caam_imx)
- return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
- (u64)cpu_to_caam32(upper_32_bits(value)));
+ if (caam_imx) {
+ u64 ret_val = (u64)cpu_to_caam32(lower_32_bits(value)) << 32;
+
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ ret_val |= (u64)cpu_to_caam32(upper_32_bits(value));
+
+ return ret_val;
+ }
return cpu_to_caam64(value);
}
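A worked example, assuming a little-endian CPU with caam_imx set (the CAAM block on i.MX is big-endian, so cpu_to_caam32() byte-swaps each half):

	/* value = 0x0000000123456780 (dma_addr_t)
	 *   lower_32_bits(value) = 0x23456780, swapped -> 0x80674523
	 *   upper_32_bits(value) = 0x00000001, swapped -> 0x01000000
	 * returned u64           = 0x8067452301000000
	 * stored little-endian   = 00 00 00 01 23 45 67 80
	 * i.e. the original address in big-endian byte order, as the
	 * hardware expects. The new IS_ENABLED() guard just skips the
	 * upper half when dma_addr_t is only 32 bits wide, where
	 * upper_32_bits() has nothing to contribute.
	 */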
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 1be1adffff1d..5af0dc2a8909 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -99,10 +99,10 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
struct fc_context *fctx = &rctx->fctx;
- u64 *offset_control = &rctx->control_word;
u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
struct cpt_request_info *req_info = &rctx->cpt_req;
- u64 *ctrl_flags = NULL;
+ __be64 *ctrl_flags = NULL;
+ __be64 *offset_control;
req_info->ctrl.s.grp = 0;
req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER;
@@ -126,9 +126,10 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
else
memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
- ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
- *ctrl_flags = cpu_to_be64(*ctrl_flags);
+ ctrl_flags = (__be64 *)&fctx->enc.enc_ctrl.flags;
+ *ctrl_flags = cpu_to_be64(fctx->enc.enc_ctrl.flags);
+ offset_control = (__be64 *)&rctx->control_word;
*offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16));
/* Storing Packet Data Information in offset
* Control Word First 8 bytes
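These conversions are behaviourally equivalent to the old in-place swaps; the gain is static checking. With the pointers annotated as __be64, sparse (make C=1) can flag any assignment that forgets the byte swap, roughly:

	u64 flags = fctx->enc.enc_ctrl.flags;
	__be64 *p = (__be64 *)&fctx->enc.enc_ctrl.flags;

	*p = cpu_to_be64(flags);	/* ok */
	*p = flags;			/* sparse: incorrect type in assignment */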
@@ -200,6 +201,7 @@ static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc)
int status;
memset(req_info, 0, sizeof(struct cpt_request_info));
+ req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
memset(fctx, 0, sizeof(struct fc_context));
create_input_list(req, enc, enc_iv_len);
create_output_list(req, enc_iv_len);
@@ -339,7 +341,8 @@ static int cvm_enc_dec_init(struct crypto_skcipher *tfm)
}
static struct skcipher_alg algs[] = { {
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
.base.cra_alignmask = 7,
@@ -356,7 +359,8 @@ static struct skcipher_alg algs[] = { {
.decrypt = cvm_decrypt,
.init = cvm_enc_dec_init,
}, {
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
.base.cra_alignmask = 7,
@@ -373,7 +377,8 @@ static struct skcipher_alg algs[] = { {
.decrypt = cvm_decrypt,
.init = cvm_enc_dec_init,
}, {
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
.base.cra_alignmask = 7,
@@ -389,7 +394,8 @@ static struct skcipher_alg algs[] = { {
.decrypt = cvm_decrypt,
.init = cvm_enc_dec_init,
}, {
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
.base.cra_alignmask = 7,
@@ -406,7 +412,8 @@ static struct skcipher_alg algs[] = { {
.decrypt = cvm_decrypt,
.init = cvm_enc_dec_init,
}, {
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
.base.cra_alignmask = 7,
@@ -423,7 +430,8 @@ static struct skcipher_alg algs[] = { {
.decrypt = cvm_decrypt,
.init = cvm_enc_dec_init,
}, {
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
.base.cra_alignmask = 7,
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 7a24019356b5..3878b01e19e1 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -4,6 +4,7 @@
*/
#include "cptvf.h"
+#include "cptvf_algs.h"
#include "request_manager.h"
/**
@@ -133,7 +134,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
/* Setup gather (input) components */
g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
- info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL);
+ info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->gather_components) {
ret = -ENOMEM;
goto scatter_gather_clean;
@@ -150,7 +151,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
/* Setup scatter (output) components */
s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
- info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL);
+ info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->scatter_components) {
ret = -ENOMEM;
goto scatter_gather_clean;
@@ -167,17 +168,16 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
/* Create and initialize DPTR */
info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
- info->in_buffer = kzalloc(info->dlen, GFP_KERNEL);
+ info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->in_buffer) {
ret = -ENOMEM;
goto scatter_gather_clean;
}
- ((u16 *)info->in_buffer)[0] = req->outcnt;
- ((u16 *)info->in_buffer)[1] = req->incnt;
- ((u16 *)info->in_buffer)[2] = 0;
- ((u16 *)info->in_buffer)[3] = 0;
- *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer);
+ ((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt);
+ ((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt);
+ ((__be16 *)info->in_buffer)[2] = 0;
+ ((__be16 *)info->in_buffer)[3] = 0;
memcpy(&info->in_buffer[8], info->gather_components,
g_sz_bytes);
@@ -195,7 +195,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
}
/* Create and initialize RPTR */
- info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL);
+ info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->out_buffer) {
ret = -ENOMEM;
goto scatter_gather_clean;
@@ -421,7 +421,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
struct cpt_vq_command vq_cmd;
union cpt_inst_s cptinst;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (unlikely(!info)) {
dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
return -ENOMEM;
@@ -443,7 +443,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
* Get buffer for union cpt_res_s response
* structure and its physical address
*/
- info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
+ info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (unlikely(!info->completion_addr)) {
dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
ret = -ENOMEM;
@@ -470,8 +470,6 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
vq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
- /* 64-bit swap for microcode data reads, not needed for addresses*/
- vq_cmd.cmd.u64 = cpu_to_be64(vq_cmd.cmd.u64);
vq_cmd.dptr = info->dptr_baddr;
vq_cmd.rptr = info->rptr_baddr;
vq_cmd.cptr.u64 = 0;
diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h
index 3514b082eca7..8d40e4ba3af1 100644
--- a/drivers/crypto/cavium/cpt/request_manager.h
+++ b/drivers/crypto/cavium/cpt/request_manager.h
@@ -62,6 +62,8 @@ struct cpt_request_info {
union ctrl_info ctrl; /* User control information */
struct cptvf_request req; /* Request Information (Core specific) */
+ bool may_sleep;
+
struct buf_ptr in[MAX_BUF_CNT];
struct buf_ptr out[MAX_BUF_CNT];
@@ -73,16 +75,16 @@ struct sglist_component {
union {
u64 len;
struct {
- u16 len0;
- u16 len1;
- u16 len2;
- u16 len3;
+ __be16 len0;
+ __be16 len1;
+ __be16 len2;
+ __be16 len3;
} s;
} u;
- u64 ptr0;
- u64 ptr1;
- u64 ptr2;
- u64 ptr3;
+ __be64 ptr0;
+ __be64 ptr1;
+ __be64 ptr2;
+ __be64 ptr3;
};
struct cpt_info_buffer {
@@ -112,10 +114,10 @@ struct cpt_info_buffer {
union vq_cmd_word0 {
u64 u64;
struct {
- u16 opcode;
- u16 param1;
- u16 param2;
- u16 dlen;
+ __be16 opcode;
+ __be16 param1;
+ __be16 param2;
+ __be16 dlen;
} s;
};
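Every allocation on this path now honours the caller's sleeping context through the new may_sleep flag, which cvm_enc_dec() derives from CRYPTO_TFM_REQ_MAY_SLEEP. The repeated ternary could equally be hidden behind a small helper; a sketch, not part of the driver itself:

	static inline void *cpt_req_zalloc(struct cpt_request_info *req,
					   size_t len)
	{
		return kzalloc(len, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	}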
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index dce5423a5883..1be2571363fe 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -522,7 +522,7 @@ static struct aead_alg nitrox_aeads[] = { {
.cra_name = "gcm(aes)",
.cra_driver_name = "n5_aes_gcm",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -541,7 +541,7 @@ static struct aead_alg nitrox_aeads[] = { {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "n5_rfc4106",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 18088b0a2257..a553ac65f324 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -388,7 +388,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "cbc(aes)",
.cra_driver_name = "n5_cbc(aes)",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -407,7 +407,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "ecb(aes)",
.cra_driver_name = "n5_ecb(aes)",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -426,7 +426,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "cfb(aes)",
.cra_driver_name = "n5_cfb(aes)",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -445,7 +445,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "xts(aes)",
.cra_driver_name = "n5_xts(aes)",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -464,7 +464,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "n5_rfc3686(ctr(aes))",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -483,7 +483,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "cts(cbc(aes))",
.cra_driver_name = "n5_cts(cbc(aes))",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -502,7 +502,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "n5_cbc(des3_ede)",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -521,7 +521,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "n5_ecb(des3_ede)",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 5eba7ee49e81..11a305fa19e6 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -378,6 +378,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
base->cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = AES_BLOCK_SIZE;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 9e8f07c1afac..1c1c939f5c39 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -172,6 +172,7 @@ static struct aead_alg ccp_aes_gcm_defaults = {
.maxauthsize = AES_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 04b2517df955..6849261ca47d 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -98,7 +98,7 @@ static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
ctx->u.aes.key_len = key_len / 2;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
- return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
+ return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
static int ccp_aes_xts_crypt(struct skcipher_request *req,
@@ -145,20 +145,19 @@ static int ccp_aes_xts_crypt(struct skcipher_request *req,
(ctx->u.aes.key_len != AES_KEYSIZE_256))
fallback = 1;
if (fallback) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq,
- ctx->u.aes.tfm_skcipher);
-
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
*/
- skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
- ret = encrypt ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
+ skcipher_request_set_tfm(&rctx->fallback_req,
+ ctx->u.aes.tfm_skcipher);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+ crypto_skcipher_decrypt(&rctx->fallback_req);
return ret;
}
@@ -198,13 +197,12 @@ static int ccp_aes_xts_decrypt(struct skcipher_request *req)
static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_sync_skcipher *fallback_tfm;
+ struct crypto_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
ctx->u.aes.key_len = 0;
- fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0,
- CRYPTO_ALG_ASYNC |
+ fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
pr_warn("could not load fallback driver xts(aes)\n");
@@ -212,7 +210,8 @@ static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
}
ctx->u.aes.tfm_skcipher = fallback_tfm;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) +
+ crypto_skcipher_reqsize(fallback_tfm));
return 0;
}
@@ -221,7 +220,7 @@ static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
{
struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
+ crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
}
static int ccp_register_aes_xts_alg(struct list_head *head,
@@ -243,6 +242,7 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
alg->base.cra_blocksize = AES_BLOCK_SIZE;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 51e12fbd1159..e6dcd8cedd53 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -212,6 +212,7 @@ static const struct skcipher_alg ccp_aes_defaults = {
.init = ccp_aes_init_tfm,
.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
@@ -229,6 +230,7 @@ static const struct skcipher_alg ccp_aes_rfc3686_defaults = {
.init = ccp_aes_rfc3686_init_tfm,
.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index 9c129defdb50..ec97daf0fcb7 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -136,6 +136,7 @@ static const struct skcipher_alg ccp_des3_defaults = {
.init = ccp_des3_init_tfm,
.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index b0cc2bd73af8..8fbfdb9e8cd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -19,6 +19,7 @@
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
+#include <linux/string.h>
#include "ccp-crypto.h"
@@ -424,7 +425,7 @@ static int ccp_register_hmac_alg(struct list_head *head,
*ccp_alg = *base_alg;
INIT_LIST_HEAD(&ccp_alg->entry);
- strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);
+ strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);
alg = &ccp_alg->alg;
alg->setkey = ccp_sha_setkey;
@@ -486,6 +487,7 @@ static int ccp_register_sha_alg(struct list_head *head,
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
base->cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = def->block_size;
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 90a009e6b5c1..aed3d2192d01 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -89,7 +89,7 @@ static inline struct ccp_crypto_ahash_alg *
/***** AES related defines *****/
struct ccp_aes_ctx {
/* Fallback cipher for XTS with unsupported unit sizes */
- struct crypto_sync_skcipher *tfm_skcipher;
+ struct crypto_skcipher *tfm_skcipher;
enum ccp_engine engine;
enum ccp_aes_type type;
@@ -121,6 +121,8 @@ struct ccp_aes_req_ctx {
u8 rfc3686_iv[AES_BLOCK_SIZE];
struct ccp_cmd cmd;
+
+ struct skcipher_request fallback_req; // keep at the end
};
struct ccp_aes_cmac_req_ctx {
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 82ac4c14c04c..7838f63bab32 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -221,8 +221,8 @@ static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
static int ccp5_do_cmd(struct ccp5_desc *desc,
struct ccp_cmd_queue *cmd_q)
{
- u32 *mP;
- __le32 *dP;
+ __le32 *mP;
+ u32 *dP;
u32 tail;
int i;
int ret = 0;
@@ -235,8 +235,8 @@ static int ccp5_do_cmd(struct ccp5_desc *desc,
}
mutex_lock(&cmd_q->q_mutex);
- mP = (u32 *) &cmd_q->qbase[cmd_q->qidx];
- dP = (__le32 *) desc;
+ mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx];
+ dP = (u32 *)desc;
for (i = 0; i < 8; i++)
mP[i] = cpu_to_le32(dP[i]); /* handle endianness */
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 19ac509ed76e..0971ee60f840 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -531,7 +531,6 @@ int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
return len;
}
-#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
unsigned int suspended = 0;
@@ -549,7 +548,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
return ccp->cmd_q_count == suspended;
}
-int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
+int ccp_dev_suspend(struct sp_device *sp)
{
struct ccp_device *ccp = sp->ccp_data;
unsigned long flags;
@@ -601,7 +600,6 @@ int ccp_dev_resume(struct sp_device *sp)
return 0;
}
-#endif
int ccp_dev_init(struct sp_device *sp)
{
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 3f68262d9ab4..a5d9123a22ea 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -469,6 +469,7 @@ struct ccp_sg_workarea {
unsigned int sg_used;
struct scatterlist *dma_sg;
+ struct scatterlist *dma_sg_head;
struct device *dma_dev;
unsigned int dma_count;
enum dma_data_direction dma_dir;
@@ -596,8 +597,8 @@ struct dword3 {
};
union dword4 {
- __le32 dst_lo; /* NON-SHA */
- __le32 sha_len_lo; /* SHA */
+ u32 dst_lo; /* NON-SHA */
+ u32 sha_len_lo; /* SHA */
};
union dword5 {
@@ -607,7 +608,7 @@ union dword5 {
unsigned int rsvd1:13;
unsigned int fixed:1;
} fields;
- __le32 sha_len_hi;
+ u32 sha_len_hi;
};
struct dword7 {
@@ -618,12 +619,12 @@ struct dword7 {
struct ccp5_desc {
struct dword0 dw0;
- __le32 length;
- __le32 src_lo;
+ u32 length;
+ u32 src_lo;
struct dword3 dw3;
union dword4 dw4;
union dword5 dw5;
- __le32 key_lo;
+ u32 key_lo;
struct dword7 dw7;
};
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 422193690fd4..bd270e66185e 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -63,7 +63,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp)
static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
if (wa->dma_count)
- dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
+ dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);
wa->dma_count = 0;
}
@@ -92,6 +92,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
return 0;
wa->dma_sg = sg;
+ wa->dma_sg_head = sg;
wa->dma_dev = dev;
wa->dma_dir = dma_dir;
wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
@@ -104,14 +105,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
unsigned int nbytes = min_t(u64, len, wa->bytes_left);
+ unsigned int sg_combined_len = 0;
if (!wa->sg)
return;
wa->sg_used += nbytes;
wa->bytes_left -= nbytes;
- if (wa->sg_used == wa->sg->length) {
- wa->sg = sg_next(wa->sg);
+ if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
+ /* Advance to the next DMA scatterlist entry */
+ wa->dma_sg = sg_next(wa->dma_sg);
+
+ /* In the case that the DMA mapped scatterlist has entries
+ * that have been merged, the non-DMA mapped scatterlist
+ * must be advanced multiple times for each merged entry.
+ * This ensures that the current non-DMA mapped entry
+ * corresponds to the current DMA mapped entry.
+ */
+ do {
+ sg_combined_len += wa->sg->length;
+ wa->sg = sg_next(wa->sg);
+ } while (wa->sg_used > sg_combined_len);
+
wa->sg_used = 0;
}
}
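A concrete illustration of why the inner loop is needed: suppose the caller handed in three 4 KiB entries and the DMA layer merged the first two into a single 8 KiB segment:

	/*   wa->sg (CPU view):      [4K] -> [4K] -> [4K]
	 *   wa->dma_sg (DMA view):  [    8K    ] -> [4K]
	 *
	 * When sg_used reaches 8 KiB, dma_sg advances once, but the
	 * CPU-side list must be walked twice (4K + 4K) before the two
	 * views refer to the same data again, which is exactly what the
	 * do/while above does.
	 */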
@@ -299,7 +314,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
/* Update the structures and generate the count */
buf_count = 0;
while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
- nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
+ nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
dm_wa->length - buf_count);
nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
@@ -331,11 +346,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
* and destination. The resulting len values will always be <= UINT_MAX
* because the dma length is an unsigned int.
*/
- sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
+ sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
if (dst) {
- sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
+ sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
op_len = min(sg_src_len, sg_dst_len);
} else {
@@ -365,7 +380,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
/* Enough data in the sg element, but we need to
* adjust for any previously copied data
*/
- op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
+ op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
op->src.u.dma.offset = src->sg_wa.sg_used;
op->src.u.dma.length = op_len & ~(block_size - 1);
@@ -386,7 +401,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
/* Enough room in the sg element, but we need to
* adjust for any previously used area
*/
- op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
+ op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
op->dst.u.dma.offset = dst->sg_wa.sg_used;
op->dst.u.dma.length = op->src.u.dma.length;
}
@@ -617,13 +632,12 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
struct ccp_data src, dst;
struct ccp_data aad;
struct ccp_op op;
-
- unsigned long long *final;
unsigned int dm_offset;
unsigned int authsize;
unsigned int jobid;
unsigned int ilen;
bool in_place = true; /* Default value */
+ __be64 *final;
int ret;
struct scatterlist *p_inp, sg_inp[2];
@@ -825,7 +839,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
DMA_BIDIRECTIONAL);
if (ret)
goto e_dst;
- final = (unsigned long long *) final_wa.address;
+ final = (__be64 *)final_wa.address;
final[0] = cpu_to_be64(aes->aad_len * 8);
final[1] = cpu_to_be64(ilen * 8);
@@ -1308,7 +1322,6 @@ ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
return -EINVAL;
}
- ret = -EIO;
/* Zero out all the fields of the command desc */
memset(&op, 0, sizeof(op));
@@ -2028,7 +2041,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
dst.sg_wa.sg_used = 0;
for (i = 1; i <= src.sg_wa.dma_count; i++) {
if (!dst.sg_wa.sg ||
- (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
+ (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
ret = -EINVAL;
goto e_dst;
}
@@ -2054,8 +2067,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_dst;
}
- dst.sg_wa.sg_used += src.sg_wa.sg->length;
- if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
+ dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
+ if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
dst.sg_wa.sg_used = 0;
}
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index ce42675d3274..6284a15e5047 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -211,13 +211,12 @@ void sp_destroy(struct sp_device *sp)
sp_del_device(sp);
}
-#ifdef CONFIG_PM
-int sp_suspend(struct sp_device *sp, pm_message_t state)
+int sp_suspend(struct sp_device *sp)
{
int ret;
if (sp->dev_vdata->ccp_vdata) {
- ret = ccp_dev_suspend(sp, state);
+ ret = ccp_dev_suspend(sp);
if (ret)
return ret;
}
@@ -237,7 +236,6 @@ int sp_resume(struct sp_device *sp)
return 0;
}
-#endif
struct sp_device *sp_get_psp_master_device(void)
{
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index f913f1494af9..0218d0670eee 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -119,7 +119,7 @@ int sp_init(struct sp_device *sp);
void sp_destroy(struct sp_device *sp);
struct sp_device *sp_get_master(void);
-int sp_suspend(struct sp_device *sp, pm_message_t state);
+int sp_suspend(struct sp_device *sp);
int sp_resume(struct sp_device *sp);
int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler,
const char *name, void *data);
@@ -134,7 +134,7 @@ struct sp_device *sp_get_psp_master_device(void);
int ccp_dev_init(struct sp_device *sp);
void ccp_dev_destroy(struct sp_device *sp);
-int ccp_dev_suspend(struct sp_device *sp, pm_message_t state);
+int ccp_dev_suspend(struct sp_device *sp);
int ccp_dev_resume(struct sp_device *sp);
#else /* !CONFIG_CRYPTO_DEV_SP_CCP */
@@ -145,7 +145,7 @@ static inline int ccp_dev_init(struct sp_device *sp)
}
static inline void ccp_dev_destroy(struct sp_device *sp) { }
-static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
+static inline int ccp_dev_suspend(struct sp_device *sp)
{
return 0;
}
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index cb6cb47053f4..f471dbaef1fb 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -252,23 +252,19 @@ static void sp_pci_remove(struct pci_dev *pdev)
sp_free_irqs(sp);
}
-#ifdef CONFIG_PM
-static int sp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused sp_pci_suspend(struct device *dev)
{
- struct device *dev = &pdev->dev;
struct sp_device *sp = dev_get_drvdata(dev);
- return sp_suspend(sp, state);
+ return sp_suspend(sp);
}
-static int sp_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused sp_pci_resume(struct device *dev)
{
- struct device *dev = &pdev->dev;
struct sp_device *sp = dev_get_drvdata(dev);
return sp_resume(sp);
}
-#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
static const struct sev_vdata sevv1 = {
@@ -365,15 +361,14 @@ static const struct pci_device_id sp_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, sp_pci_table);
+static SIMPLE_DEV_PM_OPS(sp_pci_pm_ops, sp_pci_suspend, sp_pci_resume);
+
static struct pci_driver sp_pci_driver = {
.name = "ccp",
.id_table = sp_pci_table,
.probe = sp_pci_probe,
.remove = sp_pci_remove,
-#ifdef CONFIG_PM
- .suspend = sp_pci_suspend,
- .resume = sp_pci_resume,
-#endif
+ .driver.pm = &sp_pci_pm_ops,
};
int sp_pci_init(void)
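The sp-pci changes above are the standard conversion from legacy PCI .suspend/.resume callbacks to dev_pm_ops. In generic form, with illustrative names:

	static int __maybe_unused my_suspend(struct device *dev)
	{
		return 0;	/* quiesce the device */
	}

	static int __maybe_unused my_resume(struct device *dev)
	{
		return 0;	/* bring the device back up */
	}

	static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

	static struct pci_driver my_driver = {
		.name		= "mydev",
		.driver.pm	= &my_pm_ops,
	};

With CONFIG_PM_SLEEP disabled, SIMPLE_DEV_PM_OPS leaves the ops table empty and __maybe_unused silences the resulting unused-function warnings, which is what allows the #ifdef CONFIG_PM blocks to go away.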
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 831aac1393a2..9dba52fbee99 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -207,7 +207,7 @@ static int sp_platform_suspend(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct sp_device *sp = dev_get_drvdata(dev);
- return sp_suspend(sp, state);
+ return sp_suspend(sp);
}
static int sp_platform_resume(struct platform_device *pdev)
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 872ea3ff1c6b..076669dc1035 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -45,7 +45,6 @@ enum cc_key_type {
struct cc_cipher_ctx {
struct cc_drvdata *drvdata;
int keylen;
- int key_round_number;
int cipher_mode;
int flow_mode;
unsigned int flags;
@@ -56,6 +55,8 @@ struct cc_cipher_ctx {
struct cc_cpp_key_info cpp;
};
struct crypto_shash *shash_tfm;
+ struct crypto_skcipher *fallback_tfm;
+ bool fallback_on;
};
static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
@@ -75,7 +76,6 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
case CC_AES_128_BIT_KEY_SIZE:
case CC_AES_192_BIT_KEY_SIZE:
if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
- ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
return 0;
break;
@@ -159,22 +159,49 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
skcipher_alg.base);
struct device *dev = drvdata_to_dev(cc_alg->drvdata);
unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
- int rc = 0;
+ unsigned int fallback_req_size = 0;
dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
crypto_tfm_alg_name(tfm));
- crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
- sizeof(struct cipher_req_ctx));
-
ctx_p->cipher_mode = cc_alg->cipher_mode;
ctx_p->flow_mode = cc_alg->flow_mode;
ctx_p->drvdata = cc_alg->drvdata;
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ const char *name = crypto_tfm_alg_name(tfm);
+
+ /* Alloc hash tfm for essiv */
+ ctx_p->shash_tfm = crypto_alloc_shash("sha256", 0, 0);
+ if (IS_ERR(ctx_p->shash_tfm)) {
+ dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+ return PTR_ERR(ctx_p->shash_tfm);
+ }
+ max_key_buf_size <<= 1;
+
+ /* Alloc fallback tfm for essiv when key size != 256 bit */
+ ctx_p->fallback_tfm =
+ crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+
+ if (IS_ERR(ctx_p->fallback_tfm)) {
+ /* Note we're still allowing registration with no fallback since it's
+ * better to have most modes supported than none at all.
+ */
+ dev_warn(dev, "Error allocating fallback algo %s. Some modes may be available.\n",
+ name);
+ ctx_p->fallback_tfm = NULL;
+ } else {
+ fallback_req_size = crypto_skcipher_reqsize(ctx_p->fallback_tfm);
+ }
+ }
+
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct cipher_req_ctx) + fallback_req_size);
+
/* Allocate key buffer, cache line aligned */
- ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
+ ctx_p->user.key = kzalloc(max_key_buf_size, GFP_KERNEL);
if (!ctx_p->user.key)
- return -ENOMEM;
+ goto free_fallback;
dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
ctx_p->user.key);
@@ -186,21 +213,20 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
max_key_buf_size, ctx_p->user.key);
- return -ENOMEM;
+ goto free_key;
}
dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
- if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
- /* Alloc hash tfm for essiv */
- ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
- if (IS_ERR(ctx_p->shash_tfm)) {
- dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
- return PTR_ERR(ctx_p->shash_tfm);
- }
- }
+ return 0;
- return rc;
+free_key:
+ kfree(ctx_p->user.key);
+free_fallback:
+ crypto_free_skcipher(ctx_p->fallback_tfm);
+ crypto_free_shash(ctx_p->shash_tfm);
+
+ return -ENOMEM;
}
static void cc_cipher_exit(struct crypto_tfm *tfm)
@@ -220,6 +246,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm)
/* Free hash tfm for essiv */
crypto_free_shash(ctx_p->shash_tfm);
ctx_p->shash_tfm = NULL;
+ crypto_free_skcipher(ctx_p->fallback_tfm);
+ ctx_p->fallback_tfm = NULL;
}
/* Unmap key buffer */
@@ -303,6 +331,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
}
ctx_p->keylen = keylen;
+ ctx_p->fallback_on = false;
switch (cc_slot_to_key_type(hki.hw_key1)) {
case CC_HW_PROTECTED_KEY:
@@ -388,10 +417,33 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
if (validate_keys_sizes(ctx_p, keylen)) {
- dev_dbg(dev, "Unsupported key size %d.\n", keylen);
+ dev_dbg(dev, "Invalid key size %d.\n", keylen);
return -EINVAL;
}
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+
+ /* We only support 256 bit ESSIV-CBC-AES keys */
+ if (keylen != AES_KEYSIZE_256) {
+ unsigned int flags = crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_MASK;
+
+ if (likely(ctx_p->fallback_tfm)) {
+ ctx_p->fallback_on = true;
+ crypto_skcipher_clear_flags(ctx_p->fallback_tfm,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx_p->fallback_tfm, flags);
+ return crypto_skcipher_setkey(ctx_p->fallback_tfm, key, keylen);
+ }
+
+ dev_dbg(dev, "Unsupported key size %d and no fallback.\n", keylen);
+ return -EINVAL;
+ }
+
+ /* Internal ESSIV key buffer is double sized */
+ max_key_buf_size <<= 1;
+ }
+
+ ctx_p->fallback_on = false;
ctx_p->key_type = CC_UNPROTECTED_KEY;
/*
@@ -419,21 +471,20 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
max_key_buf_size, DMA_TO_DEVICE);
memcpy(ctx_p->user.key, key, keylen);
- if (keylen == 24)
- memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
/* sha256 for key2 - use sw implementation */
- int key_len = keylen >> 1;
int err;
err = crypto_shash_tfm_digest(ctx_p->shash_tfm,
- ctx_p->user.key, key_len,
- ctx_p->user.key + key_len);
+ ctx_p->user.key, keylen,
+ ctx_p->user.key + keylen);
if (err) {
dev_err(dev, "Failed to hash ESSIV key.\n");
return err;
}
+
+ keylen <<= 1;
}
dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
max_key_buf_size, DMA_TO_DEVICE);
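
After this change the user hands in a single AES key and the driver derives the IV-generator key as SHA-256 of it, doubling the internal key buffer. The expansion step in isolation (a sketch; the buffer is assumed to hold 2*keylen bytes, and keylen is AES_KEYSIZE_256 here):

    /* key buffer layout after expansion: [ user key | SHA-256(user key) ] */
    static int essiv_expand_key(struct crypto_shash *sha256, u8 *key,
                                unsigned int keylen)
    {
            return crypto_shash_tfm_digest(sha256, key, keylen, key + keylen);
    }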
@@ -571,9 +622,10 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
- unsigned int key_len = ctx_p->keylen;
+ unsigned int key_len = (ctx_p->keylen / 2);
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
unsigned int du_size = nbytes;
+ unsigned int key_offset = key_len;
struct cc_crypto_alg *cc_alg =
container_of(tfm->__crt_alg, struct cc_crypto_alg,
@@ -593,6 +645,10 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
+
+ if (cipher_mode == DRV_CIPHER_ESSIV)
+ key_len = SHA256_DIGEST_SIZE;
+
/* load XEX key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -602,12 +658,12 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
ctx_p->hw.key2_slot);
} else {
set_din_type(&desc[*seq_size], DMA_DLLI,
- (key_dma_addr + (key_len / 2)),
- (key_len / 2), NS_BIT);
+ (key_dma_addr + key_offset),
+ key_len, NS_BIT);
}
set_xex_data_unit_size(&desc[*seq_size], du_size);
set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
- set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_key_size_aes(&desc[*seq_size], key_len);
set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
(*seq_size)++;
@@ -616,7 +672,7 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
- set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_key_size_aes(&desc[*seq_size], key_len);
set_flow_mode(&desc[*seq_size], flow_mode);
set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
CC_AES_BLOCK_SIZE, NS_BIT);
@@ -867,6 +923,17 @@ static int cc_cipher_process(struct skcipher_request *req,
goto exit_process;
}
+ if (ctx_p->fallback_on) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx_p->fallback_tfm);
+ if (direction == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ return crypto_skcipher_encrypt(subreq);
+ else
+ return crypto_skcipher_decrypt(subreq);
+ }
+
/* The IV we are handed may be allocated from the stack so
* we must copy it to a DMAable buffer before use.
*/
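
Because init reserved room for the fallback's request context, delegating is allocation-free: the request is cloned into the context area, retargeted at the fallback tfm, and re-submitted. The pattern in isolation (a sketch under the reqsize assumption noted above):

    static int do_fallback(struct skcipher_request *req,
                           struct crypto_skcipher *fallback, bool encrypt)
    {
            struct skcipher_request *subreq = skcipher_request_ctx(req);

            *subreq = *req;                 /* copies src/dst/len/iv/flags */
            skcipher_request_set_tfm(subreq, fallback);
            return encrypt ? crypto_skcipher_encrypt(subreq)
                           : crypto_skcipher_decrypt(subreq);
    }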
@@ -1010,7 +1077,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
- .name = "essiv(paes)",
+ .name = "essiv(cbc(paes),sha256)",
.driver_name = "essiv-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
@@ -1028,7 +1095,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
- .name = "essiv512(paes)",
+ .name = "essiv512(cbc(paes),sha256)",
.driver_name = "essiv-paes-du512-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
@@ -1047,7 +1114,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
- .name = "essiv4096(paes)",
+ .name = "essiv4096(cbc(paes),sha256)",
.driver_name = "essiv-paes-du4096-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
@@ -1269,15 +1336,15 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
},
{
- .name = "essiv(aes)",
+ .name = "essiv(cbc(aes),sha256)",
.driver_name = "essiv-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
.decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.cipher_mode = DRV_CIPHER_ESSIV,
@@ -1286,15 +1353,15 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
},
{
- .name = "essiv512(aes)",
+ .name = "essiv512(cbc(aes),sha256)",
.driver_name = "essiv-aes-du512-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
.decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.cipher_mode = DRV_CIPHER_ESSIV,
@@ -1304,15 +1371,15 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
},
{
- .name = "essiv4096(aes)",
+ .name = "essiv4096(cbc(aes),sha256)",
.driver_name = "essiv-aes-du4096-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
.decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.cipher_mode = DRV_CIPHER_ESSIV,
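
The renamed entries follow the generic essiv template spelling, so a user asking for "essiv(cbc(aes),sha256)" transparently gets either this hardware implementation or the software template, with identical key semantics (a single AES key; the hash key is derived internally). A usage sketch:

    struct crypto_skcipher *tfm;

    tfm = crypto_alloc_skcipher("essiv(cbc(aes),sha256)", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);
    /* min/max keysize is now the plain AES range, not doubled */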
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 4c2553672b6f..13b908ea4873 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -690,26 +690,22 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
return min(srclen, dstlen);
}
-static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
- u32 flags,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int nbytes,
+static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+ struct skcipher_request *req,
u8 *iv,
unsigned short op_type)
{
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
int err;
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
-
- skcipher_request_set_sync_tfm(subreq, cipher);
- skcipher_request_set_callback(subreq, flags, NULL, NULL);
- skcipher_request_set_crypt(subreq, src, dst,
- nbytes, iv);
+ skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
+ skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
+ req->cryptlen, iv);
- err = op_type ? crypto_skcipher_decrypt(subreq) :
- crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
+ err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
+ crypto_skcipher_encrypt(&reqctx->fallback_req);
return err;
@@ -924,11 +920,11 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
{
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
- crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
+ crypto_skcipher_clear_flags(ablkctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
+ crypto_skcipher_set_flags(ablkctx->sw_cipher,
cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+ return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}
static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
@@ -1206,13 +1202,8 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
req);
memcpy(req->iv, reqctx->init_iv, IV);
atomic_inc(&adap->chcr_stats.fallback);
- err = chcr_cipher_fallback(ablkctx->sw_cipher,
- req->base.flags,
- req->src,
- req->dst,
- req->cryptlen,
- req->iv,
- reqctx->op);
+ err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
+ reqctx->op);
goto complete;
}
@@ -1224,7 +1215,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
wrparam.bytes = bytes;
skb = create_cipher_wr(&wrparam);
if (IS_ERR(skb)) {
- pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
+ pr_err("%s : Failed to form WR. No memory\n", __func__);
err = PTR_ERR(skb);
goto unmap;
}
@@ -1341,11 +1332,7 @@ static int process_cipher(struct skcipher_request *req,
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
fallback: atomic_inc(&adap->chcr_stats.fallback);
- err = chcr_cipher_fallback(ablkctx->sw_cipher,
- req->base.flags,
- req->src,
- req->dst,
- req->cryptlen,
+ err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
subtype ==
CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
reqctx->iv : req->iv,
@@ -1486,14 +1473,15 @@ static int chcr_init_tfm(struct crypto_skcipher *tfm)
struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
+ ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
init_completion(&ctx->cbc_aes_aio_done);
- crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+ crypto_skcipher_reqsize(ablkctx->sw_cipher));
return chcr_device_init(ctx);
}
@@ -1507,13 +1495,14 @@ static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
* cannot be used as fallback in chcr_handle_cipher_response
*/
- ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
+ ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
- crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+ crypto_skcipher_reqsize(ablkctx->sw_cipher));
return chcr_device_init(ctx);
}
@@ -1523,7 +1512,7 @@ static void chcr_exit_tfm(struct crypto_skcipher *tfm)
struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- crypto_free_sync_skcipher(ablkctx->sw_cipher);
+ crypto_free_skcipher(ablkctx->sw_cipher);
}
static int get_alg_config(struct algo_param *params,
@@ -1556,7 +1545,7 @@ static int get_alg_config(struct algo_param *params,
params->result_size = SHA512_DIGEST_SIZE;
break;
default:
- pr_err("chcr : ERROR, unsupported digest size\n");
+ pr_err("ERROR, unsupported digest size\n");
return -EINVAL;
}
return 0;
@@ -3571,7 +3560,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
goto out;
if (get_alg_config(&param, max_authsize)) {
- pr_err("chcr : Unsupported digest size\n");
+ pr_err("Unsupported digest size\n");
goto out;
}
subtype = get_aead_subtype(authenc);
@@ -3590,7 +3579,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
} else if (keys.enckeylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- pr_err("chcr : Unsupported cipher key\n");
+ pr_err("Unsupported cipher key\n");
goto out;
}
@@ -3608,10 +3597,8 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
}
base_hash = chcr_alloc_shash(max_authsize);
if (IS_ERR(base_hash)) {
- pr_err("chcr : Base driver cannot be loaded\n");
- aeadctx->enckey_len = 0;
- memzero_explicit(&keys, sizeof(keys));
- return -EINVAL;
+ pr_err("Base driver cannot be loaded\n");
+ goto out;
}
{
SHASH_DESC_ON_STACK(shash, base_hash);
@@ -3626,7 +3613,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
keys.authkeylen,
o_ptr);
if (err) {
- pr_err("chcr : Base driver cannot be loaded\n");
+ pr_err("Base driver cannot be loaded\n");
goto out;
}
keys.authkeylen = max_authsize;
@@ -3711,7 +3698,7 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
} else if (keys.enckeylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
+ pr_err("Unsupported cipher key %d\n", keys.enckeylen);
goto out;
}
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
@@ -3747,7 +3734,7 @@ static int chcr_aead_op(struct aead_request *req,
cdev = a_ctx(tfm)->dev;
if (!cdev) {
- pr_err("chcr : %s : No crypto device.\n", __func__);
+ pr_err("%s : No crypto device.\n", __func__);
return -ENXIO;
}
@@ -4445,6 +4432,7 @@ static int chcr_register_alg(void)
driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
driver_algs[i].alg.skcipher.base.cra_flags =
CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK;
driver_algs[i].alg.skcipher.base.cra_ctxsize =
sizeof(struct chcr_context) +
@@ -4456,7 +4444,8 @@ static int chcr_register_alg(void)
break;
case CRYPTO_ALG_TYPE_AEAD:
driver_algs[i].alg.aead.base.cra_flags =
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
driver_algs[i].alg.aead.init = chcr_aead_cra_init;
@@ -4476,7 +4465,8 @@ static int chcr_register_alg(void)
a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
a_hash->halg.base.cra_module = THIS_MODULE;
- a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ a_hash->halg.base.cra_flags =
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
a_hash->halg.base.cra_alignmask = 0;
a_hash->halg.base.cra_exit = NULL;
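
CRYPTO_ALG_ALLOCATES_MEMORY advertises that the driver may allocate while processing a request, so callers on no-fail I/O paths can exclude such implementations through the allocation mask. A sketch from the caller's side:

    /* type = 0, mask = CRYPTO_ALG_ALLOCATES_MEMORY: only accept
     * implementations whose datapath never allocates memory */
    struct crypto_skcipher *tfm =
            crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);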
@@ -4497,8 +4487,7 @@ static int chcr_register_alg(void)
break;
}
if (err) {
- pr_err("chcr : %s : Algorithm registration failed\n",
- name);
+ pr_err("%s : Algorithm registration failed\n", name);
goto register_err;
} else {
driver_algs[i].is_registered = 1;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 31e427e273f8..e89f9e0094b4 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -171,7 +171,7 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
}
struct ablk_ctx {
- struct crypto_sync_skcipher *sw_cipher;
+ struct crypto_skcipher *sw_cipher;
__be32 key_ctx_hdr;
unsigned int enckey_len;
unsigned char ciph_mode;
@@ -305,6 +305,7 @@ struct chcr_skcipher_req_ctx {
u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN];
u16 txqidx;
u16 rxqidx;
+ struct skcipher_request fallback_req; // keep at the end
};
struct chcr_alg_template {
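
The "keep at the end" comment is load-bearing: the fallback skcipher's own request context is sized by crypto_skcipher_reqsize() and laid out directly after fallback_req, so any member added behind it would be overwritten. The invariant, as a sketch:

    /*
     * reqsize(tfm) = sizeof(struct chcr_skcipher_req_ctx)
     *              + crypto_skcipher_reqsize(sw_cipher)
     *
     * [ chcr_skcipher_req_ctx ... fallback_req ][ fallback's req ctx ]
     *                             ^ must stay the last member
     */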
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index a3ee127a70e3..b135c74fb619 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -12,7 +12,6 @@
#include <linux/topology.h>
#include "hpre.h"
-#define HPRE_VF_NUM 63
#define HPRE_QUEUE_NUM_V2 1024
#define HPRE_QM_ABNML_INT_MASK 0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
@@ -46,9 +45,9 @@
#define HPRE_CORE_IS_SCHD_OFFSET 0x90
#define HPRE_RAS_CE_ENB 0x301410
-#define HPRE_HAC_RAS_CE_ENABLE 0x3f
+#define HPRE_HAC_RAS_CE_ENABLE 0x1
#define HPRE_RAS_NFE_ENB 0x301414
-#define HPRE_HAC_RAS_NFE_ENABLE 0x3fffc0
+#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe
#define HPRE_RAS_FE_ENB 0x301418
#define HPRE_HAC_RAS_FE_ENABLE 0
@@ -83,6 +82,10 @@
#define HPRE_CORE_ECC_2BIT_ERR BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR BIT(5)
+#define HPRE_QM_BME_FLR BIT(7)
+#define HPRE_QM_PM_FLR BIT(11)
+#define HPRE_QM_SRIOV_FLR BIT(12)
+
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
@@ -231,6 +234,22 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
return 0;
}
+/*
+ * For Hi1620, we should disable FLR triggered by hardware (BME/PM/SRIOV).
+ * Otherwise the device may stay in D3 state when we bind and unbind hpre
+ * quickly, as it does FLR triggered by hardware.
+ */
+static void disable_flr_of_bme(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
+ val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
+ val |= HPRE_QM_PM_FLR;
+ writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
+ writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
+}
+
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
@@ -242,10 +261,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));
- /* disable FLR triggered by BME(bus master enable) */
- writel(PEH_AXUSER_CFG, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
- writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
-
/* HPRE need more time, we close this interrupt */
val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
@@ -264,7 +279,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
- val & BIT(0),
+ val & BIT(0),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
if (ret) {
@@ -296,6 +311,8 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
if (ret)
dev_err(dev, "acpi_evaluate_dsm err.\n");
+ disable_flr_of_bme(qm);
+
return ret;
}
@@ -372,7 +389,6 @@ static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
u32 num_vfs = qm->vfs_num;
u32 vfq_num, tmp;
-
if (val > num_vfs)
return -EINVAL;
@@ -449,7 +465,7 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
}
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
- size_t count, loff_t *pos)
+ size_t count, loff_t *pos)
{
struct hpre_debugfs_file *file = filp->private_data;
char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
@@ -477,7 +493,7 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
}
static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+ size_t count, loff_t *pos)
{
struct hpre_debugfs_file *file = filp->private_data;
char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
@@ -548,13 +564,15 @@ static int hpre_debugfs_atomic64_get(void *data, u64 *val)
static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
struct hpre_dfx *dfx_item = data;
- struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
+ struct hpre_dfx *hpre_dfx = NULL;
- if (val)
+ if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
+ hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
+ atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
+ } else if (val) {
return -EINVAL;
+ }
- if (dfx_item->type == HPRE_OVERTIME_THRHLD)
- atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
atomic64_set(&dfx_item->value, val);
return 0;
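
The dfx counters sit in a fixed-order array indexed by their type, which is what makes dfx_item - HPRE_OVERTIME_THRHLD a valid way to recover the array base from the threshold entry. The trick in isolation (a sketch; only sound when idx really is the element's index):

    struct counter { int idx; atomic64_t value; };

    static struct counter *array_base(struct counter *elem)
    {
            return elem - elem->idx;
    }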
@@ -563,15 +581,17 @@ static int hpre_debugfs_atomic64_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
hpre_debugfs_atomic64_set, "%llu\n");
-static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
+static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
+ struct hpre *hpre = container_of(qm, struct hpre, qm);
+ struct hpre_debug *dbg = &hpre->debug;
struct dentry *file_dir;
if (dir)
file_dir = dir;
else
- file_dir = dbg->debug_root;
+ file_dir = qm->debug.debug_root;
if (type >= HPRE_DEBUG_FILE_NUM)
return -EINVAL;
@@ -586,10 +606,8 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
return 0;
}
-static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
+static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
{
- struct hpre *hpre = container_of(debug, struct hpre, debug);
- struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
@@ -601,14 +619,12 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
+ debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);
return 0;
}
-static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
+static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
- struct hpre *hpre = container_of(debug, struct hpre, debug);
- struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
@@ -619,7 +635,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret < 0)
return -EINVAL;
- tmp_d = debugfs_create_dir(buf, debug->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -630,7 +646,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
regset->base = qm->io_base + hpre_cluster_offsets[i];
debugfs_create_regset32("regs", 0444, tmp_d, regset);
- ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
+ ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
i + HPRE_CLUSTER_CTRL);
if (ret)
return ret;
@@ -639,32 +655,31 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
return 0;
}
-static int hpre_ctrl_debug_init(struct hpre_debug *debug)
+static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
int ret;
- ret = hpre_create_debugfs_file(debug, NULL, HPRE_CURRENT_QM,
+ ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM,
HPRE_CURRENT_QM);
if (ret)
return ret;
- ret = hpre_create_debugfs_file(debug, NULL, HPRE_CLEAR_ENABLE,
+ ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
HPRE_CLEAR_ENABLE);
if (ret)
return ret;
- ret = hpre_pf_comm_regs_debugfs_init(debug);
+ ret = hpre_pf_comm_regs_debugfs_init(qm);
if (ret)
return ret;
- return hpre_cluster_debugfs_init(debug);
+ return hpre_cluster_debugfs_init(qm);
}
-static void hpre_dfx_debug_init(struct hpre_debug *debug)
+static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
- struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hpre *hpre = container_of(qm, struct hpre, qm);
struct hpre_dfx *dfx = hpre->debug.dfx;
- struct hisi_qm *qm = &hpre->qm;
struct dentry *parent;
int i;
@@ -676,30 +691,27 @@ static void hpre_dfx_debug_init(struct hpre_debug *debug)
}
}
-static int hpre_debugfs_init(struct hpre *hpre)
+static int hpre_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
- struct dentry *dir;
int ret;
- dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
- qm->debug.debug_root = dir;
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ hpre_debugfs_root);
+
qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
-
ret = hisi_qm_debug_init(qm);
if (ret)
goto failed_to_create;
if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
- hpre->debug.debug_root = dir;
- ret = hpre_ctrl_debug_init(&hpre->debug);
+ ret = hpre_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
- hpre_dfx_debug_init(&hpre->debug);
+ hpre_dfx_debug_init(qm);
return 0;
@@ -708,10 +720,8 @@ failed_to_create:
return ret;
}
-static void hpre_debugfs_exit(struct hpre *hpre)
+static void hpre_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
}
@@ -732,6 +742,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF) {
qm->qp_base = HPRE_PF_DEF_Q_BASE;
qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &hpre_devices;
}
@@ -849,7 +860,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_with_err_init;
- ret = hpre_debugfs_init(hpre);
+ ret = hpre_debugfs_init(qm);
if (ret)
dev_warn(&pdev->dev, "init debugfs fail!\n");
@@ -874,6 +885,7 @@ err_with_crypto_register:
err_with_qm_start:
hisi_qm_del_from_list(qm, &hpre_devices);
+ hpre_debugfs_exit(qm);
hisi_qm_stop(qm);
err_with_err_init:
@@ -905,7 +917,7 @@ static void hpre_remove(struct pci_dev *pdev)
qm->debug.curr_qm_qp_num = 0;
}
- hpre_debugfs_exit(hpre);
+ hpre_debugfs_exit(qm);
hisi_qm_stop(qm);
hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
@@ -924,7 +936,8 @@ static struct pci_driver hpre_pci_driver = {
.id_table = hpre_dev_ids,
.probe = hpre_probe,
.remove = hpre_remove,
- .sriov_configure = hisi_qm_sriov_configure,
+ .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
+ hisi_qm_sriov_configure : NULL,
.err_handler = &hpre_err_handler,
};
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 9bb263cec6c3..6527c53b073f 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1064,19 +1064,10 @@ static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
char buf[QM_DBG_READ_LEN];
int len;
- if (*pos)
- return 0;
-
- if (count < QM_DBG_READ_LEN)
- return -ENOSPC;
-
- len = snprintf(buf, QM_DBG_READ_LEN, "%s\n",
- "Please echo help to cmd to get help information");
+ len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
-
- return (*pos = len);
+ return simple_read_from_buffer(buffer, count, pos, buf, len);
}
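
simple_read_from_buffer() replaces the hand-rolled *pos/count handling (including the dubious -ENOSPC for short reads): it copies at most count bytes from a kernel buffer starting at *pos, advances *pos, and returns the number copied. The resulting shape of such a debugfs read handler, as a sketch:

    static ssize_t foo_read(struct file *filp, char __user *ubuf,
                            size_t count, loff_t *pos)
    {
            char buf[32];
            int len = scnprintf(buf, sizeof(buf), "state=%d\n", 1);

            return simple_read_from_buffer(ubuf, count, pos, buf, len);
    }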
static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
@@ -1741,7 +1732,7 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
}
EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
-static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
+static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
struct hisi_qm *qm = qp->qm;
struct device *dev = &qm->pdev->dev;
@@ -1813,7 +1804,7 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
struct hisi_qm *qm = qp->qm;
struct device *dev = &qm->pdev->dev;
int qp_id = qp->qp_id;
- int pasid = arg;
+ u32 pasid = arg;
int ret;
if (!qm_qp_avail_state(qm, qp, QP_START))
@@ -2179,8 +2170,12 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
.flags = UACCE_DEV_SVA,
.ops = &uacce_qm_ops,
};
+ int ret;
- strncpy(interface.name, pdev->driver->name, sizeof(interface.name));
+ ret = strscpy(interface.name, pdev->driver->name,
+ sizeof(interface.name));
+ if (ret < 0)
+ return -ENAMETOOLONG;
uacce = uacce_alloc(&pdev->dev, &interface);
if (IS_ERR(uacce))
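
strscpy() is the preferred bounded string copy: unlike strncpy() it always NUL-terminates and signals truncation by returning -E2BIG, which the code above maps onto -ENAMETOOLONG. In isolation:

    char name[32];
    ssize_t ret = strscpy(name, src, sizeof(name)); /* src: assumed source string */

    if (ret < 0)
            return -ENAMETOOLONG;   /* truncated: src plus its NUL did not fit */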
@@ -2691,24 +2686,12 @@ static ssize_t qm_status_read(struct file *filp, char __user *buffer,
{
struct hisi_qm *qm = filp->private_data;
char buf[QM_DBG_READ_LEN];
- int val, cp_len, len;
-
- if (*pos)
- return 0;
-
- if (count < QM_DBG_READ_LEN)
- return -ENOSPC;
+ int val, len;
val = atomic_read(&qm->status.flags);
- len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
- if (!len)
- return -EFAULT;
-
- cp_len = copy_to_user(buffer, buf, len);
- if (cp_len)
- return -EFAULT;
+ len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
- return (*pos = len);
+ return simple_read_from_buffer(buffer, count, pos, buf, len);
}
static const struct file_operations qm_status_fops = {
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 0a351de8d838..6c1d3c7d64ee 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -44,6 +44,7 @@
#define QM_AXI_M_CFG 0x1000ac
#define AXI_M_CFG 0xffff
#define QM_AXI_M_CFG_ENABLE 0x1000b0
+#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014
#define AXI_M_CFG_ENABLE 0xffffffff
#define QM_PEH_AXUSER_CFG 0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index c27e7160d2df..8ca945ac297e 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
dma_addr_t *psec_sgl,
struct scatterlist *sgl,
int count,
- struct sec_dev_info *info)
+ struct sec_dev_info *info,
+ gfp_t gfp)
{
struct sec_hw_sgl *sgl_current = NULL;
struct sec_hw_sgl *sgl_next;
@@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
sge_index = i % SEC_MAX_SGE_NUM;
if (sge_index == 0) {
sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
- GFP_KERNEL, &sgl_next_dma);
+ gfp, &sgl_next_dma);
if (!sgl_next) {
ret = -ENOMEM;
goto err_free_hw_sgls;
@@ -545,14 +546,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
}
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
- int *steps)
+ int *steps, gfp_t gfp)
{
size_t *sizes;
int i;
/* Split into suitable sized blocks */
*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
- sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
+ sizes = kcalloc(*steps, sizeof(*sizes), gfp);
if (!sizes)
return -ENOMEM;
@@ -568,7 +569,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
int steps, struct scatterlist ***splits,
int **splits_nents,
int sgl_len_in,
- struct device *dev)
+ struct device *dev, gfp_t gfp)
{
int ret, count;
@@ -576,12 +577,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
if (!count)
return -EINVAL;
- *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
+ *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
if (!*splits) {
ret = -ENOMEM;
goto err_unmap_sg;
}
- *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
+ *splits_nents = kcalloc(steps, sizeof(int), gfp);
if (!*splits_nents) {
ret = -ENOMEM;
goto err_free_splits;
@@ -589,7 +590,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
/* output the scatter list before and after this */
ret = sg_split(sgl, count, 0, steps, split_sizes,
- *splits, *splits_nents, GFP_KERNEL);
+ *splits, *splits_nents, gfp);
if (ret) {
ret = -ENOMEM;
goto err_free_splits_nents;
@@ -630,13 +631,13 @@ static struct sec_request_el
int el_size, bool different_dest,
struct scatterlist *sgl_in, int n_ents_in,
struct scatterlist *sgl_out, int n_ents_out,
- struct sec_dev_info *info)
+ struct sec_dev_info *info, gfp_t gfp)
{
struct sec_request_el *el;
struct sec_bd_info *req;
int ret;
- el = kzalloc(sizeof(*el), GFP_KERNEL);
+ el = kzalloc(sizeof(*el), gfp);
if (!el)
return ERR_PTR(-ENOMEM);
el->el_length = el_size;
@@ -668,7 +669,7 @@ static struct sec_request_el
el->sgl_in = sgl_in;
ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
- n_ents_in, info);
+ n_ents_in, info, gfp);
if (ret)
goto err_free_el;
@@ -679,7 +680,7 @@ static struct sec_request_el
el->sgl_out = sgl_out;
ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
el->sgl_out,
- n_ents_out, info);
+ n_ents_out, info, gfp);
if (ret)
goto err_free_hw_sgl_in;
@@ -720,6 +721,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
int *splits_out_nents = NULL;
struct sec_request_el *el, *temp;
bool split = skreq->src != skreq->dst;
+ gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
mutex_init(&sec_req->lock);
sec_req->req_base = &skreq->base;
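
Deriving the allocation mode from the request flags is the standard contract: CRYPTO_TFM_REQ_MAY_SLEEP permits GFP_KERNEL, otherwise every datapath allocation must use GFP_ATOMIC. As a helper sketch:

    static inline gfp_t req_gfp(const struct skcipher_request *req)
    {
            return (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                   GFP_KERNEL : GFP_ATOMIC;
    }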
@@ -728,13 +730,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
sec_req->len_in = sg_nents(skreq->src);
ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
- &steps);
+ &steps, gfp);
if (ret)
return ret;
sec_req->num_elements = steps;
ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
&splits_in_nents, sec_req->len_in,
- info->dev);
+ info->dev, gfp);
if (ret)
goto err_free_split_sizes;
@@ -742,7 +744,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
sec_req->len_out = sg_nents(skreq->dst);
ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
&splits_out, &splits_out_nents,
- sec_req->len_out, info->dev);
+ sec_req->len_out, info->dev, gfp);
if (ret)
goto err_unmap_in_sg;
}
@@ -775,7 +777,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
splits_in[i], splits_in_nents[i],
split ? splits_out[i] : NULL,
split ? splits_out_nents[i] : 0,
- info);
+ info, gfp);
if (IS_ERR(el)) {
ret = PTR_ERR(el);
goto err_free_elements;
@@ -932,7 +934,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "hisi_sec_aes_ecb",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -951,7 +954,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "hisi_sec_aes_cbc",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -970,7 +974,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "hisi_sec_aes_ctr",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -989,7 +994,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "xts(aes)",
.cra_driver_name = "hisi_sec_aes_xts",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -1009,7 +1015,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "ecb(des)",
.cra_driver_name = "hisi_sec_des_ecb",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -1028,7 +1035,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "cbc(des)",
.cra_driver_name = "hisi_sec_des_cbc",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -1047,7 +1055,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "hisi_sec_3des_cbc",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -1066,7 +1075,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "hisi_sec_3des_ecb",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 7b64aca704d6..037762b531e2 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -46,9 +46,11 @@ struct sec_req {
struct sec_cipher_req c_req;
struct sec_aead_req aead_req;
+ struct list_head backlog_head;
int err_type;
int req_id;
+ int flag;
/* Status of the SEC request */
bool fake_busy;
@@ -104,6 +106,7 @@ struct sec_qp_ctx {
struct sec_alg_res res[QM_Q_DEPTH];
struct sec_ctx *ctx;
struct mutex req_lock;
+ struct list_head backlog;
struct hisi_acc_sgl_pool *c_in_pool;
struct hisi_acc_sgl_pool *c_out_pool;
atomic_t pending_reqs;
@@ -161,6 +164,7 @@ struct sec_dfx {
atomic64_t send_cnt;
atomic64_t recv_cnt;
atomic64_t send_busy_cnt;
+ atomic64_t recv_busy_cnt;
atomic64_t err_bd_cnt;
atomic64_t invalid_req_cnt;
atomic64_t done_flag_cnt;
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 64614a9bdf21..497969ae8b23 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -166,6 +166,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
if (unlikely(!req)) {
atomic64_inc(&dfx->invalid_req_cnt);
+ atomic_inc(&qp->qp_status.used);
return;
}
req->err_type = bd->type2.error_type;
@@ -198,21 +199,30 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
int ret;
+ if (ctx->fake_req_limit <=
+ atomic_read(&qp_ctx->qp->qp_status.used) &&
+ !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+
mutex_lock(&qp_ctx->req_lock);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
+
+ if (ctx->fake_req_limit <=
+ atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
+ list_add_tail(&req->backlog_head, &qp_ctx->backlog);
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
+ atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
+ mutex_unlock(&qp_ctx->req_lock);
+ return -EBUSY;
+ }
mutex_unlock(&qp_ctx->req_lock);
- atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
if (unlikely(ret == -EBUSY))
return -ENOBUFS;
- if (!ret) {
- if (req->fake_busy) {
- atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
- ret = -EBUSY;
- } else {
- ret = -EINPROGRESS;
- }
+ if (likely(!ret)) {
+ ret = -EINPROGRESS;
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
}
return ret;
@@ -373,8 +383,8 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
qp_ctx->ctx = ctx;
mutex_init(&qp_ctx->req_lock);
- atomic_set(&qp_ctx->pending_reqs, 0);
idr_init(&qp_ctx->req_idr);
+ INIT_LIST_HEAD(&qp_ctx->backlog);
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
@@ -1048,21 +1058,49 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
+static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_req *backlog_req = NULL;
+
+ mutex_lock(&qp_ctx->req_lock);
+ if (ctx->fake_req_limit >=
+ atomic_read(&qp_ctx->qp->qp_status.used) &&
+ !list_empty(&qp_ctx->backlog)) {
+ backlog_req = list_first_entry(&qp_ctx->backlog,
+ typeof(*backlog_req), backlog_head);
+ list_del(&backlog_req->backlog_head);
+ }
+ mutex_unlock(&qp_ctx->req_lock);
+
+ return backlog_req;
+}
+
static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
int err)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct skcipher_request *backlog_sk_req;
+ struct sec_req *backlog_req;
- atomic_dec(&qp_ctx->pending_reqs);
sec_free_req_id(req);
/* IV output at encrypto of CBC mode */
if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
sec_update_iv(req, SEC_SKCIPHER);
- if (req->fake_busy)
- sk_req->base.complete(&sk_req->base, -EINPROGRESS);
+ while (1) {
+ backlog_req = sec_back_req_clear(ctx, qp_ctx);
+ if (!backlog_req)
+ break;
+
+ backlog_sk_req = backlog_req->c_req.sk_req;
+ backlog_sk_req->base.complete(&backlog_sk_req->base,
+ -EINPROGRESS);
+ atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
+ }
+
sk_req->base.complete(&sk_req->base, err);
}
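
This implements the crypto API's backlog contract: with CRYPTO_TFM_REQ_MAY_BACKLOG set, a full queue still accepts the request onto a backlog and returns -EBUSY, and when the request later leaves the backlog its completion runs once with -EINPROGRESS before the final completion. A caller typically drives this through crypto_wait_req(), sketched here:

    DECLARE_CRYPTO_WAIT(wait);

    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                       CRYPTO_TFM_REQ_MAY_SLEEP,
                                  crypto_req_done, &wait);
    /* crypto_wait_req() treats -EBUSY as "backlogged" and waits for the
     * -EINPROGRESS notification, then for the real completion */
    err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);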
@@ -1133,10 +1171,10 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
struct sec_cipher_req *c_req = &req->c_req;
size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct aead_request *backlog_aead_req;
+ struct sec_req *backlog_req;
size_t sz;
- atomic_dec(&qp_ctx->pending_reqs);
-
if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
sec_update_iv(req, SEC_AEAD);
@@ -1157,17 +1195,22 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
sec_free_req_id(req);
- if (req->fake_busy)
- a_req->base.complete(&a_req->base, -EINPROGRESS);
+ while (1) {
+ backlog_req = sec_back_req_clear(c, qp_ctx);
+ if (!backlog_req)
+ break;
+
+ backlog_aead_req = backlog_req->aead_req.aead_req;
+ backlog_aead_req->base.complete(&backlog_aead_req->base,
+ -EINPROGRESS);
+ atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
+ }
a_req->base.complete(&a_req->base, err);
}
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
- struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-
- atomic_dec(&qp_ctx->pending_reqs);
sec_free_req_id(req);
sec_free_queue_id(ctx, req);
}
@@ -1187,11 +1230,6 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
return req->req_id;
}
- if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
- req->fake_busy = true;
- else
- req->fake_busy = false;
-
return 0;
}
@@ -1213,7 +1251,8 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
- if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+ if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
+ (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
goto err_send_req;
}
@@ -1407,6 +1446,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
if (!sk_req->cryptlen)
return 0;
+ req->flag = sk_req->base.flags;
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
@@ -1435,7 +1475,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.cra_name = sec_cra_name,\
.cra_driver_name = "hisi_sec_"sec_cra_name,\
.cra_priority = SEC_PRIORITY,\
- .cra_flags = CRYPTO_ALG_ASYNC,\
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
.cra_blocksize = blk_size,\
.cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
@@ -1530,6 +1570,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
int ret;
+ req->flag = a_req->base.flags;
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
@@ -1558,7 +1599,7 @@ static int sec_aead_decrypt(struct aead_request *a_req)
.cra_name = sec_cra_name,\
.cra_driver_name = "hisi_sec_"sec_cra_name,\
.cra_priority = SEC_PRIORITY,\
- .cra_flags = CRYPTO_ALG_ASYNC,\
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
.cra_blocksize = blk_size,\
.cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index a4cb58b54b25..2297425486cb 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -22,17 +22,15 @@
#define SEC_PF_PCI_DEVICE_ID 0xa255
#define SEC_VF_PCI_DEVICE_ID 0xa256
-#define SEC_XTS_MIV_ENABLE_REG 0x301384
-#define SEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF
-#define SEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF
-#define SEC_BD_ERR_CHK_EN1 0xfffff7fd
-#define SEC_BD_ERR_CHK_EN2 0xffffbfff
+#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
+#define SEC_BD_ERR_CHK_EN1 0x7ffff7fd
+#define SEC_BD_ERR_CHK_EN3 0xffffbfff
#define SEC_SQE_SIZE 128
#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
-#define SEC_PF_DEF_Q_NUM 64
+#define SEC_PF_DEF_Q_NUM 256
#define SEC_PF_DEF_Q_BASE 0
-#define SEC_CTX_Q_NUM_DEF 24
+#define SEC_CTX_Q_NUM_DEF 2
#define SEC_CTX_Q_NUM_MAX 32
#define SEC_CTRL_CNT_CLR_CE 0x301120
@@ -47,17 +45,18 @@
#define SEC_ECC_ADDR(err) ((err) >> 0)
#define SEC_CORE_INT_DISABLE 0x0
#define SEC_CORE_INT_ENABLE 0x1ff
+#define SEC_CORE_INT_CLEAR 0x1ff
+#define SEC_SAA_ENABLE 0x17f
-#define SEC_RAS_CE_REG 0x50
-#define SEC_RAS_FE_REG 0x54
-#define SEC_RAS_NFE_REG 0x58
+#define SEC_RAS_CE_REG 0x301050
+#define SEC_RAS_FE_REG 0x301054
+#define SEC_RAS_NFE_REG 0x301058
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_RAS_NFE_ENB_MSK 0x177
#define SEC_RAS_DISABLE 0x0
#define SEC_MEM_START_INIT_REG 0x0100
#define SEC_MEM_INIT_DONE_REG 0x0104
-#define SEC_QM_ABNORMAL_INT_MASK 0x100004
#define SEC_CONTROL_REG 0x0200
#define SEC_TRNG_EN_SHIFT 8
@@ -68,8 +67,10 @@
#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
+#define SEC_SAA_EN_REG 0x0270
+#define SEC_BD_ERR_CHK_EN_REG0 0x0380
#define SEC_BD_ERR_CHK_EN_REG1 0x0384
-#define SEC_BD_ERR_CHK_EN_REG2 0x038c
+#define SEC_BD_ERR_CHK_EN_REG3 0x038c
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
@@ -77,8 +78,8 @@
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
-#define SEC_VF_CNT_MASK 0xffffffc0
#define SEC_DBGFS_VAL_MAX_LEN 20
+#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
#define SEC_SQE_MASK_OFFSET 64
#define SEC_SQE_MASK_LEN 48
@@ -122,6 +123,7 @@ static struct sec_dfx_item sec_dfx_labels[] = {
{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
+ {"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
@@ -191,7 +193,7 @@ static const struct kernel_param_ops sec_ctx_q_num_ops = {
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
-MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
+MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)");
static const struct kernel_param_ops vfs_num_ops = {
.set = vfs_num_set,
@@ -280,7 +282,7 @@ static int sec_engine_init(struct hisi_qm *qm)
reg, reg & 0x1, SEC_DELAY_10_US,
SEC_POLL_TIMEOUT_US);
if (ret) {
- dev_err(&qm->pdev->dev, "fail to init sec mem\n");
+ pci_err(qm->pdev, "fail to init sec mem\n");
return ret;
}
@@ -296,25 +298,25 @@ static int sec_engine_init(struct hisi_qm *qm)
reg |= SEC_USER1_SMMU_NORMAL;
writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+ writel(SEC_SINGLE_PORT_MAX_TRANS,
+ qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
+
+ writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));
+
+ /* Enable sm4 extra mode, as ctr/ecb */
+ writel_relaxed(SEC_BD_ERR_CHK_EN0,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0));
+ /* Enable sm4 xts mode multiple iv */
writel_relaxed(SEC_BD_ERR_CHK_EN1,
SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
- writel_relaxed(SEC_BD_ERR_CHK_EN2,
- SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2));
-
- /* enable clock gate control */
- reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
- reg |= SEC_CLK_GATE_ENABLE;
- writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel_relaxed(SEC_BD_ERR_CHK_EN3,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3));
/* config endian */
reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
reg |= sec_get_endian(qm);
writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
- /* Enable sm4 xts mode multiple iv */
- writel_relaxed(SEC_XTS_MIV_ENABLE_MSK,
- qm->io_base + SEC_XTS_MIV_ENABLE_REG);
-
return 0;
}
@@ -346,10 +348,17 @@ static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
+ int i;
+
/* clear current_qm */
writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+ /* clear sec dfx regs */
+ writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
+ for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
+ readl(qm->io_base + sec_dfx_regs[i].offset);
+
/* clear rdclr_en */
writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);
@@ -362,14 +371,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
- dev_info(&qm->pdev->dev, "V1 not support hw error handle\n");
+ pci_info(qm->pdev, "V1 not support hw error handle\n");
return;
}
- val = readl(qm->io_base + SEC_CONTROL_REG);
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
/* clear SEC hw error source if having */
- writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE);
+ writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
/* enable SEC hw error interrupts */
writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
@@ -382,14 +391,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
/* enable SEC block master OOO when m-bit error occur */
val = val | SEC_AXI_SHUTDOWN_ENABLE;
- writel(val, qm->io_base + SEC_CONTROL_REG);
+ writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}
static void sec_hw_error_disable(struct hisi_qm *qm)
{
u32 val;
- val = readl(qm->io_base + SEC_CONTROL_REG);
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
/* disable RAS int */
writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
@@ -402,7 +411,7 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
/* disable SEC block master OOO when m-bit error occur */
val = val & SEC_AXI_SHUTDOWN_DISABLE;
- writel(val, qm->io_base + SEC_CONTROL_REG);
+ writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}
static u32 sec_current_qm_read(struct sec_debug_file *file)
@@ -577,20 +586,20 @@ static int sec_debugfs_atomic64_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
sec_debugfs_atomic64_set, "%lld\n");
-static int sec_core_debug_init(struct sec_dev *sec)
+static int sec_core_debug_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
struct device *dev = &qm->pdev->dev;
struct sec_dfx *dfx = &sec->debug.dfx;
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
int i;
- tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);
+ tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
- return -ENOENT;
+ return -ENOMEM;
regset->regs = sec_dfx_regs;
regset->nregs = ARRAY_SIZE(sec_dfx_regs);
@@ -609,44 +618,44 @@ static int sec_core_debug_init(struct sec_dev *sec)
return 0;
}
-static int sec_debug_init(struct sec_dev *sec)
+static int sec_debug_init(struct hisi_qm *qm)
{
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
int i;
- for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&sec->debug.files[i].lock);
- sec->debug.files[i].index = i;
- sec->debug.files[i].qm = &sec->qm;
-
- debugfs_create_file(sec_dbg_file_name[i], 0600,
- sec->qm.debug.debug_root,
- sec->debug.files + i,
- &sec_dbg_fops);
+ if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
+ for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
+ spin_lock_init(&sec->debug.files[i].lock);
+ sec->debug.files[i].index = i;
+ sec->debug.files[i].qm = qm;
+
+ debugfs_create_file(sec_dbg_file_name[i], 0600,
+ qm->debug.debug_root,
+ sec->debug.files + i,
+ &sec_dbg_fops);
+ }
}
- return sec_core_debug_init(sec);
+ return sec_core_debug_init(qm);
}
-static int sec_debugfs_init(struct sec_dev *sec)
+static int sec_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
struct device *dev = &qm->pdev->dev;
int ret;
qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
sec_debugfs_root);
-
qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
ret = hisi_qm_debug_init(qm);
if (ret)
goto failed_to_create;
- if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
- ret = sec_debug_init(sec);
- if (ret)
- goto failed_to_create;
- }
+ ret = sec_debug_init(qm);
+ if (ret)
+ goto failed_to_create;
+
return 0;
@@ -656,9 +665,9 @@ failed_to_create:
return ret;
}
-static void sec_debugfs_exit(struct sec_dev *sec)
+static void sec_debugfs_exit(struct hisi_qm *qm)
{
- debugfs_remove_recursive(sec->qm.debug.debug_root);
+ debugfs_remove_recursive(qm->debug.debug_root);
}
static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
@@ -677,8 +686,6 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
SEC_CORE_SRAM_ECC_ERR_INFO);
dev_err(dev, "multi ecc sram num=0x%x\n",
SEC_ECC_NUM(err_val));
- dev_err(dev, "multi ecc sram addr=0x%x\n",
- SEC_ECC_ADDR(err_val));
}
}
errs++;
@@ -868,7 +875,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_probe_uninit;
}
- ret = sec_debugfs_init(sec);
+ ret = sec_debugfs_init(qm);
if (ret)
pci_warn(pdev, "Failed to init debugfs!\n");
@@ -893,7 +900,7 @@ err_crypto_unregister:
err_remove_from_list:
hisi_qm_del_from_list(qm, &sec_devices);
- sec_debugfs_exit(sec);
+ sec_debugfs_exit(qm);
hisi_qm_stop(qm);
err_probe_uninit:
@@ -917,7 +924,7 @@ static void sec_remove(struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
hisi_qm_sriov_disable(pdev);
- sec_debugfs_exit(sec);
+ sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm);
@@ -987,5 +994,6 @@ module_exit(sec_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
+MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index f3ed4c0e5493..4484be13812b 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -76,7 +76,7 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
-int zip_create_qps(struct hisi_qp **qps, int ctx_num);
+int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
int hisi_zip_register_to_crypto(void);
void hisi_zip_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index c73707c2e539..01fd6a78111d 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -158,13 +158,13 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
hisi_qm_release_qp(ctx->qp);
}
-static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type)
+static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
{
struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
struct hisi_zip *hisi_zip;
int ret, i, j;
- ret = zip_create_qps(qps, HZIP_CTX_Q_NUM);
+ ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);
if (ret) {
pr_err("Can not create zip qps!\n");
return -ENODEV;
@@ -379,7 +379,7 @@ static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
int ret;
- ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name));
+ ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
if (ret)
return ret;
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 2229a21ae7c8..e2845b2c963d 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -234,9 +234,10 @@ static const struct pci_device_id hisi_zip_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
-int zip_create_qps(struct hisi_qp **qps, int qp_num)
+int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
{
- int node = cpu_to_node(smp_processor_id());
+ if (node == NUMA_NO_NODE)
+ node = cpu_to_node(smp_processor_id());
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
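This hunk threads an explicit NUMA node down from the caller (the tfm's node, per the zip_crypto.c change above) and only falls back to the local CPU's node when none was requested. The shape of that fallback, with a hypothetical alloc_on_node() standing in for hisi_qm_alloc_qps_node():

#include <linux/numa.h>
#include <linux/smp.h>
#include <linux/topology.h>

extern int alloc_on_node(int qp_num, int node);	/* hypothetical helper */

static int create_qps(int qp_num, int node)
{
	/* NUMA_NO_NODE means "no preference": use the current CPU's node */
	if (node == NUMA_NO_NODE)
		node = cpu_to_node(smp_processor_id());

	return alloc_on_node(qp_num, node);
}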
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 0e25fc3087f3..87226b7c2795 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -330,7 +330,7 @@ static int img_hash_write_via_dma(struct img_hash_dev *hdev)
static int img_hash_dma_init(struct img_hash_dev *hdev)
{
struct dma_slave_config dma_conf;
- int err = -EINVAL;
+ int err;
hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
if (IS_ERR(hdev->dma_lch)) {
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 2cb53fbae841..fa7398e68858 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1135,11 +1135,12 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
static int safexcel_request_ring_irq(void *pdev, int irqid,
int is_pci_dev,
+ int ring_id,
irq_handler_t handler,
irq_handler_t threaded_handler,
struct safexcel_ring_irq_data *ring_irq_priv)
{
- int ret, irq;
+ int ret, irq, cpu;
struct device *dev;
if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
@@ -1177,6 +1178,10 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
return ret;
}
+ /* Set affinity */
+ cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
+ irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+
return irq;
}
@@ -1611,6 +1616,7 @@ static int safexcel_probe_generic(void *pdev,
irq = safexcel_request_ring_irq(pdev,
EIP197_IRQ_NUMBER(i, is_pci_dev),
is_pci_dev,
+ i,
safexcel_irq_ring,
safexcel_irq_ring_thread,
ring_irq);
@@ -1619,6 +1625,7 @@ static int safexcel_probe_generic(void *pdev,
return irq;
}
+ priv->ring[i].irq = irq;
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
INIT_WORK(&priv->ring[i].work_data.work,
@@ -1756,8 +1763,10 @@ static int safexcel_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->reg_clk);
clk_disable_unprepare(priv->clk);
- for (i = 0; i < priv->config.rings; i++)
+ for (i = 0; i < priv->config.rings; i++) {
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
destroy_workqueue(priv->ring[i].workqueue);
+ }
return 0;
}
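The safexcel changes spread each ring's IRQ across CPUs at request time and clear the hint again on remove. Distilled to just the affinity handling (assuming an IRQ already requested and a per-ring index):

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/numa.h>

static void ring_irq_set_affinity(int irq, int ring_id)
{
	/* pick the ring_id-th CPU, wrapping over the online set */
	int cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);

	irq_set_affinity_hint(irq, get_cpu_mask(cpu));
}

static void ring_irq_clear_affinity(int irq)
{
	/* drop the hint before the IRQ is released to avoid a stale mask */
	irq_set_affinity_hint(irq, NULL);
}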
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 94016c505abb..7c5fe382d272 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -707,6 +707,9 @@ struct safexcel_ring {
*/
struct crypto_async_request *req;
struct crypto_async_request *backlog;
+
+ /* irq of this ring */
+ int irq;
};
/* EIP integration context flags */
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 0c5e80c3f6e3..1ac3253b7903 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1300,6 +1300,7 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = {
.cra_driver_name = "safexcel-ecb-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1337,6 +1338,7 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
.cra_driver_name = "safexcel-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1374,6 +1376,7 @@ struct safexcel_alg_template safexcel_alg_cfb_aes = {
.cra_driver_name = "safexcel-cfb-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1411,6 +1414,7 @@ struct safexcel_alg_template safexcel_alg_ofb_aes = {
.cra_driver_name = "safexcel-ofb-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1485,6 +1489,7 @@ struct safexcel_alg_template safexcel_alg_ctr_aes = {
.cra_driver_name = "safexcel-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1545,6 +1550,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des = {
.cra_driver_name = "safexcel-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1582,6 +1588,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des = {
.cra_driver_name = "safexcel-ecb-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1642,6 +1649,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
.cra_driver_name = "safexcel-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1679,6 +1687,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
.cra_driver_name = "safexcel-ecb-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1751,6 +1760,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1786,6 +1796,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1821,6 +1832,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1856,6 +1868,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1891,6 +1904,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1927,6 +1941,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = {
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1963,6 +1978,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede = {
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1999,6 +2015,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede = {
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2035,6 +2052,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede = {
.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2071,6 +2089,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = {
.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2107,6 +2126,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des = {
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2143,6 +2163,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des = {
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2179,6 +2200,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des = {
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2215,6 +2237,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des = {
.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2251,6 +2274,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = {
.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2285,6 +2309,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2319,6 +2344,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2353,6 +2379,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2387,6 +2414,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2421,6 +2449,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha384-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2534,6 +2563,7 @@ struct safexcel_alg_template safexcel_alg_xts_aes = {
.cra_driver_name = "safexcel-xts-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = XTS_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2646,6 +2676,7 @@ struct safexcel_alg_template safexcel_alg_gcm = {
.cra_driver_name = "safexcel-gcm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2769,6 +2800,7 @@ struct safexcel_alg_template safexcel_alg_ccm = {
.cra_driver_name = "safexcel-ccm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2832,6 +2864,7 @@ struct safexcel_alg_template safexcel_alg_chacha20 = {
.cra_driver_name = "safexcel-chacha20",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2993,6 +3026,7 @@ struct safexcel_alg_template safexcel_alg_chachapoly = {
/* +1 to put it above HW chacha + SW poly */
.cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
@@ -3032,6 +3066,7 @@ struct safexcel_alg_template safexcel_alg_chachapoly_esp = {
/* +1 to put it above HW chacha + SW poly */
.cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
@@ -3110,6 +3145,7 @@ struct safexcel_alg_template safexcel_alg_ecb_sm4 = {
.cra_driver_name = "safexcel-ecb-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3147,6 +3183,7 @@ struct safexcel_alg_template safexcel_alg_cbc_sm4 = {
.cra_driver_name = "safexcel-cbc-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3184,6 +3221,7 @@ struct safexcel_alg_template safexcel_alg_ofb_sm4 = {
.cra_driver_name = "safexcel-ofb-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3221,6 +3259,7 @@ struct safexcel_alg_template safexcel_alg_cfb_sm4 = {
.cra_driver_name = "safexcel-cfb-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3273,6 +3312,7 @@ struct safexcel_alg_template safexcel_alg_ctr_sm4 = {
.cra_driver_name = "safexcel-ctr-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3332,6 +3372,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4 = {
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3441,6 +3482,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4 = {
.cra_driver_name = "safexcel-authenc-hmac-sm3-cbc-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SM4_BLOCK_SIZE,
@@ -3476,6 +3518,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4 = {
.cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3510,6 +3553,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4 = {
.cra_driver_name = "safexcel-authenc-hmac-sm3-ctr-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3578,6 +3622,7 @@ struct safexcel_alg_template safexcel_alg_rfc4106_gcm = {
.cra_driver_name = "safexcel-rfc4106-gcm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3622,6 +3667,7 @@ struct safexcel_alg_template safexcel_alg_rfc4543_gcm = {
.cra_driver_name = "safexcel-rfc4543-gcm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3713,6 +3759,7 @@ struct safexcel_alg_template safexcel_alg_rfc4309_ccm = {
.cra_driver_name = "safexcel-rfc4309-ccm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
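Every template in this file gains CRYPTO_ALG_ALLOCATES_MEMORY, which advertises that the implementation may allocate memory while processing a request. Callers that cannot tolerate that (memory-reclaim paths, for instance) can exclude such implementations at allocation time; a sketch, assuming the crypto API's standard type/mask matching:

#include <crypto/skcipher.h>

static struct crypto_skcipher *alloc_nonallocating_cbc_aes(void)
{
	/*
	 * type = 0, mask = CRYPTO_ALG_ALLOCATES_MEMORY: only match
	 * implementations with the flag clear.
	 */
	return crypto_alloc_skcipher("cbc(aes)", 0,
				     CRYPTO_ALG_ALLOCATES_MEMORY);
}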
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 43962bc709c6..16a467969d8e 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -992,6 +992,7 @@ struct safexcel_alg_template safexcel_alg_sha1 = {
.cra_driver_name = "safexcel-sha1",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1235,6 +1236,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
.cra_driver_name = "safexcel-hmac-sha1",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1291,6 +1293,7 @@ struct safexcel_alg_template safexcel_alg_sha256 = {
.cra_driver_name = "safexcel-sha256",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1347,6 +1350,7 @@ struct safexcel_alg_template safexcel_alg_sha224 = {
.cra_driver_name = "safexcel-sha224",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1418,6 +1422,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
.cra_driver_name = "safexcel-hmac-sha224",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1489,6 +1494,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
.cra_driver_name = "safexcel-hmac-sha256",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1545,6 +1551,7 @@ struct safexcel_alg_template safexcel_alg_sha512 = {
.cra_driver_name = "safexcel-sha512",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1601,6 +1608,7 @@ struct safexcel_alg_template safexcel_alg_sha384 = {
.cra_driver_name = "safexcel-sha384",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1672,6 +1680,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
.cra_driver_name = "safexcel-hmac-sha512",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1743,6 +1752,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
.cra_driver_name = "safexcel-hmac-sha384",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1799,6 +1809,7 @@ struct safexcel_alg_template safexcel_alg_md5 = {
.cra_driver_name = "safexcel-md5",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1871,6 +1882,7 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = {
.cra_driver_name = "safexcel-hmac-md5",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1952,6 +1964,7 @@ struct safexcel_alg_template safexcel_alg_crc32 = {
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY |
CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -2041,6 +2054,7 @@ struct safexcel_alg_template safexcel_alg_cbcmac = {
.cra_driver_name = "safexcel-cbcmac-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -2136,6 +2150,7 @@ struct safexcel_alg_template safexcel_alg_xcbcmac = {
.cra_driver_name = "safexcel-xcbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -2232,6 +2247,7 @@ struct safexcel_alg_template safexcel_alg_cmac = {
.cra_driver_name = "safexcel-cmac-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -2288,6 +2304,7 @@ struct safexcel_alg_template safexcel_alg_sm3 = {
.cra_driver_name = "safexcel-sm3",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -2359,6 +2376,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sm3 = {
.cra_driver_name = "safexcel-hmac-sm3",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index ad73fc946682..f478bb0a566a 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1402,7 +1402,8 @@ static int __init ixp_module_init(void)
/* block ciphers */
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC;
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
if (!cra->setkey)
cra->setkey = ablk_setkey;
if (!cra->encrypt)
@@ -1435,7 +1436,8 @@ static int __init ixp_module_init(void)
/* authenc */
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC;
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
cra->setkey = cra->setkey ?: aead_setkey;
cra->setauthsize = aead_setauthsize;
cra->encrypt = aead_encrypt;
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 8a5f0b0bdf77..d63bca9718dc 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -438,7 +438,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
struct mv_cesa_dev *cesa;
struct mv_cesa_engine *engines;
struct resource *res;
- int irq, ret, i;
+ int irq, ret, i, cpu;
u32 sram_size;
if (cesa_dev) {
@@ -505,6 +505,8 @@ static int mv_cesa_probe(struct platform_device *pdev)
goto err_cleanup;
}
+ engine->irq = irq;
+
/*
* Not all platforms can gate the CESA clocks: do not complain
* if the clock does not exist.
@@ -548,6 +550,10 @@ static int mv_cesa_probe(struct platform_device *pdev)
if (ret)
goto err_cleanup;
+ /* Set affinity */
+ cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE);
+ irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+
crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
atomic_set(&engine->load, 0);
INIT_LIST_HEAD(&engine->complete_queue);
@@ -570,6 +576,8 @@ err_cleanup:
clk_disable_unprepare(cesa->engines[i].zclk);
clk_disable_unprepare(cesa->engines[i].clk);
mv_cesa_put_sram(pdev, i);
+ if (cesa->engines[i].irq > 0)
+ irq_set_affinity_hint(cesa->engines[i].irq, NULL);
}
return ret;
@@ -586,6 +594,7 @@ static int mv_cesa_remove(struct platform_device *pdev)
clk_disable_unprepare(cesa->engines[i].zclk);
clk_disable_unprepare(cesa->engines[i].clk);
mv_cesa_put_sram(pdev, i);
+ irq_set_affinity_hint(cesa->engines[i].irq, NULL);
}
return 0;
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index e8632d5f343f..0c9cbb681e49 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -457,6 +457,7 @@ struct mv_cesa_engine {
atomic_t load;
struct mv_cesa_tdma_chain chain;
struct list_head complete_queue;
+ int irq;
};
/**
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index f133c2ccb5ae..45b4d7a29833 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -508,7 +508,8 @@ struct skcipher_alg mv_cesa_ecb_des_alg = {
.cra_name = "ecb(des)",
.cra_driver_name = "mv-ecb-des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
.cra_alignmask = 0,
@@ -558,7 +559,8 @@ struct skcipher_alg mv_cesa_cbc_des_alg = {
.cra_name = "cbc(des)",
.cra_driver_name = "mv-cbc-des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
.cra_alignmask = 0,
@@ -616,7 +618,8 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "mv-ecb-des3-ede",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
.cra_alignmask = 0,
@@ -669,7 +672,8 @@ struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "mv-cbc-des3-ede",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
.cra_alignmask = 0,
@@ -741,7 +745,8 @@ struct skcipher_alg mv_cesa_ecb_aes_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "mv-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
.cra_alignmask = 0,
@@ -790,7 +795,8 @@ struct skcipher_alg mv_cesa_cbc_aes_alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "mv-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
.cra_alignmask = 0,
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index b971284332b6..bd0bd9ffd6e9 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -921,6 +921,7 @@ struct ahash_alg mv_md5_alg = {
.cra_driver_name = "mv-md5",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
@@ -991,6 +992,7 @@ struct ahash_alg mv_sha1_alg = {
.cra_driver_name = "mv-sha1",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
@@ -1064,6 +1066,7 @@ struct ahash_alg mv_sha256_alg = {
.cra_driver_name = "mv-sha256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
@@ -1298,6 +1301,7 @@ struct ahash_alg mv_ahmac_md5_alg = {
.cra_driver_name = "mv-hmac-md5",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
@@ -1368,6 +1372,7 @@ struct ahash_alg mv_ahmac_sha1_alg = {
.cra_driver_name = "mv-hmac-sha1",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
@@ -1438,6 +1443,7 @@ struct ahash_alg mv_ahmac_sha256_alg = {
.cra_driver_name = "mv-hmac-sha256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index fec8f3b9b112..cc103b1bc224 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -878,11 +878,11 @@ static int copy_ucode_to_dma_mem(struct device *dev,
/* Byte swap 64-bit */
for (i = 0; i < (ucode->size / 8); i++)
- ((u64 *)ucode->align_va)[i] =
+ ((__be64 *)ucode->align_va)[i] =
cpu_to_be64(((u64 *)ucode->align_va)[i]);
/* Ucode needs 16-bit swap */
for (i = 0; i < (ucode->size / 2); i++)
- ((u16 *)ucode->align_va)[i] =
+ ((__be16 *)ucode->align_va)[i] =
cpu_to_be16(((u16 *)ucode->align_va)[i]);
return 0;
}
@@ -1463,8 +1463,8 @@ int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
struct otx_cpt_eng_grps *eng_grps,
int pf_type)
{
- struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = { 0 };
- struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
+ struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
struct tar_arch_info_t *tar_arch = NULL;
char *tar_filename;
int i, ret = 0;
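The copy_ucode_to_dma_mem() hunk above is purely a sparse-annotation fix: the stores already produced big-endian data, but writing through a __beNN pointer makes that explicit to static checkers without changing the generated code. The same shape as the swap_data64() rewrite in the otx_cptvf_algs.c hunks below (where the 32-bit variant additionally collapses into cpu_to_be32_array()):

#include <linux/types.h>
#include <asm/byteorder.h>

static void swap64_inplace(void *buf, size_t len)
{
	__be64 *dst = buf;	/* big-endian view of the same buffer */
	u64 *src = buf;		/* CPU-order view */
	size_t i;

	for (i = 0; i < len / 8; i++)
		dst[i] = cpu_to_be64(src[i]);
}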
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
index 14f02b60d0c2..8620ac87a447 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
@@ -74,7 +74,7 @@ struct otx_cpt_ucode_ver_num {
struct otx_cpt_ucode_hdr {
struct otx_cpt_ucode_ver_num ver_num;
u8 ver_str[OTX_CPT_UCODE_VER_STR_SZ];
- u32 code_length;
+ __be32 code_length;
u32 padding[3];
};
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 1e0a1d70ebd3..90bb31329d4b 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -239,7 +239,6 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
int ivsize = crypto_skcipher_ivsize(stfm);
u32 start = req->cryptlen - ivsize;
- u64 *ctrl_flags = NULL;
gfp_t flags;
flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
@@ -280,8 +279,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
- ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
- *ctrl_flags = cpu_to_be64(*ctrl_flags);
+ fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);
/*
* Storing Packet Data Information in offset
@@ -692,20 +690,17 @@ static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
static inline void swap_data32(void *buf, u32 len)
{
- u32 *store = (u32 *) buf;
- int i = 0;
-
- for (i = 0 ; i < len/sizeof(u32); i++, store++)
- *store = cpu_to_be32(*store);
+ cpu_to_be32_array(buf, buf, len / 4);
}
static inline void swap_data64(void *buf, u32 len)
{
- u64 *store = (u64 *) buf;
+ __be64 *dst = buf;
+ u64 *src = buf;
int i = 0;
- for (i = 0 ; i < len/sizeof(u64); i++, store++)
- *store = cpu_to_be64(*store);
+ for (i = 0 ; i < len / 8; i++, src++, dst++)
+ *dst = cpu_to_be64p(src);
}
static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
@@ -1012,7 +1007,7 @@ static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
/* Unknown cipher type */
return -EINVAL;
}
- rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.flags);
+ rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);
req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
@@ -1032,7 +1027,7 @@ static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
fctx->enc.enc_ctrl.e.mac_len = mac_len;
- fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.flags);
+ fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);
/*
* Storing Packet Data Information in offset
@@ -1306,7 +1301,7 @@ static int otx_cpt_aead_null_decrypt(struct aead_request *req)
static struct skcipher_alg otx_cpt_skciphers[] = { {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "cpt_xts_aes",
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
.base.cra_alignmask = 7,
@@ -1323,7 +1318,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
}, {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cpt_cbc_aes",
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
.base.cra_alignmask = 7,
@@ -1340,7 +1335,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
}, {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "cpt_ecb_aes",
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
.base.cra_alignmask = 7,
@@ -1357,7 +1352,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
}, {
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "cpt_cfb_aes",
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
.base.cra_alignmask = 7,
@@ -1374,7 +1369,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
}, {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cpt_cbc_des3_ede",
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
.base.cra_alignmask = 7,
@@ -1391,7 +1386,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
}, {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "cpt_ecb_des3_ede",
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
.base.cra_alignmask = 7,
@@ -1412,7 +1407,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1431,7 +1426,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1450,7 +1445,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1469,7 +1464,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1488,7 +1483,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha1_ecb_null",
.cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1507,7 +1502,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha256_ecb_null",
.cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1526,7 +1521,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha384_ecb_null",
.cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1545,7 +1540,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha512_ecb_null",
.cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1564,7 +1559,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "cpt_rfc4106_gcm_aes",
.cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
index 67cc0025f5d5..4181b5c5c356 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
@@ -66,7 +66,8 @@ enum otx_cpt_aes_key_len {
};
union otx_cpt_encr_ctrl {
- u64 flags;
+ __be64 flags;
+ u64 cflags;
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 enc_cipher:4;
@@ -138,7 +139,8 @@ struct otx_cpt_des3_ctx {
};
union otx_cpt_offset_ctrl_word {
- u64 flags;
+ __be64 flags;
+ u64 cflags;
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved:32;
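The flags/cflags pair added to these unions is the sparse-friendly way to keep one storage location that is both filled in CPU order and handed to hardware big-endian: assignments become flags = cpu_to_be64(cflags) with no pointer casts. Schematically (bitfield layout illustrative only):

#include <linux/types.h>

union ctrl_word {
	__be64 flags;	/* big-endian view written to hardware */
	u64 cflags;	/* CPU-order view the driver fills in */
	struct {
		u64 reserved:32;
		u64 enc_data_offset:16;
		u64 iv_offset:8;
		u64 auth_offset:8;
	} s;
};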
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
index 239195cccf93..cbc3d7869ebe 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
@@ -202,11 +202,10 @@ static inline int setup_sgio_list(struct pci_dev *pdev,
info->dlen = dlen;
info->in_buffer = (u8 *)info + info_len;
- ((u16 *)info->in_buffer)[0] = req->outcnt;
- ((u16 *)info->in_buffer)[1] = req->incnt;
+ ((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt);
+ ((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt);
((u16 *)info->in_buffer)[2] = 0;
((u16 *)info->in_buffer)[3] = 0;
- *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer);
/* Setup gather (input) components */
if (setup_sgio_components(pdev, req->in, req->incnt,
@@ -367,8 +366,6 @@ static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
- /* 64-bit swap for microcode data reads, not needed for addresses*/
- iq_cmd.cmd.u64 = cpu_to_be64(iq_cmd.cmd.u64);
iq_cmd.dptr = info->dptr_baddr;
iq_cmd.rptr = info->rptr_baddr;
iq_cmd.cptr.u64 = 0;
@@ -436,7 +433,7 @@ static int cpt_process_ccode(struct pci_dev *pdev,
u8 ccode = cpt_status->s.compcode;
union otx_cpt_error_code ecode;
- ecode.u = be64_to_cpu(*((u64 *) cpt_info->out_buffer));
+ ecode.u = be64_to_cpup((__be64 *)cpt_info->out_buffer);
switch (ccode) {
case CPT_COMP_E_FAULT:
dev_err(&pdev->dev,
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h
index a4c9ff730b13..d912fe0c532d 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h
@@ -92,10 +92,10 @@ union otx_cpt_ctrl_info {
union otx_cpt_iq_cmd_word0 {
u64 u64;
struct {
- u16 opcode;
- u16 param1;
- u16 param2;
- u16 dlen;
+ __be16 opcode;
+ __be16 param1;
+ __be16 param2;
+ __be16 dlen;
} s;
};
@@ -123,16 +123,16 @@ struct otx_cpt_sglist_component {
union {
u64 len;
struct {
- u16 len0;
- u16 len1;
- u16 len2;
- u16 len3;
+ __be16 len0;
+ __be16 len1;
+ __be16 len2;
+ __be16 len3;
} s;
} u;
- u64 ptr0;
- u64 ptr1;
- u64 ptr2;
- u64 ptr3;
+ __be64 ptr0;
+ __be64 ptr1;
+ __be64 ptr2;
+ __be64 ptr3;
};
struct otx_cpt_pending_entry {
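The same idea applies to the DMA descriptors above: declaring the hardware-visible members __be16/__be64 forces every store site to carry an explicit conversion. A reduced sketch:

#include <linux/types.h>
#include <asm/byteorder.h>

struct sg_comp {
	__be16 len0;
	__be64 ptr0;
};

static void fill_comp(struct sg_comp *c, u16 len, dma_addr_t addr)
{
	c->len0 = cpu_to_be16(len);
	c->ptr0 = cpu_to_be64(addr);
}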
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 78d660d963e2..4ad3571ab6af 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -137,8 +137,6 @@ struct mtk_aes_gcm_ctx {
u32 authsize;
size_t textlen;
-
- struct crypto_skcipher *ctr;
};
struct mtk_aes_drv {
@@ -996,17 +994,8 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
u32 keylen)
{
struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
- struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
- struct crypto_skcipher *ctr = gctx->ctr;
- struct {
- u32 hash[4];
- u8 iv[8];
-
- struct crypto_wait wait;
-
- struct scatterlist sg[1];
- struct skcipher_request req;
- } *data;
+ u8 hash[AES_BLOCK_SIZE] __aligned(4) = {};
+ struct crypto_aes_ctx aes_ctx;
int err;
switch (keylen) {
@@ -1026,39 +1015,18 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
ctx->keylen = SIZE_IN_WORDS(keylen);
- /* Same as crypto_gcm_setkey() from crypto/gcm.c */
- crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(ctr, key, keylen);
+ err = aes_expandkey(&aes_ctx, key, keylen);
if (err)
return err;
- data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- crypto_init_wait(&data->wait);
- sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
- skcipher_request_set_tfm(&data->req, ctr);
- skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &data->wait);
- skcipher_request_set_crypt(&data->req, data->sg, data->sg,
- AES_BLOCK_SIZE, data->iv);
-
- err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
- &data->wait);
- if (err)
- goto out;
+ aes_encrypt(&aes_ctx, hash, hash);
+ memzero_explicit(&aes_ctx, sizeof(aes_ctx));
mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
- mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash,
+ mtk_aes_write_state_be(ctx->key + ctx->keylen, (const u32 *)hash,
AES_BLOCK_SIZE);
-out:
- kzfree(data);
- return err;
+
+ return 0;
}
static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
@@ -1095,32 +1063,17 @@ static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
- ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->ctr)) {
- pr_err("Error allocating ctr(aes)\n");
- return PTR_ERR(ctx->ctr);
- }
-
crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
ctx->base.start = mtk_aes_gcm_start;
return 0;
}
-static void mtk_aes_gcm_exit(struct crypto_aead *aead)
-{
- struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
-
- crypto_free_skcipher(ctx->ctr);
-}
-
static struct aead_alg aes_gcm_alg = {
.setkey = mtk_aes_gcm_setkey,
.setauthsize = mtk_aes_gcm_setauthsize,
.encrypt = mtk_aes_gcm_encrypt,
.decrypt = mtk_aes_gcm_decrypt,
.init = mtk_aes_gcm_init,
- .exit = mtk_aes_gcm_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
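The mtk-aes setkey rewrite leans on the fact that GCM's hash subkey H is just the block cipher applied to an all-zero block, so the library AES routines can compute it synchronously instead of round-tripping a request through a ctr(aes) transform. The core of it, as a standalone helper:

#include <crypto/aes.h>
#include <linux/string.h>

static int compute_gcm_hash_key(const u8 *key, unsigned int keylen,
				u8 hash[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx aes_ctx;
	int err;

	err = aes_expandkey(&aes_ctx, key, keylen);
	if (err)
		return err;

	memset(hash, 0, AES_BLOCK_SIZE);
	aes_encrypt(&aes_ctx, hash, hash);		/* H = E_K(0^128) */
	memzero_explicit(&aes_ctx, sizeof(aes_ctx));	/* scrub round keys */
	return 0;
}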
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index d84530293036..909a7eb748e3 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -97,7 +97,7 @@ struct dcp_async_ctx {
unsigned int hot:1;
/* Crypto-specific context */
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
};
@@ -105,6 +105,7 @@ struct dcp_async_ctx {
struct dcp_aes_req_ctx {
unsigned int enc:1;
unsigned int ecb:1;
+ struct skcipher_request fallback_req; // keep at the end
};
struct dcp_sha_req_ctx {
@@ -426,21 +427,20 @@ static int dcp_chan_thread_aes(void *data)
static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
int ret;
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
req->cryptlen, req->iv);
if (enc)
- ret = crypto_skcipher_encrypt(subreq);
+ ret = crypto_skcipher_encrypt(&rctx->fallback_req);
else
- ret = crypto_skcipher_decrypt(subreq);
-
- skcipher_request_zero(subreq);
+ ret = crypto_skcipher_decrypt(&rctx->fallback_req);
return ret;
}
@@ -510,24 +510,25 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
* but is supported by in-kernel software implementation, we use
* software fallback.
*/
- crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(actx->fallback,
+ crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(actx->fallback,
tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(actx->fallback, key, len);
+ return crypto_skcipher_setkey(actx->fallback, key, len);
}
static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
- struct crypto_sync_skcipher *blk;
+ struct crypto_skcipher *blk;
- blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
actx->fallback = blk;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
+ crypto_skcipher_reqsize(blk));
return 0;
}
@@ -535,7 +536,7 @@ static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(actx->fallback);
+ crypto_free_skcipher(actx->fallback);
}
/*
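The mxs-dcp conversion (and the omap-aes one below) replaces the on-stack sync fallback with a fallback request embedded at the end of the driver's request context; crypto_skcipher_set_reqsize() grows the context by crypto_skcipher_reqsize(fallback) so the space is always there, and the original completion callback is passed through so the fallback may run fully asynchronously. Reduced to its skeleton (my_reqctx is a hypothetical context):

#include <crypto/skcipher.h>

struct my_reqctx {
	/* driver per-request state ... */
	struct skcipher_request fallback_req;	/* must stay last */
};

static int fallback_crypt(struct skcipher_request *req,
			  struct crypto_skcipher *fallback, bool enc)
{
	struct my_reqctx *rctx = skcipher_request_ctx(req);

	skcipher_request_set_tfm(&rctx->fallback_req, fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	return enc ? crypto_skcipher_encrypt(&rctx->fallback_req)
		   : crypto_skcipher_decrypt(&rctx->fallback_req);
}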
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 6a828bbecea4..d8aec5153b21 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1382,7 +1382,8 @@ static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
alg->base.cra_priority = N2_CRA_PRIORITY;
- alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
+ alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
alg->base.cra_blocksize = tmpl->block_size;
p->enc_type = tmpl->enc_type;
alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index b5aff20c5900..4fd14d90cc40 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -139,7 +139,7 @@ int omap_aes_write_ctrl(struct omap_aes_dev *dd)
for (i = 0; i < key32; i++) {
omap_aes_write(dd, AES_REG_KEY(dd, i),
- __le32_to_cpu(dd->ctx->key[i]));
+ (__force u32)cpu_to_le32(dd->ctx->key[i]));
}
if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv)
@@ -363,7 +363,7 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
int err;
- pr_debug("total: %d\n", dd->total);
+ pr_debug("total: %zu\n", dd->total);
if (!dd->pio_only) {
err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
@@ -409,7 +409,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
- pr_debug("total: %d\n", dd->total);
+ pr_debug("total: %zu\n", dd->total);
omap_aes_dma_stop(dd);
@@ -548,20 +548,18 @@ static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
!!(mode & FLAGS_CBC));
if (req->cryptlen < aes_fallback_sz) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags, NULL,
- NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
if (mode & FLAGS_ENCRYPT)
- ret = crypto_skcipher_encrypt(subreq);
+ ret = crypto_skcipher_encrypt(&rctx->fallback_req);
else
- ret = crypto_skcipher_decrypt(subreq);
-
- skcipher_request_zero(subreq);
+ ret = crypto_skcipher_decrypt(&rctx->fallback_req);
return ret;
}
dd = omap_aes_find_dev(rctx);
@@ -590,11 +588,11 @@ static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
return 0;
@@ -640,15 +638,16 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
{
const char *name = crypto_tfm_alg_name(&tfm->base);
struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_sync_skcipher *blk;
+ struct crypto_skcipher *blk;
- blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
ctx->fallback = blk;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) +
+ crypto_skcipher_reqsize(blk));
ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
ctx->enginectx.op.unprepare_request = NULL;
@@ -662,7 +661,7 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->fallback)
- crypto_free_sync_skcipher(ctx->fallback);
+ crypto_free_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
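The omap-aes changes above follow the standard recipe for converting a sync (on-stack) fallback into a full async skcipher fallback: embed a struct skcipher_request at the end of the driver's request context, enlarge the request size by the fallback's own reqsize, and forward the original completion callback. A condensed, self-contained sketch of that recipe, using hypothetical my_ctx/my_reqctx names:

#include <crypto/skcipher.h>
#include <linux/err.h>

struct my_ctx {
	struct crypto_skcipher *fallback;
};

struct my_reqctx {
	unsigned long mode;
	struct skcipher_request fallback_req;	/* must stay last */
};

static int my_init_tfm(struct crypto_skcipher *tfm)
{
	struct my_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	/* Reserve room for the fallback's request context too. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct my_reqctx) +
				    crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static int my_crypt_fallback(struct skcipher_request *req, bool enc)
{
	struct my_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct my_reqctx *rctx = skcipher_request_ctx(req);

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	/* Forward the caller's callback so it sees one completion. */
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);
	return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
		     crypto_skcipher_decrypt(&rctx->fallback_req);
}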
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 2d111bf906e1..23d073e87bb8 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -97,7 +97,7 @@ struct omap_aes_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
};
struct omap_aes_gcm_ctx {
@@ -110,6 +110,7 @@ struct omap_aes_reqctx {
unsigned long mode;
u8 iv[AES_BLOCK_SIZE];
u32 auth_tag[AES_BLOCK_SIZE / sizeof(u32)];
+ struct skcipher_request fallback_req; // keep at the end
};
#define OMAP_AES_QUEUE_LENGTH 1
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 8eda43319204..c9d38bcfd1c7 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -87,7 +87,7 @@ struct omap_des_ctx {
struct omap_des_dev *dd;
int keylen;
- u32 key[(3 * DES_KEY_SIZE) / sizeof(u32)];
+ __le32 key[(3 * DES_KEY_SIZE) / sizeof(u32)];
unsigned long flags;
};
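Retyping the key buffer as __le32 lets sparse verify that every store goes through an explicit cpu_to_le32() conversion. A minimal sketch of an annotated key copy, with hypothetical names:

#include <linux/types.h>
#include <asm/byteorder.h>

struct hw_key {
	__le32 key[6];	/* 3 * DES_KEY_SIZE / sizeof(u32) */
};

static void hw_store_key(struct hw_key *hw, const u32 *key, int words)
{
	int i;

	/* The byte swap is now visible to both readers and sparse. */
	for (i = 0; i < words; i++)
		hw->key[i] = cpu_to_le32(key[i]);
}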
@@ -461,7 +461,7 @@ static int omap_des_crypt_dma_start(struct omap_des_dev *dd)
crypto_skcipher_reqtfm(dd->req));
int err;
- pr_debug("total: %d\n", dd->total);
+ pr_debug("total: %zd\n", dd->total);
if (!dd->pio_only) {
err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
@@ -504,7 +504,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
{
- pr_debug("total: %d\n", dd->total);
+ pr_debug("total: %zd\n", dd->total);
omap_des_dma_stop(dd);
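These debug-format fixes, like the ones in omap-aes, exist because dd->total is a size_t, which %d mis-declares and which trips -Wformat on 64-bit builds. The general rule, as a micro-example:

#include <linux/printk.h>
#include <linux/types.h>

static void log_len(size_t total, ssize_t signed_total)
{
	/* %zu prints size_t, %zd prints ssize_t. */
	pr_debug("total: %zu, signed total: %zd\n", total, signed_total);
}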
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 82691a057d2a..954d703f2981 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -357,10 +357,10 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
if (big_endian)
for (i = 0; i < d; i++)
- hash[i] = be32_to_cpu(in[i]);
+ hash[i] = be32_to_cpup((__be32 *)in + i);
else
for (i = 0; i < d; i++)
- hash[i] = le32_to_cpu(in[i]);
+ hash[i] = le32_to_cpup((__le32 *)in + i);
}
static int omap_sham_hw_init(struct omap_sham_dev *dd)
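be32_to_cpup() and le32_to_cpup() take properly annotated __be32/__le32 pointers, which is why the casts above replace the old unannotated be32_to_cpu(in[i]) calls; the generated code is unchanged. A standalone sketch, assuming in points at raw hash words read from the device:

#include <linux/types.h>
#include <asm/byteorder.h>

static void copy_ready_hash(u32 *hash, const u32 *in, int words,
			    bool big_endian)
{
	int i;

	if (big_endian)
		for (i = 0; i < words; i++)
			hash[i] = be32_to_cpup((const __be32 *)in + i);
	else
		for (i = 0; i < words; i++)
			hash[i] = le32_to_cpup((const __le32 *)in + i);
}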
@@ -522,7 +522,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
int mlen;
struct sg_mapping_iter mi;
- dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
+ dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n",
ctx->digcnt, length, final);
dd->pdata->write_ctrl(dd, length, final, 0);
@@ -588,7 +588,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
struct dma_slave_config cfg;
int ret;
- dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
+ dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n",
ctx->digcnt, length, final);
if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
@@ -871,7 +871,7 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
nbytes += req->nbytes - rctx->offset;
dev_dbg(rctx->dd->dev,
- "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
+ "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n",
__func__, nbytes, bs, rctx->total, rctx->offset,
rctx->bufcnt);
@@ -932,7 +932,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
return 0;
}
-struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
+static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
{
struct omap_sham_dev *dd;
@@ -1023,7 +1023,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
!(dd->flags & BIT(FLAGS_HUGE));
- dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d",
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d",
ctx->total, ctx->digcnt, final);
if (ctx->total < get_block_size(ctx) ||
@@ -1036,7 +1036,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
err = omap_sham_xmit_dma(dd, ctx->total, final);
	/* wait for DMA completion before we can take more data */
- dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
+ dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt);
return err;
}
@@ -1097,7 +1097,7 @@ static int omap_sham_finish(struct ahash_request *req)
err = omap_sham_finish_hmac(req);
}
- dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+ dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt);
return err;
}
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 7384e91c8b32..dac6eb37fff9 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -86,6 +86,7 @@ struct spacc_req {
dma_addr_t src_addr, dst_addr;
struct spacc_ddt *src_ddt, *dst_ddt;
void (*complete)(struct spacc_req *req);
+ struct skcipher_request fallback_req; // keep at the end
};
struct spacc_aead {
@@ -158,7 +159,7 @@ struct spacc_ablk_ctx {
* The fallback cipher. If the operation can't be done in hardware,
 * fall back to a software version.
*/
- struct crypto_sync_skcipher *sw_cipher;
+ struct crypto_skcipher *sw_cipher;
};
/* AEAD cipher context. */
@@ -792,13 +793,13 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
* Set the fallback transform to use the same request flags as
* the hardware transform.
*/
- crypto_sync_skcipher_clear_flags(ctx->sw_cipher,
+ crypto_skcipher_clear_flags(ctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(ctx->sw_cipher,
+ crypto_skcipher_set_flags(ctx->sw_cipher,
cipher->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);
+ err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
if (err)
goto sw_setkey_failed;
}
@@ -900,7 +901,7 @@ static int spacc_ablk_do_fallback(struct skcipher_request *req,
struct crypto_tfm *old_tfm =
crypto_skcipher_tfm(crypto_skcipher_reqtfm(req));
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
+ struct spacc_req *dev_req = skcipher_request_ctx(req);
int err;
/*
@@ -908,13 +909,13 @@ static int spacc_ablk_do_fallback(struct skcipher_request *req,
* the ciphering has completed, put the old transform back into the
* request.
*/
- skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher);
- skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
+ skcipher_request_set_tfm(&dev_req->fallback_req, ctx->sw_cipher);
+ skcipher_request_set_callback(&dev_req->fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(&dev_req->fallback_req, req->src, req->dst,
req->cryptlen, req->iv);
- err = is_encrypt ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
+ err = is_encrypt ? crypto_skcipher_encrypt(&dev_req->fallback_req) :
+ crypto_skcipher_decrypt(&dev_req->fallback_req);
return err;
}
@@ -1007,19 +1008,24 @@ static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm)
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->sw_cipher = crypto_alloc_sync_skcipher(
- alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ ctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher)) {
dev_warn(engine->dev, "failed to allocate fallback for %s\n",
alg->base.cra_name);
return PTR_ERR(ctx->sw_cipher);
}
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req) +
+ crypto_skcipher_reqsize(ctx->sw_cipher));
+ } else {
+		/* use the request size without the trailing fallback skcipher_request */
+ crypto_skcipher_set_reqsize(tfm, offsetof(struct spacc_req,
+ fallback_req));
}
+
ctx->generic.key_offs = spacc_alg->key_offs;
ctx->generic.iv_offs = spacc_alg->iv_offs;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req));
-
return 0;
}
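When a picoxcell algorithm has no fallback, the trailing skcipher_request would be dead weight, so the hunk above sizes the request context with offsetof() to stop just short of it. The trick in isolation, with a hypothetical my_req:

#include <crypto/skcipher.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct my_req {
	dma_addr_t src_addr, dst_addr;
	struct skcipher_request fallback_req;	/* keep at the end */
};

static void my_set_reqsize(struct crypto_skcipher *tfm,
			   struct crypto_skcipher *fallback)
{
	if (fallback)
		crypto_skcipher_set_reqsize(tfm, sizeof(struct my_req) +
					    crypto_skcipher_reqsize(fallback));
	else
		/* No fallback: exclude the unused trailing member. */
		crypto_skcipher_set_reqsize(tfm,
			offsetof(struct my_req, fallback_req));
}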
@@ -1027,7 +1033,7 @@ static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(ctx->sw_cipher);
+ crypto_free_skcipher(ctx->sw_cipher);
}
static int spacc_ablk_encrypt(struct skcipher_request *req)
@@ -1226,6 +1232,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
@@ -1251,6 +1258,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
@@ -1274,7 +1282,8 @@ static struct spacc_alg ipsec_engine_algs[] = {
.base.cra_driver_name = "cbc-des-picoxcell",
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
.base.cra_module = THIS_MODULE,
@@ -1298,7 +1307,8 @@ static struct spacc_alg ipsec_engine_algs[] = {
.base.cra_driver_name = "ecb-des-picoxcell",
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
.base.cra_module = THIS_MODULE,
@@ -1321,6 +1331,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.base.cra_driver_name = "cbc-des3-ede-picoxcell",
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
@@ -1345,6 +1356,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.base.cra_driver_name = "ecb-des3-ede-picoxcell",
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
@@ -1376,6 +1388,7 @@ static struct spacc_aead ipsec_engine_aeads[] = {
"cbc-aes-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1406,6 +1419,7 @@ static struct spacc_aead ipsec_engine_aeads[] = {
"cbc-aes-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1436,6 +1450,7 @@ static struct spacc_aead ipsec_engine_aeads[] = {
"cbc-aes-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1466,6 +1481,7 @@ static struct spacc_aead ipsec_engine_aeads[] = {
"cbc-3des-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1497,6 +1513,7 @@ static struct spacc_aead ipsec_engine_aeads[] = {
"cbc-3des-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1527,6 +1544,7 @@ static struct spacc_aead ipsec_engine_aeads[] = {
"cbc-3des-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1556,6 +1574,7 @@ static struct spacc_alg l2_engine_algs[] = {
.base.cra_driver_name = "f8-kasumi-picoxcell",
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = 8,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 6bc68bc00d76..aee494d3da52 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index afc9a0a86747..8b5dd2c94ebf 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_C3XXX_HW_DATA_H_
#define ADF_C3XXX_HW_DATA_H_
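Note the two SPDX comment styles used throughout these license conversions: per the kernel's license-rules documentation, C source files carry the tag in a C99 comment on the first line, while headers use a C-style comment:

// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)      (.c files)
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */   (.h files)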
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index d937cc7248a5..020d099409e5 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index d2d0ae445fd8..d2fedbd7113c 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
index 934f216acf39..7945a9cd1c60 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#ifndef ADF_C3XXXVF_HW_DATA_H_
#define ADF_C3XXXVF_HW_DATA_H_
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 1dc5ac859f7b..11039fe55f61 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index 618cec360b39..844ad5ed33fc 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
index 17a8a32d5c63..88504d2bf30d 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_C62X_HW_DATA_H_
#define ADF_C62X_HW_DATA_H_
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 2bc06c89d2fe..4ba9c14383af 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 38e4bc04f407..29fd3f1091ab 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
index a28d83e77422..a6c04cf7a43c 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#ifndef ADF_C62XVF_HW_DATA_H_
#define ADF_C62XVF_HW_DATA_H_
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index a68358b31292..b8b021d54bb5 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 33f0a6251e38..c1db8c26afb6 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
#include <linux/interrupt.h>
@@ -103,8 +59,8 @@ struct adf_accel_pci {
struct pci_dev *pci_dev;
struct adf_accel_msix msix_entries;
struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
- uint8_t revid;
- uint8_t sku;
+ u8 revid;
+ u8 sku;
} __packed;
enum dev_state {
@@ -144,7 +100,7 @@ static inline const char *get_sku_info(enum dev_sku_info info)
struct adf_hw_device_class {
const char *name;
const enum adf_device_type type;
- uint32_t instances;
+ u32 instances;
} __packed;
struct adf_cfg_device_data;
@@ -154,15 +110,15 @@ struct adf_etr_ring_data;
struct adf_hw_device_data {
struct adf_hw_device_class *dev_class;
- uint32_t (*get_accel_mask)(uint32_t fuse);
- uint32_t (*get_ae_mask)(uint32_t fuse);
- uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
- uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
- uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
- uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
- uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
- uint32_t (*get_pf2vf_offset)(uint32_t i);
- uint32_t (*get_vintmsk_offset)(uint32_t i);
+ u32 (*get_accel_mask)(u32 fuse);
+ u32 (*get_ae_mask)(u32 fuse);
+ u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
+ u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
+ u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
+ u32 (*get_num_aes)(struct adf_hw_device_data *self);
+ u32 (*get_num_accels)(struct adf_hw_device_data *self);
+ u32 (*get_pf2vf_offset)(u32 i);
+ u32 (*get_vintmsk_offset)(u32 i);
enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
int (*alloc_irq)(struct adf_accel_dev *accel_dev);
void (*free_irq)(struct adf_accel_dev *accel_dev);
@@ -173,25 +129,25 @@ struct adf_hw_device_data {
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
- const uint32_t **cfg);
+ const u32 **cfg);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
void (*reset_device)(struct adf_accel_dev *accel_dev);
const char *fw_name;
const char *fw_mmp_name;
- uint32_t fuses;
- uint32_t accel_capabilities_mask;
- uint32_t instance_id;
- uint16_t accel_mask;
- uint16_t ae_mask;
- uint16_t tx_rings_mask;
- uint8_t tx_rx_gap;
- uint8_t num_banks;
- uint8_t num_accel;
- uint8_t num_logical_accel;
- uint8_t num_engines;
- uint8_t min_iov_compat_ver;
+ u32 fuses;
+ u32 accel_capabilities_mask;
+ u32 instance_id;
+ u16 accel_mask;
+ u16 ae_mask;
+ u16 tx_rings_mask;
+ u8 tx_rx_gap;
+ u8 num_banks;
+ u8 num_accel;
+ u8 num_logical_accel;
+ u8 num_engines;
+ u8 min_iov_compat_ver;
} __packed;
/* CSR write macro */
@@ -248,8 +204,8 @@ struct adf_accel_dev {
struct tasklet_struct pf2vf_bh_tasklet;
struct mutex vf2pf_lock; /* protect CSR access */
struct completion iov_msg_completion;
- uint8_t compatible;
- uint8_t pf_version;
+ u8 compatible;
+ u8 pf_version;
} vf;
};
bool is_vf;
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
index a42fc42704be..c8ad85b882be 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/firmware.h>
#include <linux/pci.h>
#include "adf_cfg.h"
@@ -118,7 +74,7 @@ int adf_ae_start(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+ u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
if (!hw_data->fw_name)
return 0;
@@ -139,7 +95,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+ u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
if (!hw_data->fw_name)
return 0;
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index d28cba34773e..1c8ca151a963 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -1,53 +1,9 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/delay.h>
+#include <linux/iopoll.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
@@ -60,6 +16,9 @@
#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
#define ADF_ADMINMSG_LEN 32
+#define ADF_CONST_TABLE_SIZE 1024
+#define ADF_ADMIN_POLL_DELAY_US 20
+#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
static const u8 const_tab[1024] __aligned(1024) = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
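
The three new macros replace magic numbers in the polling logic that follows. The old loop gave up after 50 iterations of msleep(20), i.e. a worst-case wait of about 50 * 20 ms = 1 s checked at 20 ms granularity. The rewritten code below polls every ADF_ADMIN_POLL_DELAY_US = 20 us and times out after ADF_ADMIN_POLL_TIMEOUT_US = 5 * USEC_PER_SEC = 5,000,000 us = 5 s, so the mailbox is checked three orders of magnitude more often while the firmware gets five times the total budget to respond:

    old:  50 polls x 20 ms sleep  ->  ~1 s limit, 20 ms resolution
    new:  20 us poll interval     ->   5 s limit, 20 us resolution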
@@ -154,11 +113,13 @@ struct adf_admin_comms {
static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
void *in, void *out)
{
+ int ret;
+ u32 status;
struct adf_admin_comms *admin = accel_dev->admin;
int offset = ae * ADF_ADMINMSG_LEN * 2;
void __iomem *mailbox = admin->mailbox_addr;
int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
- int times, received;
+ struct icp_qat_fw_init_admin_req *request = in;
mutex_lock(&admin->lock);
@@ -169,46 +130,71 @@ static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
ADF_CSR_WR(mailbox, mb_offset, 1);
- received = 0;
- for (times = 0; times < 50; times++) {
- msleep(20);
- if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
- received = 1;
- break;
- }
- }
- if (received)
+
+ ret = readl_poll_timeout(mailbox + mb_offset, status,
+ status == 0, ADF_ADMIN_POLL_DELAY_US,
+ ADF_ADMIN_POLL_TIMEOUT_US);
+ if (ret < 0) {
+ /* Response timeout */
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send admin msg %d to accelerator %d\n",
+ request->cmd_id, ae);
+ } else {
+		/* Response received from the admin message; make the
+		 * response data available in the "out" parameter.
+		 */
memcpy(out, admin->virt_addr + offset +
ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
- else
- dev_err(&GET_DEV(accel_dev),
- "Failed to send admin msg to accelerator\n");
+ }
mutex_unlock(&admin->lock);
- return received ? 0 : -EFAULT;
+ return ret;
+}
+
+static int adf_send_admin(struct adf_accel_dev *accel_dev,
+ struct icp_qat_fw_init_admin_req *req,
+ struct icp_qat_fw_init_admin_resp *resp,
+ const unsigned long ae_mask)
+{
+ u32 ae;
+
+ for_each_set_bit(ae, &ae_mask, ICP_QAT_HW_AE_DELIMITER)
+ if (adf_put_admin_msg_sync(accel_dev, ae, req, resp) ||
+ resp->status)
+ return -EFAULT;
+
+ return 0;
}
-static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+static int adf_init_me(struct adf_accel_dev *accel_dev)
{
+ struct icp_qat_fw_init_admin_req req;
+ struct icp_qat_fw_init_admin_resp resp;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ u32 ae_mask = hw_device->ae_mask;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ req.cmd_id = ICP_QAT_FW_INIT_ME;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
+{
struct icp_qat_fw_init_admin_req req;
struct icp_qat_fw_init_admin_resp resp;
- int i;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ u32 ae_mask = hw_device->ae_mask;
- memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
- req.init_admin_cmd_id = cmd;
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
- if (cmd == ICP_QAT_FW_CONSTANTS_CFG) {
- req.init_cfg_sz = 1024;
- req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
- }
- for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
- memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
- if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
- resp.init_resp_hdr.status)
- return -EFAULT;
- }
- return 0;
+ req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
+ req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
/**
@@ -221,11 +207,13 @@ static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
*/
int adf_send_admin_init(struct adf_accel_dev *accel_dev)
{
- int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+ int ret;
+ ret = adf_init_me(accel_dev);
if (ret)
return ret;
- return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG);
+
+ return adf_set_fw_constants(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_send_admin_init);
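
The rewrite above swaps the hand-rolled sleep loop for readl_poll_timeout() from <linux/iopoll.h> and fans each request out to every accelerator engine whose bit is set in ae_mask via for_each_set_bit(). A self-contained sketch of the same two patterns, using a hypothetical doorbell register layout rather than the QAT mailbox:

    #include <linux/types.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>
    #include <linux/bitops.h>

    #define DOORBELL_BUSY      1
    #define POLL_DELAY_US      20
    #define POLL_TIMEOUT_US    (5 * USEC_PER_SEC)

    /* Ring one engine's doorbell, then wait for firmware to clear it. */
    static int ring_and_wait(void __iomem *doorbell)
    {
            u32 status;

            writel(DOORBELL_BUSY, doorbell);
            /* Re-reads the register every POLL_DELAY_US until the
             * condition (status == 0) holds; returns -ETIMEDOUT if
             * POLL_TIMEOUT_US elapses first, 0 on success.
             */
            return readl_poll_timeout(doorbell, status, status == 0,
                                      POLL_DELAY_US, POLL_TIMEOUT_US);
    }

    /* Visit only the engines present in @mask, one doorbell apiece. */
    static int send_to_all(void __iomem *base, unsigned long mask,
                           unsigned int stride, unsigned int max_ae)
    {
            u32 ae;

            for_each_set_bit(ae, &mask, max_ae)
                    if (ring_and_wait(base + ae * stride))
                            return -EFAULT;
            return 0;
    }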
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index f5e960d23a7a..32102e27e559 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/aer.h>
@@ -86,7 +42,7 @@ void adf_reset_sbr(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
struct pci_dev *parent = pdev->bus->self;
- uint16_t bridge_ctl = 0;
+ u16 bridge_ctl = 0;
if (!parent)
parent = pdev;
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index 5c7fdb0fc53d..ac462796cefc 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h
index 6a9c6f6b5ec9..376cde61a60e 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_CFG_H_
#define ADF_CFG_H_
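
Note the two SPDX comment forms used in this series: .c files take the C99 line comment while headers such as adf_cfg.h take the block comment, per the kernel's license-rules documentation, since headers can be pulled into contexts (e.g. assembly) where // is not accepted:

    // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)       .c files
    /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */    .h files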
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
index 1211261de7c2..1ef46ccfba47 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_CFG_COMMON_H_
#define ADF_CFG_COMMON_H_
@@ -81,16 +37,16 @@ enum adf_device_type {
struct adf_dev_status_info {
enum adf_device_type type;
- u32 accel_id;
- u32 instance_id;
- uint8_t num_ae;
- uint8_t num_accel;
- uint8_t num_logical_accel;
- uint8_t banks_per_accel;
- uint8_t state;
- uint8_t bus;
- uint8_t dev;
- uint8_t fun;
+ __u32 accel_id;
+ __u32 instance_id;
+ __u8 num_ae;
+ __u8 num_accel;
+ __u8 num_logical_accel;
+ __u8 banks_per_accel;
+ __u8 state;
+ __u8 bus;
+ __u8 dev;
+ __u8 fun;
char name[MAX_DEVICE_NAME_SIZE];
};
@@ -101,6 +57,6 @@ struct adf_dev_status_info {
struct adf_user_cfg_ctl_data)
#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
struct adf_user_cfg_ctl_data)
-#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t)
-#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t)
+#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, __u32)
+#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, __s32)
#endif
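
The move from uint32_t/uint8_t to __u32/__u8 matters here because adf_cfg_common.h describes a user-visible ABI: _IOW() folds the direction, the magic number, the command index and sizeof() of the payload type into the command word, so the kernel and userspace must agree on exact widths. A minimal userspace sketch of issuing one of these commands; the device node name /dev/qat_adf_ctl, the error handling, and the assumption that this header is visible to userspace are all illustrative, not mandated by the header:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include "adf_cfg_common.h"   /* IOCTL_GET_NUM_DEVICES et al. */

    int main(void)
    {
            __s32 num = 0;        /* must match the __s32 in _IOW() */
            int fd = open("/dev/qat_adf_ctl", O_RDONLY);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, IOCTL_GET_NUM_DEVICES, &num) == 0)
                    printf("%d QAT device(s)\n", num);
            return 0;
    }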
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
index 7632ed0f25c5..314790f5b0af 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_CFG_STRINGS_H_
#define ADF_CFG_STRINGS_H_
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
index b5484bfa6996..421f4fb8b4dd 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_user.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_CFG_USER_H_
#define ADF_CFG_USER_H_
@@ -55,7 +11,7 @@ struct adf_user_cfg_key_val {
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
union {
struct adf_user_cfg_key_val *next;
- uint64_t padding3;
+ __u64 padding3;
};
enum adf_cfg_val_type type;
} __packed;
@@ -64,19 +20,19 @@ struct adf_user_cfg_section {
char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
union {
struct adf_user_cfg_key_val *params;
- uint64_t padding1;
+ __u64 padding1;
};
union {
struct adf_user_cfg_section *next;
- uint64_t padding3;
+ __u64 padding3;
};
} __packed;
struct adf_user_cfg_ctl_data {
union {
struct adf_user_cfg_section *config_section;
- uint64_t padding;
+ __u64 padding;
};
- uint8_t device_id;
+ __u8 device_id;
} __packed;
#endif
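
The pointer/__u64 unions in these __packed structures are an ABI-stability idiom: a bare `struct ... *next` field would occupy 4 bytes under a 32-bit userspace and 8 under a 64-bit one, whereas overlaying it with a __u64 pins the slot at 8 bytes either way, so a 32-bit process can drive a 64-bit kernel without a compat shim. Reduced to its essentials (names illustrative):

    #include <linux/types.h>

    struct abi_stable_node {
            union {
                    struct abi_stable_node *next; /* in-process view */
                    __u64 padding;                /* pins the slot to 8 bytes
                                                   * on 32- and 64-bit ABIs */
            };
            __u8 id;
    } __packed;
    /* sizeof(struct abi_stable_node) == 9 regardless of pointer width. */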
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index d78f8d5c89c3..ebfcb4ea618d 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_DRV_H
#define ADF_DRV_H
@@ -123,11 +79,11 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
struct list_head *adf_devmgr_get_head(void);
-struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id);
struct adf_accel_dev *adf_devmgr_get_first(void);
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
-int adf_devmgr_verify_id(uint32_t id);
-void adf_devmgr_get_num_dev(uint32_t *num);
+int adf_devmgr_verify_id(u32 id);
+void adf_devmgr_get_num_dev(u32 *num);
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
int adf_dev_started(struct adf_accel_dev *accel_dev);
int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
@@ -198,7 +154,7 @@ void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int ctx_mask, unsigned int upc);
void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int uaddr,
- unsigned int words_num, uint64_t *uword);
+ unsigned int words_num, u64 *uword);
void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
unsigned int uword_addr, unsigned int words_num,
unsigned int *data);
@@ -233,9 +189,9 @@ int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
- uint32_t vf_mask);
+ u32 vf_mask);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
- uint32_t vf_mask);
+ u32 vf_mask);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index ef0e482ee04f..71d0c44aacca 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -270,7 +226,7 @@ static int adf_ctl_is_device_in_use(int id)
return 0;
}
-static void adf_ctl_stop_devices(uint32_t id)
+static void adf_ctl_stop_devices(u32 id)
{
struct adf_accel_dev *accel_dev;
@@ -374,7 +330,7 @@ out:
static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
unsigned long arg)
{
- uint32_t num_devices = 0;
+ u32 num_devices = 0;
adf_devmgr_get_num_dev(&num_devices);
if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
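
This is the kernel half of the width contract set up by the __u32/__s32 ioctl definitions in adf_cfg_common.h: the handler fills an in-kernel u32 (same underlying type as __u32) and hands it back with copy_to_user(). Condensed from the hunk above, with the surrounding ioctl plumbing elided:

    #include <linux/types.h>
    #include <linux/uaccess.h>

    static int get_num_devices(unsigned long arg)
    {
            u32 num_devices = 0;   /* same width as the UAPI __u32 */

            adf_devmgr_get_num_dev(&num_devices);
            /* copy_to_user() returns the number of bytes NOT copied,
             * so any nonzero result means the write-back failed.
             */
            if (copy_to_user((void __user *)arg, &num_devices,
                             sizeof(num_devices)))
                    return -EFAULT;
            return 0;
    }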
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 2d06409bd3c4..72753af056b3 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include "adf_cfg.h"
@@ -52,7 +8,7 @@
static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
-static uint32_t num_devices;
+static u32 num_devices;
static u8 id_map[ADF_MAX_DEVICES];
struct vf_id_map {
@@ -355,7 +311,7 @@ struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
}
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
-struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
{
struct list_head *itr;
int real_id;
@@ -380,7 +336,7 @@ unlock:
return NULL;
}
-int adf_devmgr_verify_id(uint32_t id)
+int adf_devmgr_verify_id(u32 id)
{
if (id == ADF_CFG_ALL_DEVICES)
return 0;
@@ -407,7 +363,7 @@ static int adf_get_num_dettached_vfs(void)
return vfs;
}
-void adf_devmgr_get_num_dev(uint32_t *num)
+void adf_devmgr_get_num_dev(u32 *num)
{
*num = num_devices - adf_get_num_dettached_vfs();
}
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
index d7dd18d9bef8..d4162783f970 100644
--- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport_internal.h"
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 26556c713049..42029153408e 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/bitops.h>
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index cd1cdf5305bc..36136f7db509 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
index b3875fdf6cd7..519fd5acf713 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -1,50 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
index 5acd531a11ff..0690c031bfce 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#ifndef ADF_PF2VF_MSG_H
#define ADF_PF2VF_MSG_H
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index b36d8653b1ba..8827aa139f96 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 2136cbe4bf6c..2ad774017200 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
@@ -51,22 +7,22 @@
#include "adf_cfg.h"
#include "adf_common_drv.h"
-static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+static inline u32 adf_modulo(u32 data, u32 shift)
{
- uint32_t div = data >> shift;
- uint32_t mult = div << shift;
+ u32 div = data >> shift;
+ u32 mult = div << shift;
return data - mult;
}
-static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
+static inline int adf_check_ring_alignment(u64 addr, u64 size)
{
if (((size - 1) & addr) != 0)
return -EFAULT;
return 0;
}
-static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
+static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
{
int i = ADF_MIN_RING_SIZE;
@@ -77,7 +33,7 @@ static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
return ADF_DEFAULT_RING_SIZE;
}
-static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
spin_lock(&bank->lock);
if (bank->ring_mask & (1 << ring)) {
@@ -89,14 +45,14 @@ static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
return 0;
}
-static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
spin_lock(&bank->lock);
bank->ring_mask &= ~(1 << ring);
spin_unlock(&bank->lock);
}
-static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
spin_lock_bh(&bank->lock);
bank->irq_mask |= (1 << ring);
@@ -106,7 +62,7 @@ static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
bank->irq_coalesc_timer);
}
-static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
spin_lock_bh(&bank->lock);
bank->irq_mask &= ~(1 << ring);
@@ -114,7 +70,7 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}
-int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
+int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
if (atomic_add_return(1, ring->inflights) >
ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
@@ -136,18 +92,18 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
- uint32_t msg_counter = 0;
- uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
+ u32 msg_counter = 0;
+ u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
while (*msg != ADF_RING_EMPTY_SIG) {
- ring->callback((uint32_t *)msg);
+ ring->callback((u32 *)msg);
atomic_dec(ring->inflights);
*msg = ADF_RING_EMPTY_SIG;
ring->head = adf_modulo(ring->head +
ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
ADF_RING_SIZE_MODULO(ring->ring_size));
msg_counter++;
- msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
+ msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
}
if (msg_counter > 0)
WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
@@ -158,7 +114,7 @@ static int adf_handle_response(struct adf_etr_ring_data *ring)
static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
- uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);
+ u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);
WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
ring->ring_number, ring_config);
@@ -166,7 +122,7 @@ static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
- uint32_t ring_config =
+ u32 ring_config =
BUILD_RESP_RING_CONFIG(ring->ring_size,
ADF_RING_NEAR_WATERMARK_512,
ADF_RING_NEAR_WATERMARK_0);
@@ -180,8 +136,8 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
struct adf_etr_bank_data *bank = ring->bank;
struct adf_accel_dev *accel_dev = bank->accel_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint64_t ring_base;
- uint32_t ring_size_bytes =
+ u64 ring_base;
+ u32 ring_size_bytes =
ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
@@ -215,7 +171,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
- uint32_t ring_size_bytes =
+ u32 ring_size_bytes =
ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
@@ -228,8 +184,8 @@ static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
}
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
- uint32_t bank_num, uint32_t num_msgs,
- uint32_t msg_size, const char *ring_name,
+ u32 bank_num, u32 num_msgs,
+ u32 msg_size, const char *ring_name,
adf_callback_fn callback, int poll_mode,
struct adf_etr_ring_data **ring_ptr)
{
@@ -237,7 +193,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
struct adf_etr_bank_data *bank;
struct adf_etr_ring_data *ring;
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
- uint32_t ring_num;
+ u32 ring_num;
int ret;
if (bank_num >= GET_MAX_BANKS(accel_dev)) {
@@ -330,7 +286,7 @@ void adf_remove_ring(struct adf_etr_ring_data *ring)
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
- uint32_t empty_rings, i;
+ u32 empty_rings, i;
empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
empty_rings = ~empty_rings & bank->irq_mask;
@@ -353,7 +309,7 @@ void adf_response_handler(uintptr_t bank_addr)
static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
const char *section, const char *format,
- uint32_t key, uint32_t *value)
+ u32 key, u32 *value)
{
char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
@@ -370,7 +326,7 @@ static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
const char *section,
- uint32_t bank_num_in_accel)
+ u32 bank_num_in_accel)
{
if (adf_get_cfg_int(bank->accel_dev, section,
ADF_ETRMGR_COALESCE_TIMER_FORMAT,
@@ -384,12 +340,12 @@ static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
static int adf_init_bank(struct adf_accel_dev *accel_dev,
struct adf_etr_bank_data *bank,
- uint32_t bank_num, void __iomem *csr_addr)
+ u32 bank_num, void __iomem *csr_addr)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_etr_ring_data *ring;
struct adf_etr_ring_data *tx_ring;
- uint32_t i, coalesc_enabled = 0;
+ u32 i, coalesc_enabled = 0;
memset(bank, 0, sizeof(*bank));
bank->bank_number = bank_num;
@@ -461,8 +417,8 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
struct adf_etr_data *etr_data;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr_addr;
- uint32_t size;
- uint32_t num_banks = 0;
+ u32 size;
+ u32 num_banks = 0;
int i, ret;
etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
@@ -508,7 +464,7 @@ EXPORT_SYMBOL_GPL(adf_init_etr_data);
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
- uint32_t i;
+ u32 i;
for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
struct adf_accel_dev *accel_dev = bank->accel_dev;
@@ -528,7 +484,7 @@ static void cleanup_bank(struct adf_etr_bank_data *bank)
static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
struct adf_etr_data *etr_data = accel_dev->transport;
- uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);
+ u32 i, num_banks = GET_MAX_BANKS(accel_dev);
for (i = 0; i < num_banks; i++)
cleanup_bank(&etr_data->banks[i]);
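
Note on the ring walk in adf_handle_response() above: the head is advanced by one message stride and reduced modulo the ring size, which is cheap because ring sizes are powers of two. A minimal standalone sketch of that arithmetic follows; the constants and the modulo_pow2() helper are illustrative stand-ins, not the driver's values or its adf_modulo() signature.

#include <stdint.h>
#include <stdio.h>

#define MSG_SIZE_BYTES   64u	/* assumed message stride */
#define RING_SIZE_BYTES  1024u	/* assumed ring size, power of two */

static uint32_t modulo_pow2(uint32_t value, uint32_t modulo)
{
	/* valid only when modulo is a power of two */
	return value & (modulo - 1);
}

int main(void)
{
	uint32_t head = RING_SIZE_BYTES - MSG_SIZE_BYTES;

	/* consuming one more message wraps the head back to offset 0 */
	head = modulo_pow2(head + MSG_SIZE_BYTES, RING_SIZE_BYTES);
	printf("head after wrap: %u\n", head);	/* prints 0 */
	return 0;
}
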
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
index 386485bd9c95..2c95f1697c76 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.h
+++ b/drivers/crypto/qat/qat_common/adf_transport.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_TRANSPORT_H
#define ADF_TRANSPORT_H
@@ -54,10 +10,10 @@ struct adf_etr_ring_data;
typedef void (*adf_callback_fn)(void *resp_msg);
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
- uint32_t bank_num, uint32_t num_mgs, uint32_t msg_size,
+ u32 bank_num, u32 num_msgs, u32 msg_size,
const char *ring_name, adf_callback_fn callback,
int poll_mode, struct adf_etr_ring_data **ring_ptr);
-int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg);
+int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
void adf_remove_ring(struct adf_etr_ring_data *ring);
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
index 80e02a2a0a09..950d1988556c 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
#define ADF_TRANSPORT_ACCESS_MACROS_H
@@ -132,9 +88,9 @@
ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
do { \
- uint32_t l_base = 0, u_base = 0; \
- l_base = (uint32_t)(value & 0xFFFFFFFF); \
- u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+ u32 l_base = 0, u_base = 0; \
+ l_base = (u32)(value & 0xFFFFFFFF); \
+ u_base = (u32)((value & 0xFFFFFFFF00000000ULL) >> 32); \
ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
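
The WRITE_CSR_RING_BASE hunk above keeps the same technique under the new types: a 64-bit ring base address is programmed as two 32-bit CSR writes, lower word then upper word. A minimal sketch of the split, where csr_wr32() is a hypothetical stand-in for the driver's ADF_CSR_WR():

#include <stdint.h>
#include <stdio.h>

static void csr_wr32(const char *name, uint32_t value)
{
	/* stand-in for a 32-bit MMIO register write */
	printf("%s <- 0x%08x\n", name, value);
}

static void write_ring_base(uint64_t base)
{
	uint32_t l_base = (uint32_t)(base & 0xFFFFFFFF);
	uint32_t u_base = (uint32_t)(base >> 32);

	csr_wr32("LBASE", l_base);	/* lower 32 bits first */
	csr_wr32("UBASE", u_base);	/* then upper 32 bits */
}

int main(void)
{
	write_ring_base(0x0000000123400000ULL);
	return 0;
}
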
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index e794e9d97b2c..2a2eccbf56ec 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
index bb883368ac01..c7faf4e2d302 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_TRANSPORT_INTRN_H
#define ADF_TRANSPORT_INTRN_H
@@ -59,32 +15,31 @@ struct adf_etr_ring_debug_entry {
struct adf_etr_ring_data {
void *base_addr;
atomic_t *inflights;
- spinlock_t lock; /* protects ring data struct */
adf_callback_fn callback;
struct adf_etr_bank_data *bank;
dma_addr_t dma_addr;
- uint16_t head;
- uint16_t tail;
- uint8_t ring_number;
- uint8_t ring_size;
- uint8_t msg_size;
- uint8_t reserved;
struct adf_etr_ring_debug_entry *ring_debug;
-} __packed;
+ spinlock_t lock; /* protects ring data struct */
+ u16 head;
+ u16 tail;
+ u8 ring_number;
+ u8 ring_size;
+ u8 msg_size;
+};
struct adf_etr_bank_data {
struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
struct tasklet_struct resp_handler;
void __iomem *csr_addr;
- struct adf_accel_dev *accel_dev;
- uint32_t irq_coalesc_timer;
- uint16_t ring_mask;
- uint16_t irq_mask;
+ u32 irq_coalesc_timer;
+ u32 bank_number;
+ u16 ring_mask;
+ u16 irq_mask;
spinlock_t lock; /* protects bank data struct */
+ struct adf_accel_dev *accel_dev;
struct dentry *bank_debug_dir;
struct dentry *bank_debug_cfg;
- uint32_t bank_number;
-} __packed;
+};
struct adf_etr_data {
struct adf_etr_bank_data *banks;
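
The layout changes above drop __packed and reorder members from widest to narrowest, the usual motivation being that the compiler can then lay the struct out with naturally aligned members and no internal holes, without forcing the byte-aligned accesses __packed implies. A toy illustration of the idea, not the driver's exact struct:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ring_like {
	void *base_addr;	/* pointer-sized members first */
	uint16_t head;
	uint16_t tail;
	uint8_t ring_number;
	uint8_t ring_size;
	uint8_t msg_size;
};

int main(void)
{
	/* descending-size ordering leaves no gaps between members */
	printf("size=%zu head@%zu tail@%zu ring_number@%zu\n",
	       sizeof(struct ring_like),
	       offsetof(struct ring_like, head),
	       offsetof(struct ring_like, tail),
	       offsetof(struct ring_like, ring_number));
	return 0;
}
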
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
index cd5f37dffe8a..2c98fb63f7b7 100644
--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 4a73fc70f7a9..c4a44dc6af3e 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
index 46747f01b1d1..6dc09d270082 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _ICP_QAT_FW_H_
#define _ICP_QAT_FW_H_
#include <linux/types.h>
@@ -89,41 +45,41 @@ enum icp_qat_fw_comn_request_id {
struct icp_qat_fw_comn_req_hdr_cd_pars {
union {
struct {
- uint64_t content_desc_addr;
- uint16_t content_desc_resrvd1;
- uint8_t content_desc_params_sz;
- uint8_t content_desc_hdr_resrvd2;
- uint32_t content_desc_resrvd3;
+ __u64 content_desc_addr;
+ __u16 content_desc_resrvd1;
+ __u8 content_desc_params_sz;
+ __u8 content_desc_hdr_resrvd2;
+ __u32 content_desc_resrvd3;
} s;
struct {
- uint32_t serv_specif_fields[4];
+ __u32 serv_specif_fields[4];
} s1;
} u;
};
struct icp_qat_fw_comn_req_mid {
- uint64_t opaque_data;
- uint64_t src_data_addr;
- uint64_t dest_data_addr;
- uint32_t src_length;
- uint32_t dst_length;
+ __u64 opaque_data;
+ __u64 src_data_addr;
+ __u64 dest_data_addr;
+ __u32 src_length;
+ __u32 dst_length;
};
struct icp_qat_fw_comn_req_cd_ctrl {
- uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+ __u32 content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
};
struct icp_qat_fw_comn_req_hdr {
- uint8_t resrvd1;
- uint8_t service_cmd_id;
- uint8_t service_type;
- uint8_t hdr_flags;
- uint16_t serv_specif_flags;
- uint16_t comn_req_flags;
+ __u8 resrvd1;
+ __u8 service_cmd_id;
+ __u8 service_type;
+ __u8 hdr_flags;
+ __u16 serv_specif_flags;
+ __u16 comn_req_flags;
};
struct icp_qat_fw_comn_req_rqpars {
- uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+ __u32 serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
};
struct icp_qat_fw_comn_req {
@@ -135,24 +91,24 @@ struct icp_qat_fw_comn_req {
};
struct icp_qat_fw_comn_error {
- uint8_t xlat_err_code;
- uint8_t cmp_err_code;
+ __u8 xlat_err_code;
+ __u8 cmp_err_code;
};
struct icp_qat_fw_comn_resp_hdr {
- uint8_t resrvd1;
- uint8_t service_id;
- uint8_t response_type;
- uint8_t hdr_flags;
+ __u8 resrvd1;
+ __u8 service_id;
+ __u8 response_type;
+ __u8 hdr_flags;
struct icp_qat_fw_comn_error comn_error;
- uint8_t comn_status;
- uint8_t cmd_id;
+ __u8 comn_status;
+ __u8 cmd_id;
};
struct icp_qat_fw_comn_resp {
struct icp_qat_fw_comn_resp_hdr comn_hdr;
- uint64_t opaque_data;
- uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+ __u64 opaque_data;
+ __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
};
#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
index 72a59faa9005..d4d188cd7ed0 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
#define _ICP_QAT_FW_INIT_ADMIN_H_
@@ -67,50 +23,75 @@ enum icp_qat_fw_init_admin_resp_status {
};
struct icp_qat_fw_init_admin_req {
- uint16_t init_cfg_sz;
- uint8_t resrvd1;
- uint8_t init_admin_cmd_id;
- uint32_t resrvd2;
- uint64_t opaque_data;
- uint64_t init_cfg_ptr;
- uint64_t resrvd3;
-};
-
-struct icp_qat_fw_init_admin_resp_hdr {
- uint8_t flags;
- uint8_t resrvd1;
- uint8_t status;
- uint8_t init_admin_cmd_id;
-};
+ __u16 init_cfg_sz;
+ __u8 resrvd1;
+ __u8 cmd_id;
+ __u32 resrvd2;
+ __u64 opaque_data;
+ __u64 init_cfg_ptr;
-struct icp_qat_fw_init_admin_resp_pars {
union {
- uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4];
struct {
- uint32_t version_patch_num;
- uint8_t context_id;
- uint8_t ae_id;
- uint16_t resrvd1;
- uint64_t resrvd2;
- } s1;
- struct {
- uint64_t req_rec_count;
- uint64_t resp_sent_count;
- } s2;
- } u;
+ __u16 ibuf_size_in_kb;
+ __u16 resrvd3;
+ };
+ __u32 idle_filter;
+ };
+
+ __u32 resrvd4;
};
struct icp_qat_fw_init_admin_resp {
- struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr;
+ __u8 flags;
+ __u8 resrvd1;
+ __u8 status;
+ __u8 cmd_id;
union {
- uint32_t resrvd2;
+ __u32 resrvd2;
+ struct {
+ __u16 version_minor_num;
+ __u16 version_major_num;
+ };
+ };
+ __u64 opaque_data;
+ union {
+ __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_4];
+ struct {
+ __u32 version_patch_num;
+ __u8 context_id;
+ __u8 ae_id;
+ __u16 resrvd4;
+ __u64 resrvd5;
+ };
+ struct {
+ __u64 req_rec_count;
+ __u64 resp_sent_count;
+ };
+ struct {
+ __u16 compression_algos;
+ __u16 checksum_algos;
+ __u32 deflate_capabilities;
+ __u32 resrvd6;
+ __u32 lzs_capabilities;
+ };
+ struct {
+ __u32 cipher_algos;
+ __u32 hash_algos;
+ __u16 keygen_algos;
+ __u16 other;
+ __u16 public_key_algos;
+ __u16 prime_algos;
+ };
+ struct {
+ __u64 timestamp;
+ __u64 resrvd7;
+ };
struct {
- uint16_t version_minor_num;
- uint16_t version_major_num;
- } s;
- } u;
- uint64_t opaque_data;
- struct icp_qat_fw_init_admin_resp_pars init_resp_pars;
+ __u32 successful_count;
+ __u32 unsuccessful_count;
+ __u64 resrvd8;
+ };
+ };
};
#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
index c8d26697e8ea..6757ec09d81f 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _ICP_QAT_FW_LA_H_
#define _ICP_QAT_FW_LA_H_
#include "icp_qat_fw.h"
@@ -226,14 +182,14 @@ struct icp_qat_fw_la_bulk_req {
struct icp_qat_fw_cipher_req_hdr_cd_pars {
union {
struct {
- uint64_t content_desc_addr;
- uint16_t content_desc_resrvd1;
- uint8_t content_desc_params_sz;
- uint8_t content_desc_hdr_resrvd2;
- uint32_t content_desc_resrvd3;
+ __u64 content_desc_addr;
+ __u16 content_desc_resrvd1;
+ __u8 content_desc_params_sz;
+ __u8 content_desc_hdr_resrvd2;
+ __u32 content_desc_resrvd3;
} s;
struct {
- uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
} s1;
} u;
};
@@ -241,70 +197,70 @@ struct icp_qat_fw_cipher_req_hdr_cd_pars {
struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
union {
struct {
- uint64_t content_desc_addr;
- uint16_t content_desc_resrvd1;
- uint8_t content_desc_params_sz;
- uint8_t content_desc_hdr_resrvd2;
- uint32_t content_desc_resrvd3;
+ __u64 content_desc_addr;
+ __u16 content_desc_resrvd1;
+ __u8 content_desc_params_sz;
+ __u8 content_desc_hdr_resrvd2;
+ __u32 content_desc_resrvd3;
} s;
struct {
- uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
} sl;
} u;
};
struct icp_qat_fw_cipher_cd_ctrl_hdr {
- uint8_t cipher_state_sz;
- uint8_t cipher_key_sz;
- uint8_t cipher_cfg_offset;
- uint8_t next_curr_id;
- uint8_t cipher_padding_sz;
- uint8_t resrvd1;
- uint16_t resrvd2;
- uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+ __u8 cipher_state_sz;
+ __u8 cipher_key_sz;
+ __u8 cipher_cfg_offset;
+ __u8 next_curr_id;
+ __u8 cipher_padding_sz;
+ __u8 resrvd1;
+ __u16 resrvd2;
+ __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
};
struct icp_qat_fw_auth_cd_ctrl_hdr {
- uint32_t resrvd1;
- uint8_t resrvd2;
- uint8_t hash_flags;
- uint8_t hash_cfg_offset;
- uint8_t next_curr_id;
- uint8_t resrvd3;
- uint8_t outer_prefix_sz;
- uint8_t final_sz;
- uint8_t inner_res_sz;
- uint8_t resrvd4;
- uint8_t inner_state1_sz;
- uint8_t inner_state2_offset;
- uint8_t inner_state2_sz;
- uint8_t outer_config_offset;
- uint8_t outer_state1_sz;
- uint8_t outer_res_sz;
- uint8_t outer_prefix_offset;
+ __u32 resrvd1;
+ __u8 resrvd2;
+ __u8 hash_flags;
+ __u8 hash_cfg_offset;
+ __u8 next_curr_id;
+ __u8 resrvd3;
+ __u8 outer_prefix_sz;
+ __u8 final_sz;
+ __u8 inner_res_sz;
+ __u8 resrvd4;
+ __u8 inner_state1_sz;
+ __u8 inner_state2_offset;
+ __u8 inner_state2_sz;
+ __u8 outer_config_offset;
+ __u8 outer_state1_sz;
+ __u8 outer_res_sz;
+ __u8 outer_prefix_offset;
};
struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
- uint8_t cipher_state_sz;
- uint8_t cipher_key_sz;
- uint8_t cipher_cfg_offset;
- uint8_t next_curr_id_cipher;
- uint8_t cipher_padding_sz;
- uint8_t hash_flags;
- uint8_t hash_cfg_offset;
- uint8_t next_curr_id_auth;
- uint8_t resrvd1;
- uint8_t outer_prefix_sz;
- uint8_t final_sz;
- uint8_t inner_res_sz;
- uint8_t resrvd2;
- uint8_t inner_state1_sz;
- uint8_t inner_state2_offset;
- uint8_t inner_state2_sz;
- uint8_t outer_config_offset;
- uint8_t outer_state1_sz;
- uint8_t outer_res_sz;
- uint8_t outer_prefix_offset;
+ __u8 cipher_state_sz;
+ __u8 cipher_key_sz;
+ __u8 cipher_cfg_offset;
+ __u8 next_curr_id_cipher;
+ __u8 cipher_padding_sz;
+ __u8 hash_flags;
+ __u8 hash_cfg_offset;
+ __u8 next_curr_id_auth;
+ __u8 resrvd1;
+ __u8 outer_prefix_sz;
+ __u8 final_sz;
+ __u8 inner_res_sz;
+ __u8 resrvd2;
+ __u8 inner_state1_sz;
+ __u8 inner_state2_offset;
+ __u8 inner_state2_sz;
+ __u8 outer_config_offset;
+ __u8 outer_state1_sz;
+ __u8 outer_res_sz;
+ __u8 outer_prefix_offset;
};
#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
@@ -315,48 +271,48 @@ struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
struct icp_qat_fw_la_cipher_req_params {
- uint32_t cipher_offset;
- uint32_t cipher_length;
+ __u32 cipher_offset;
+ __u32 cipher_length;
union {
- uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ __u32 cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
struct {
- uint64_t cipher_IV_ptr;
- uint64_t resrvd1;
+ __u64 cipher_IV_ptr;
+ __u64 resrvd1;
} s;
} u;
};
struct icp_qat_fw_la_auth_req_params {
- uint32_t auth_off;
- uint32_t auth_len;
+ __u32 auth_off;
+ __u32 auth_len;
union {
- uint64_t auth_partial_st_prefix;
- uint64_t aad_adr;
+ __u64 auth_partial_st_prefix;
+ __u64 aad_adr;
} u1;
- uint64_t auth_res_addr;
+ __u64 auth_res_addr;
union {
- uint8_t inner_prefix_sz;
- uint8_t aad_sz;
+ __u8 inner_prefix_sz;
+ __u8 aad_sz;
} u2;
- uint8_t resrvd1;
- uint8_t hash_state_sz;
- uint8_t auth_res_sz;
+ __u8 resrvd1;
+ __u8 hash_state_sz;
+ __u8 auth_res_sz;
} __packed;
struct icp_qat_fw_la_auth_req_params_resrvd_flds {
- uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+ __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
union {
- uint8_t inner_prefix_sz;
- uint8_t aad_sz;
+ __u8 inner_prefix_sz;
+ __u8 aad_sz;
} u2;
- uint8_t resrvd1;
- uint16_t resrvd2;
+ __u8 resrvd1;
+ __u16 resrvd2;
};
struct icp_qat_fw_la_resp {
struct icp_qat_fw_comn_resp_hdr comn_resp;
- uint64_t opaque_data;
- uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+ __u64 opaque_data;
+ __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
};
#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
index 2ffef3e4fd68..3e8e291cd122 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
#define __ICP_QAT_FW_LOADER_HANDLE_H__
#include "icp_qat_uclo.h"
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
index 0d7a9b51ce9f..9dddae0009fc 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
@@ -1,100 +1,56 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _ICP_QAT_FW_PKE_
#define _ICP_QAT_FW_PKE_
#include "icp_qat_fw.h"
struct icp_qat_fw_req_hdr_pke_cd_pars {
- u64 content_desc_addr;
- u32 content_desc_resrvd;
- u32 func_id;
+ __u64 content_desc_addr;
+ __u32 content_desc_resrvd;
+ __u32 func_id;
};
struct icp_qat_fw_req_pke_mid {
- u64 opaque;
- u64 src_data_addr;
- u64 dest_data_addr;
+ __u64 opaque;
+ __u64 src_data_addr;
+ __u64 dest_data_addr;
};
struct icp_qat_fw_req_pke_hdr {
- u8 resrvd1;
- u8 resrvd2;
- u8 service_type;
- u8 hdr_flags;
- u16 comn_req_flags;
- u16 resrvd4;
+ __u8 resrvd1;
+ __u8 resrvd2;
+ __u8 service_type;
+ __u8 hdr_flags;
+ __u16 comn_req_flags;
+ __u16 resrvd4;
struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
};
struct icp_qat_fw_pke_request {
struct icp_qat_fw_req_pke_hdr pke_hdr;
struct icp_qat_fw_req_pke_mid pke_mid;
- u8 output_param_count;
- u8 input_param_count;
- u16 resrvd1;
- u32 resrvd2;
- u64 next_req_adr;
+ __u8 output_param_count;
+ __u8 input_param_count;
+ __u16 resrvd1;
+ __u32 resrvd2;
+ __u64 next_req_adr;
};
struct icp_qat_fw_resp_pke_hdr {
- u8 resrvd1;
- u8 resrvd2;
- u8 response_type;
- u8 hdr_flags;
- u16 comn_resp_flags;
- u16 resrvd4;
+ __u8 resrvd1;
+ __u8 resrvd2;
+ __u8 response_type;
+ __u8 hdr_flags;
+ __u16 comn_resp_flags;
+ __u16 resrvd4;
};
struct icp_qat_fw_pke_resp {
struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
- u64 opaque;
- u64 src_data_addr;
- u64 dest_data_addr;
+ __u64 opaque;
+ __u64 src_data_addr;
+ __u64 dest_data_addr;
};
#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h
index 7187917533d0..c0e9fc0c93dd 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hal.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef __ICP_QAT_HAL_H
#define __ICP_QAT_HAL_H
#include "icp_qat_fw_loader_handle.h"
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
index 121d5e6e46ca..c4b6ef1506ab 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _ICP_QAT_HW_H_
#define _ICP_QAT_HW_H_
@@ -105,8 +61,8 @@ enum icp_qat_hw_auth_mode {
};
struct icp_qat_hw_auth_config {
- uint32_t config;
- uint32_t reserved;
+ __u32 config;
+ __u32 reserved;
};
#define QAT_AUTH_MODE_BITPOS 4
@@ -131,7 +87,7 @@ struct icp_qat_hw_auth_config {
struct icp_qat_hw_auth_counter {
__be32 counter;
- uint32_t reserved;
+ __u32 reserved;
};
#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
@@ -191,9 +147,9 @@ struct icp_qat_hw_auth_setup {
struct icp_qat_hw_auth_sha512 {
struct icp_qat_hw_auth_setup inner_setup;
- uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+ __u8 state1[ICP_QAT_HW_SHA512_STATE1_SZ];
struct icp_qat_hw_auth_setup outer_setup;
- uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+ __u8 state2[ICP_QAT_HW_SHA512_STATE2_SZ];
};
struct icp_qat_hw_auth_algo_blk {
@@ -227,8 +183,8 @@ enum icp_qat_hw_cipher_mode {
};
struct icp_qat_hw_cipher_config {
- uint32_t val;
- uint32_t reserved;
+ __u32 val;
+ __u32 reserved;
};
enum icp_qat_hw_cipher_dir {
@@ -296,7 +252,7 @@ enum icp_qat_hw_cipher_convert {
struct icp_qat_hw_cipher_aes256_f8 {
struct icp_qat_hw_cipher_config cipher_config;
- uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+ __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
};
struct icp_qat_hw_cipher_algo_blk {
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
index 5d1ee7e53492..8fe1ec344fa2 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef __ICP_QAT_UCLO_H__
#define __ICP_QAT_UCLO_H__
@@ -176,7 +132,7 @@ struct icp_qat_uof_encap_obj {
struct icp_qat_uclo_encap_uwblock {
unsigned int start_addr;
unsigned int words_num;
- uint64_t micro_words;
+ u64 micro_words;
};
struct icp_qat_uclo_encap_page {
@@ -215,7 +171,7 @@ struct icp_qat_uclo_objhdr {
struct icp_qat_uof_strtable {
unsigned int table_len;
unsigned int reserved;
- uint64_t strings;
+ u64 strings;
};
struct icp_qat_uclo_objhandle {
@@ -235,7 +191,7 @@ struct icp_qat_uclo_objhandle {
unsigned int ae_num;
unsigned int ustore_phy_size;
void *obj_buf;
- uint64_t *uword_buf;
+ u64 *uword_buf;
};
struct icp_qat_uof_uword_block {
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index e14d3dd291f0..72753b84dc95 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
@@ -55,6 +11,7 @@
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
+#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
@@ -78,15 +35,15 @@ static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
struct qat_alg_buf {
- uint32_t len;
- uint32_t resrvd;
- uint64_t addr;
+ u32 len;
+ u32 resrvd;
+ u64 addr;
} __packed;
struct qat_alg_buf_list {
- uint64_t resrvd;
- uint32_t num_bufs;
- uint32_t num_mapped_bufs;
+ u64 resrvd;
+ u32 num_bufs;
+ u32 num_mapped_bufs;
struct qat_alg_buf bufers[];
} __packed __aligned(64);
@@ -131,7 +88,8 @@ struct qat_alg_skcipher_ctx {
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
struct qat_crypto_instance *inst;
- struct crypto_skcipher *tfm;
+ struct crypto_skcipher *ftfm;
+ bool fallback;
};
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
@@ -151,7 +109,7 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct qat_alg_aead_ctx *ctx,
- const uint8_t *auth_key,
+ const u8 *auth_key,
unsigned int auth_keylen)
{
SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
@@ -467,7 +425,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
struct icp_qat_fw_la_bulk_req *req,
struct icp_qat_hw_cipher_algo_blk *cd,
- const uint8_t *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
@@ -487,7 +445,7 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
}
static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
- int alg, const uint8_t *key,
+ int alg, const u8 *key,
unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
@@ -500,7 +458,7 @@ static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
}
static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
- int alg, const uint8_t *key,
+ int alg, const u8 *key,
unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
@@ -578,7 +536,7 @@ error:
}
static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
- const uint8_t *key,
+ const u8 *key,
unsigned int keylen,
int mode)
{
@@ -592,7 +550,7 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
return 0;
}
-static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
+static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -606,7 +564,7 @@ static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
ICP_QAT_HW_CIPHER_CBC_MODE);
}
-static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key,
+static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -658,7 +616,7 @@ out_free_inst:
return ret;
}
-static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -820,7 +778,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
struct qat_crypto_instance *inst = ctx->inst;
struct aead_request *areq = qat_req->aead_req;
- uint8_t stat_filed = qat_resp->comn_resp.comn_status;
+ u8 stat_filed = qat_resp->comn_resp.comn_status;
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
qat_alg_free_bufl(inst, qat_req);
@@ -835,7 +793,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
struct qat_crypto_instance *inst = ctx->inst;
struct skcipher_request *sreq = qat_req->skcipher_req;
- uint8_t stat_filed = qat_resp->comn_resp.comn_status;
+ u8 stat_filed = qat_resp->comn_resp.comn_status;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
@@ -880,18 +838,18 @@ static int qat_alg_aead_dec(struct aead_request *areq)
qat_req->aead_ctx = ctx;
qat_req->aead_req = areq;
qat_req->cb = qat_aead_alg_callback;
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = areq->cryptlen - digst_size;
cipher_param->cipher_offset = areq->assoclen;
memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
do {
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
@@ -910,7 +868,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
- uint8_t *iv = areq->iv;
+ u8 *iv = areq->iv;
int ret, ctr = 0;
ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
@@ -922,11 +880,11 @@ static int qat_alg_aead_enc(struct aead_request *areq)
qat_req->aead_ctx = ctx;
qat_req->aead_req = areq;
qat_req->cb = qat_aead_alg_callback;
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
cipher_param->cipher_length = areq->cryptlen;
@@ -936,7 +894,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
auth_param->auth_len = areq->assoclen + areq->cryptlen;
do {
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
@@ -1038,6 +996,25 @@ static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ if (keylen >> 1 == AES_KEYSIZE_192) {
+ ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
+ if (ret)
+ return ret;
+
+ ctx->fallback = true;
+
+ return 0;
+ }
+
+ ctx->fallback = false;
+
return qat_alg_skcipher_setkey(tfm, key, keylen,
ICP_QAT_HW_CIPHER_XTS_MODE);
}
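
An XTS key is two concatenated AES keys of equal length, so keylen >> 1 is the size of each half; a 48-byte key (AES_KEYSIZE_192 per half) selects AES-192 XTS, which the QAT accelerators cannot offload, so such keys are handed to the software fallback transform set up in init_xts_tfm below. A minimal standalone sketch of the same length check (constants mirror <crypto/aes.h>):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32

/* True when an XTS key of this length must be handled in software:
 * the key is two AES keys back to back, and AES-192 is the one
 * variant the accelerator does not implement. */
static bool xts_needs_fallback(size_t keylen)
{
	return (keylen >> 1) == AES_KEYSIZE_192;
}

int main(void)
{
	printf("32-byte key -> fallback=%d\n", xts_needs_fallback(32));
	printf("48-byte key -> fallback=%d\n", xts_needs_fallback(48));
	printf("64-byte key -> fallback=%d\n", xts_needs_fallback(64));
	return 0;
}
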
@@ -1073,7 +1050,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
qat_req->skcipher_ctx = ctx;
qat_req->skcipher_req = req;
qat_req->cb = qat_skcipher_alg_callback;
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
@@ -1082,7 +1059,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
@@ -1102,6 +1079,24 @@ static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
return qat_alg_skcipher_encrypt(req);
}
+static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct skcipher_request *nreq = skcipher_request_ctx(req);
+
+ if (req->cryptlen < XTS_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ skcipher_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_skcipher_encrypt(nreq);
+ }
+
+ return qat_alg_skcipher_encrypt(req);
+}
+
static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
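
This handler and its decrypt counterpart further down share one dispatch pattern: requests shorter than a single XTS block are rejected, and when an AES-192 key put the tfm into fallback mode the request is cloned into the request context and re-targeted at the fallback transform. A condensed sketch of the encrypt side (the sketch_ name is hypothetical; the rest mirrors the code above):

#include <linux/string.h>
#include <linux/types.h>
#include <crypto/skcipher.h>

/* The fallback request lives inside the request context, sized in
 * advance by the init_xts_tfm hunk below, so the data path never
 * allocates. */
static int sketch_xts_encrypt(struct skcipher_request *req,
			      struct crypto_skcipher *ftfm, bool fallback)
{
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (!fallback)
		return qat_alg_skcipher_encrypt(req);	/* hardware path */

	memcpy(nreq, req, sizeof(*req));	/* clone src/dst/iv/flags */
	skcipher_request_set_tfm(nreq, ftfm);
	return crypto_skcipher_encrypt(nreq);
}
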
@@ -1133,7 +1128,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
qat_req->skcipher_ctx = ctx;
qat_req->skcipher_req = req;
qat_req->cb = qat_skcipher_alg_callback;
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
@@ -1142,7 +1137,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
@@ -1161,6 +1156,25 @@ static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
return qat_alg_skcipher_decrypt(req);
}
+
+static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct skcipher_request *nreq = skcipher_request_ctx(req);
+
+ if (req->cryptlen < XTS_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ skcipher_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_skcipher_decrypt(nreq);
+ }
+
+ return qat_alg_skcipher_decrypt(req);
+}
+
static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
const char *hash_name)
@@ -1217,10 +1231,25 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
+ return 0;
+}
+
+static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
+{
struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int reqsize;
+
+ ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->ftfm))
+ return PTR_ERR(ctx->ftfm);
+
+ reqsize = max(sizeof(struct qat_crypto_request),
+ sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(ctx->ftfm));
+ crypto_skcipher_set_reqsize(tfm, reqsize);
- crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
- ctx->tfm = tfm;
return 0;
}
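
The reqsize computed here is what makes the clone in the XTS handlers allocation-free: callers allocate crypto_skcipher_reqsize(tfm) bytes per request, and that single buffer must fit whichever is larger, the driver's own qat_crypto_request or a nested skcipher_request plus the fallback cipher's context. Restated as a hypothetical helper:

#include <linux/kernel.h>	/* max() */
#include <crypto/skcipher.h>

/* Not in the patch, just the sizing rule in isolation. */
static unsigned int sketch_xts_reqsize(struct crypto_skcipher *ftfm,
				       size_t own_state)
{
	return max(own_state, sizeof(struct skcipher_request) +
			      crypto_skcipher_reqsize(ftfm));
}
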
@@ -1251,13 +1280,22 @@ static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
qat_crypto_put_instance(inst);
}
+static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
+{
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->ftfm)
+ crypto_free_skcipher(ctx->ftfm);
+
+ qat_alg_skcipher_exit_tfm(tfm);
+}
static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha1",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_module = THIS_MODULE,
@@ -1274,7 +1312,7 @@ static struct aead_alg qat_aeads[] = { {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha256",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_module = THIS_MODULE,
@@ -1291,7 +1329,7 @@ static struct aead_alg qat_aeads[] = { {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha512",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_module = THIS_MODULE,
@@ -1309,7 +1347,7 @@ static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "qat_aes_cbc",
.base.cra_priority = 4001,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.base.cra_alignmask = 0,
@@ -1327,7 +1365,7 @@ static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "qat_aes_ctr",
.base.cra_priority = 4001,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.base.cra_alignmask = 0,
@@ -1345,17 +1383,18 @@ static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "qat_aes_xts",
.base.cra_priority = 4001,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
- .init = qat_alg_skcipher_init_tfm,
- .exit = qat_alg_skcipher_exit_tfm,
+ .init = qat_alg_skcipher_init_xts_tfm,
+ .exit = qat_alg_skcipher_exit_xts_tfm,
.setkey = qat_alg_skcipher_xts_setkey,
- .decrypt = qat_alg_skcipher_blk_decrypt,
- .encrypt = qat_alg_skcipher_blk_encrypt,
+ .decrypt = qat_alg_skcipher_xts_decrypt,
+ .encrypt = qat_alg_skcipher_xts_encrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
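
CRYPTO_ALG_ALLOCATES_MEMORY, newly set on every algorithm in this file, marks implementations that may allocate memory while servicing a request (this driver builds per-request buffer descriptor lists). Callers that cannot tolerate allocation on the I/O path exclude such drivers by putting the flag in the allocation mask while leaving it clear in the type; a sketch of that caller-side convention:

#include <crypto/skcipher.h>

/* With the bit set in the mask and clear in the type, only
 * implementations that do NOT allocate memory can match, so this
 * lookup would skip the qat_* algorithms above. */
static struct crypto_skcipher *alloc_nomem_xts(void)
{
	return crypto_alloc_skcipher("xts(aes)", 0,
				     CRYPTO_ALG_ALLOCATES_MEMORY);
}
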
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 692a7aaee749..846569ec9066 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -1,50 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
@@ -384,12 +339,12 @@ static int qat_dh_compute_value(struct kpp_request *req)
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
- msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+ msg->pke_mid.opaque = (u64)(__force long)qat_req;
msg->input_param_count = n_input_params;
msg->output_param_count = 1;
do {
- ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
} while (ret == -EBUSY && ctr++ < 100);
if (!ret)
@@ -779,11 +734,11 @@ static int qat_rsa_enc(struct akcipher_request *req)
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
- msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+ msg->pke_mid.opaque = (u64)(__force long)qat_req;
msg->input_param_count = 3;
msg->output_param_count = 1;
do {
- ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
} while (ret == -EBUSY && ctr++ < 100);
if (!ret)
@@ -927,7 +882,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
- msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+ msg->pke_mid.opaque = (u64)(__force long)qat_req;
if (ctx->crt_mode)
msg->input_param_count = 6;
else
@@ -935,7 +890,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
msg->output_param_count = 1;
do {
- ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
} while (ret == -EBUSY && ctr++ < 100);
if (!ret)
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index fb504cee0305..ab621b7dbd20 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index 300bb919a33a..12682d1e9f5f 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _QAT_CRYPTO_INSTANCE_H_
#define _QAT_CRYPTO_INSTANCE_H_
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index ff149e176f64..fa467e0f8285 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/slab.h>
#include <linux/delay.h>
@@ -78,13 +34,13 @@
#define AE(handle, ae) handle->hal_handle->aes[ae]
-static const uint64_t inst_4b[] = {
+static const u64 inst_4b[] = {
0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
0x0A021000000ull
};
-static const uint64_t inst[] = {
+static const u64 inst[] = {
0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
@@ -546,7 +502,7 @@ static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
}
-static uint64_t qat_hal_parity_64bit(uint64_t word)
+static u64 qat_hal_parity_64bit(u64 word)
{
word ^= word >> 1;
word ^= word >> 2;
@@ -557,9 +513,9 @@ static uint64_t qat_hal_parity_64bit(uint64_t word)
return word & 1;
}
-static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
+static u64 qat_hal_set_uword_ecc(u64 uword)
{
- uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
+ u64 bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
bit6_mask = 0xdaf69a46910ULL;
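
qat_hal_parity_64bit() is a standard xor-fold (the 4-, 8-, 16- and 32-bit folds fall between the two hunks above): each step xors the word onto a shifted copy of itself so that bit 0 ends up holding the parity of all 64 input bits. qat_hal_set_uword_ecc() then derives one ECC check bit per bitN_mask by taking the parity of the masked microword. The fold, runnable standalone:

#include <stdint.h>
#include <stdio.h>

static uint64_t parity64(uint64_t w)
{
	w ^= w >> 1;
	w ^= w >> 2;
	w ^= w >> 4;
	w ^= w >> 8;
	w ^= w >> 16;
	w ^= w >> 32;
	return w & 1;	/* xor of all 64 bits */
}

int main(void)
{
	uint64_t uword = 0x0F0400C0000ull;	/* first inst_4b entry */
	uint64_t bit0_mask = 0xff800007fffull;

	/* One of the seven check bits the driver computes per uword. */
	printf("ecc bit 0 = %llu\n",
	       (unsigned long long)parity64(uword & bit0_mask));
	return 0;
}
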
@@ -578,7 +534,7 @@ static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int uaddr,
- unsigned int words_num, uint64_t *uword)
+ unsigned int words_num, u64 *uword)
{
unsigned int ustore_addr;
unsigned int i;
@@ -588,7 +544,7 @@ void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
unsigned int uwrd_lo, uwrd_hi;
- uint64_t tmp;
+ u64 tmp;
tmp = qat_hal_set_uword_ecc(uword[i]);
uwrd_lo = (unsigned int)(tmp & 0xffffffff);
@@ -644,7 +600,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
csr_val |= CE_NN_MODE;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
- (uint64_t *)inst);
+ (u64 *)inst);
qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
handle->hal_handle->upc_mask &
INIT_PC_VALUE);
@@ -821,7 +777,7 @@ void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int uaddr,
- unsigned int words_num, uint64_t *uword)
+ unsigned int words_num, u64 *uword)
{
unsigned int i, uwrd_lo, uwrd_hi;
unsigned int ustore_addr, misc_control;
@@ -871,11 +827,11 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
#define MAX_EXEC_INST 100
static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
- uint64_t *micro_inst, unsigned int inst_num,
+ u64 *micro_inst, unsigned int inst_num,
int code_off, unsigned int max_cycle,
unsigned int *endpc)
{
- uint64_t savuwords[MAX_EXEC_INST];
+ u64 savuwords[MAX_EXEC_INST];
unsigned int ind_lm_addr0, ind_lm_addr1;
unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
unsigned int ind_cnt_sig;
@@ -972,7 +928,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
unsigned short reg_addr;
int status = 0;
- uint64_t insts, savuword;
+ u64 insts, savuword;
reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
if (reg_addr == BAD_REGADDR) {
@@ -984,7 +940,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
insts = 0xA070000000ull | (reg_addr & 0x3ff);
break;
default:
- insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
+ insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
break;
}
savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
@@ -1030,7 +986,7 @@ static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
unsigned short reg_num, unsigned int data)
{
unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
- uint64_t insts[] = {
+ u64 insts[] = {
0x0F440000000ull,
0x0F040000000ull,
0x0F0000C0300ull,
@@ -1076,13 +1032,13 @@ int qat_hal_get_ins_num(void)
return ARRAY_SIZE(inst_4b);
}
-static int qat_hal_concat_micro_code(uint64_t *micro_inst,
+static int qat_hal_concat_micro_code(u64 *micro_inst,
unsigned int inst_num, unsigned int size,
unsigned int addr, unsigned int *value)
{
int i;
unsigned int cur_value;
- const uint64_t *inst_arr;
+ const u64 *inst_arr;
int fixup_offset;
int usize = 0;
int orig_num;
@@ -1107,7 +1063,7 @@ static int qat_hal_concat_micro_code(uint64_t *micro_inst,
static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
- int *pfirst_exec, uint64_t *micro_inst,
+ int *pfirst_exec, u64 *micro_inst,
unsigned int inst_num)
{
int stat = 0;
@@ -1140,7 +1096,7 @@ int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_batch_init *lm_init_header)
{
struct icp_qat_uof_batch_init *plm_init;
- uint64_t *micro_inst_arry;
+ u64 *micro_inst_arry;
int micro_inst_num;
int alloc_inst_size;
int first_exec = 1;
@@ -1150,7 +1106,7 @@ int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
alloc_inst_size = lm_init_header->size;
if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
alloc_inst_size = handle->hal_handle->max_ustore;
- micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
+ micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64),
GFP_KERNEL);
if (!micro_inst_arry)
return -ENOMEM;
@@ -1229,7 +1185,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
data16low;
unsigned short reg_mask;
int status = 0;
- uint64_t micro_inst[] = {
+ u64 micro_inst[] = {
0x0F440000000ull,
0x0F040000000ull,
0x0A000000000ull,
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 6bd8f6a2a24f..bff759e2f811 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
@@ -332,13 +288,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
}
return 0;
out_err:
+ /* Do not free the list head unless we allocated it. */
+ tail_old = tail_old->next;
+ if (flag) {
+ kfree(*init_tab_base);
+ *init_tab_base = NULL;
+ }
+
while (tail_old) {
mem_init = tail_old->next;
kfree(tail_old);
tail_old = mem_init;
}
- if (flag)
- kfree(*init_tab_base);
return -ENOMEM;
}
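
The reworked out_err path fixes an ownership bug: tail_old points at the last node that existed before this function ran, so the old loop freed a node the caller still owned, and when the head had been allocated here (flag set) it was freed once by the loop and a second time by the trailing kfree(). The fix frees only the nodes this function appended, releases the head at most once, and clears the caller's pointer so it cannot be freed again. The rule in isolation, as a standalone sketch:

#include <stdlib.h>

struct node { struct node *next; };

/* On error, free everything appended after the pre-existing tail;
 * free the head only if we allocated it, and null the caller's
 * pointer so it is not freed a second time. */
static void cleanup_on_error(struct node **head, struct node *tail_old,
			     int we_allocated_head)
{
	struct node *n = tail_old->next;	/* first appended node */

	while (n) {
		struct node *next = n->next;

		free(n);
		n = next;
	}
	if (we_allocated_head) {
		free(*head);
		*head = NULL;
	}
}
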
@@ -411,16 +372,16 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
unsigned int ustore_size;
unsigned int patt_pos;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
- uint64_t *fill_data;
+ u64 *fill_data;
uof_image = image->img_ptr;
- fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
+ fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
GFP_KERNEL);
if (!fill_data)
return -ENOMEM;
for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
memcpy(&fill_data[i], &uof_image->fill_pattern,
- sizeof(uint64_t));
+ sizeof(u64));
page = image->page;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
@@ -981,7 +942,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
pr_err("QAT: UOF incompatible\n");
return -EINVAL;
}
- obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
+ obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
GFP_KERNEL);
if (!obj_handle->uword_buf)
return -ENOMEM;
@@ -1185,7 +1146,7 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
return 0;
}
-#define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low)
+#define ADD_ADDR(high, low) ((((u64)high) << 32) + low)
#define BITS_IN_DWORD 32
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
@@ -1514,10 +1475,10 @@ void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
struct icp_qat_uclo_encap_page *encap_page,
- uint64_t *uword, unsigned int addr_p,
- unsigned int raddr, uint64_t fill)
+ u64 *uword, unsigned int addr_p,
+ unsigned int raddr, u64 fill)
{
- uint64_t uwrd = 0;
+ u64 uwrd = 0;
unsigned int i;
if (!encap_page) {
@@ -1547,12 +1508,12 @@ static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
{
unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
- uint64_t fill_pat;
+ u64 fill_pat;
/* load the page starting at appropriate ustore address */
/* get fill-pattern from an image -- they are all the same */
memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
- sizeof(uint64_t));
+ sizeof(u64));
uw_physical_addr = encap_page->beg_addr_p;
uw_relative_addr = 0;
words_num = encap_page->micro_words_num;
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 1dfcab317bed..b975c263446d 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -1,62 +1,18 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include "adf_dh895xcc_hw_data.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
-static const uint32_t thrd_to_arb_map_sku4[] = {
+static const u32 thrd_to_arb_map_sku4[] = {
0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
0x00000000, 0x00000000, 0x00000000, 0x00000000
};
-static const uint32_t thrd_to_arb_map_sku6[] = {
+static const u32 thrd_to_arb_map_sku6[] = {
0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
@@ -68,20 +24,20 @@ static struct adf_hw_device_class dh895xcc_class = {
.instances = 0
};
-static uint32_t get_accel_mask(uint32_t fuse)
+static u32 get_accel_mask(u32 fuse)
{
return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
ADF_DH895XCC_ACCELERATORS_MASK;
}
-static uint32_t get_ae_mask(uint32_t fuse)
+static u32 get_ae_mask(u32 fuse)
{
return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK;
}
-static uint32_t get_num_accels(struct adf_hw_device_data *self)
+static u32 get_num_accels(struct adf_hw_device_data *self)
{
- uint32_t i, ctr = 0;
+ u32 i, ctr = 0;
if (!self || !self->accel_mask)
return 0;
@@ -93,9 +49,9 @@ static uint32_t get_num_accels(struct adf_hw_device_data *self)
return ctr;
}
-static uint32_t get_num_aes(struct adf_hw_device_data *self)
+static u32 get_num_aes(struct adf_hw_device_data *self)
{
- uint32_t i, ctr = 0;
+ u32 i, ctr = 0;
if (!self || !self->ae_mask)
return 0;
@@ -107,17 +63,17 @@ static uint32_t get_num_aes(struct adf_hw_device_data *self)
return ctr;
}
-static uint32_t get_misc_bar_id(struct adf_hw_device_data *self)
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_DH895XCC_PMISC_BAR;
}
-static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
return ADF_DH895XCC_ETR_BAR;
}
-static uint32_t get_sram_bar_id(struct adf_hw_device_data *self)
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
return ADF_DH895XCC_SRAM_BAR;
}
@@ -161,12 +117,12 @@ static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
}
}
-static uint32_t get_pf2vf_offset(uint32_t i)
+static u32 get_pf2vf_offset(u32 i)
{
return ADF_DH895XCC_PF2VF_OFFSET(i);
}
-static uint32_t get_vintmsk_offset(uint32_t i)
+static u32 get_vintmsk_offset(u32 i)
{
return ADF_DH895XCC_VINTMSK_OFFSET(i);
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 092f7353ed23..082a04466dca 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_DH895x_HW_DATA_H_
#define ADF_DH895x_HW_DATA_H_
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index b11bf8c0e683..4e877b75822b 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index a3b4dd8099a7..5246f0524ca3 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
index 6ddc19bd4410..2bfcc67f8f39 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#ifndef ADF_DH895XVF_HW_DATA_H_
#define ADF_DH895XVF_HW_DATA_H_
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 1b762eefc6c1..7d6e1db272c2 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 7770660bc853..cffa9fc628ff 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -14,7 +14,7 @@
struct qce_cipher_ctx {
u8 enc_key[QCE_MAX_KEY_SIZE];
unsigned int enc_keylen;
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
};
/**
@@ -43,6 +43,7 @@ struct qce_cipher_reqctx {
struct sg_table src_tbl;
struct scatterlist *src_sg;
unsigned int cryptlen;
+ struct skcipher_request fallback_req; // keep at the end
};
static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm)
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 9f989cba0f1b..85ba16418a04 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -87,6 +87,8 @@ struct qce_alg_template {
struct ahash_alg ahash;
} alg;
struct qce_device *qce;
+ const u8 *hash_zero;
+ const u32 digest_size;
};
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 1ab62e7d5f3c..c230843e2ffb 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -203,10 +203,18 @@ static int qce_import_common(struct ahash_request *req, u64 in_count,
static int qce_ahash_import(struct ahash_request *req, const void *in)
{
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
- unsigned long flags = rctx->flags;
- bool hmac = IS_SHA_HMAC(flags);
- int ret = -EINVAL;
+ struct qce_sha_reqctx *rctx;
+ unsigned long flags;
+ bool hmac;
+ int ret;
+
+ ret = qce_ahash_init(req);
+ if (ret)
+ return ret;
+
+ rctx = ahash_request_ctx(req);
+ flags = rctx->flags;
+ hmac = IS_SHA_HMAC(flags);
if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
const struct sha1_state *state = in;
@@ -284,8 +292,6 @@ static int qce_ahash_update(struct ahash_request *req)
if (!sg_last)
return -EINVAL;
- sg_mark_end(sg_last);
-
if (rctx->buflen) {
sg_init_table(rctx->sg, 2);
sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
@@ -305,8 +311,12 @@ static int qce_ahash_final(struct ahash_request *req)
struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
struct qce_device *qce = tmpl->qce;
- if (!rctx->buflen)
+ if (!rctx->buflen) {
+ if (tmpl->hash_zero)
+ memcpy(req->result, tmpl->hash_zero,
+ tmpl->alg.ahash.halg.digestsize);
return 0;
+ }
rctx->last_blk = true;
@@ -338,6 +348,13 @@ static int qce_ahash_digest(struct ahash_request *req)
rctx->first_blk = true;
rctx->last_blk = true;
+ if (!rctx->nbytes_orig) {
+ if (tmpl->hash_zero)
+ memcpy(req->result, tmpl->hash_zero,
+ tmpl->alg.ahash.halg.digestsize);
+ return 0;
+ }
+
return qce->async_req_enqueue(tmpl->qce, &req->base);
}
@@ -490,6 +507,11 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
alg->halg.digestsize = def->digestsize;
alg->halg.statesize = def->statesize;
+ if (IS_SHA1(def->flags))
+ tmpl->hash_zero = sha1_zero_message_hash;
+ else if (IS_SHA256(def->flags))
+ tmpl->hash_zero = sha256_zero_message_hash;
+
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
base->cra_priority = 300;
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 9412433f3b21..5630c5addd28 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -178,7 +178,7 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
break;
}
- ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
ctx->enc_keylen = keylen;
return ret;
@@ -235,16 +235,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
req->cryptlen <= aes_sw_max_len) ||
(IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
req->cryptlen % QCE_SECTOR_SIZE))) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
- ret = encrypt ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+ crypto_skcipher_decrypt(&rctx->fallback_req);
return ret;
}
@@ -263,10 +262,9 @@ static int qce_skcipher_decrypt(struct skcipher_request *req)
static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
- struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- memset(ctx, 0, sizeof(*ctx));
- crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+ /* take the size without the fallback skcipher_request at the end */
+ crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
+ fallback_req));
return 0;
}
@@ -274,17 +272,21 @@ static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- qce_skcipher_init(tfm);
- ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
- 0, CRYPTO_ALG_NEED_FALLBACK);
- return PTR_ERR_OR_ZERO(ctx->fallback);
+ ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback))
+ return PTR_ERR(ctx->fallback);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
+ crypto_skcipher_reqsize(ctx->fallback));
+ return 0;
}
static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(ctx->fallback);
+ crypto_free_skcipher(ctx->fallback);
}
struct qce_skcipher_def {
@@ -404,6 +406,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->base.cra_priority = 300;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
alg->base.cra_alignmask = 0;
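
The qce conversion above follows a common pattern for replacing an on-stack sync fallback with an async one: the fallback's skcipher_request is embedded as the last member of the request context (hence the "keep at the end" comment in cipher.h) and the tfm's reqsize is grown to cover it, so a single per-request allocation holds both the driver state and the fallback's state, and the original completion callback can simply be forwarded. A generic sketch of the pattern, with illustrative names:

struct my_ctx {
	struct crypto_skcipher *fallback;
};

struct my_reqctx {
	/* driver-private state ... */
	struct skcipher_request fallback_req;	/* variable-size tail, keep last */
};

static int my_init_fallback(struct crypto_skcipher *tfm)
{
	struct my_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	/* room for our context plus whatever the fallback request needs */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct my_reqctx) +
				    crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}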
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
new file mode 100644
index 000000000000..5bc099052bd2
--- /dev/null
+++ b/drivers/crypto/sa2ul.c
@@ -0,0 +1,2420 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * K3 SA2UL crypto accelerator driver
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Keerthy
+ * Vitaly Andrianov
+ * Tero Kristo
+ */
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include "sa2ul.h"
+
+/* Byte offset for key in encryption security context */
+#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
+/* Byte offset for Aux-1 in encryption security context */
+#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
+
+#define SA_CMDL_UPD_ENC 0x0001
+#define SA_CMDL_UPD_AUTH 0x0002
+#define SA_CMDL_UPD_ENC_IV 0x0004
+#define SA_CMDL_UPD_AUTH_IV 0x0008
+#define SA_CMDL_UPD_AUX_KEY 0x0010
+
+#define SA_AUTH_SUBKEY_LEN 16
+#define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF
+#define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000
+
+#define MODE_CONTROL_BYTES 27
+#define SA_HASH_PROCESSING 0
+#define SA_CRYPTO_PROCESSING 0
+#define SA_UPLOAD_HASH_TO_TLR BIT(6)
+
+#define SA_SW0_FLAGS_MASK 0xF0000
+#define SA_SW0_CMDL_INFO_MASK 0x1F00000
+#define SA_SW0_CMDL_PRESENT BIT(4)
+#define SA_SW0_ENG_ID_MASK 0x3E000000
+#define SA_SW0_DEST_INFO_PRESENT BIT(30)
+#define SA_SW2_EGRESS_LENGTH 0xFF000000
+#define SA_BASIC_HASH 0x10
+
+#define SHA256_DIGEST_WORDS 8
+/* Make 32-bit word from 4 bytes */
+#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
+ ((b2) << 8) | (b3))
+
+/* size of SCCTL structure in bytes */
+#define SA_SCCTL_SZ 16
+
+/* Max Authentication tag size */
+#define SA_MAX_AUTH_TAG_SZ 64
+
+#define PRIV_ID 0x1
+#define PRIV 0x1
+
+static struct device *sa_k3_dev;
+
+/**
+ * struct sa_cmdl_cfg - Command label configuration descriptor
+ * @aalg: authentication algorithm ID
+ * @enc_eng_id: Encryption Engine ID supported by the SA hardware
+ * @auth_eng_id: Authentication Engine ID
+ * @iv_size: Initialization Vector size
+ * @akey: Authentication key
+ * @akey_len: Authentication key length
+ * @enc: True, if this is an encode request
+ */
+struct sa_cmdl_cfg {
+ int aalg;
+ u8 enc_eng_id;
+ u8 auth_eng_id;
+ u8 iv_size;
+ const u8 *akey;
+ u16 akey_len;
+ bool enc;
+};
+
+/**
+ * struct algo_data - Crypto algorithm specific data
+ * @enc_eng: Encryption engine info structure
+ * @auth_eng: Authentication engine info structure
+ * @auth_ctrl: Authentication control word
+ * @hash_size: Size of digest
+ * @iv_idx: iv index in psdata
+ * @iv_out_size: iv out size
+ * @ealg_id: Encryption Algorithm ID
+ * @aalg_id: Authentication algorithm ID
+ * @mci_enc: Mode Control Instruction for Encryption algorithm
+ * @mci_dec: Mode Control Instruction for Decryption
+ * @inv_key: Whether the encryption algorithm demands key inversion
+ * @ctx: Pointer to the algorithm context
+ * @keyed_mac: Whether the authentication algorithm has key
+ * @prep_iopad: Function pointer to generate intermediate ipad/opad
+ */
+struct algo_data {
+ struct sa_eng_info enc_eng;
+ struct sa_eng_info auth_eng;
+ u8 auth_ctrl;
+ u8 hash_size;
+ u8 iv_idx;
+ u8 iv_out_size;
+ u8 ealg_id;
+ u8 aalg_id;
+ u8 *mci_enc;
+ u8 *mci_dec;
+ bool inv_key;
+ struct sa_tfm_ctx *ctx;
+ bool keyed_mac;
+ void (*prep_iopad)(struct algo_data *algo, const u8 *key,
+ u16 key_sz, __be32 *ipad, __be32 *opad);
+};
+
+/**
+ * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
+ * @type: Type of the crypto algorithm.
+ * @alg: Union of crypto algorithm definitions.
+ * @registered: Flag indicating if the crypto algorithm is already registered
+ */
+struct sa_alg_tmpl {
+ u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
+ union {
+ struct skcipher_alg skcipher;
+ struct ahash_alg ahash;
+ struct aead_alg aead;
+ } alg;
+ bool registered;
+};
+
+/**
+ * struct sa_rx_data: RX packet miscellaneous data placeholder
+ * @req: crypto request data pointer
+ * @ddev: pointer to the DMA device
+ * @tx_in: dma_async_tx_descriptor pointer for rx channel
+ * @split_src_sg: Set if the src sg is split and needs to be freed up
+ * @split_dst_sg: Set if the dst sg is split and needs to be freed up
+ * @enc: Flag indicating either encryption or decryption
+ * @enc_iv_size: Initialisation vector size
+ * @iv_idx: Initialisation vector index
+ * @rx_sg: Static scatterlist entry for overriding RX data
+ * @tx_sg: Static scatterlist entry for overriding TX data
+ * @src: Source data pointer
+ * @dst: Destination data pointer
+ */
+struct sa_rx_data {
+ void *req;
+ struct device *ddev;
+ struct dma_async_tx_descriptor *tx_in;
+ struct scatterlist *split_src_sg;
+ struct scatterlist *split_dst_sg;
+ u8 enc;
+ u8 enc_iv_size;
+ u8 iv_idx;
+ struct scatterlist rx_sg;
+ struct scatterlist tx_sg;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+};
+
+/**
+ * struct sa_req: SA request definition
+ * @dev: device for the request
+ * @size: total data to be transmitted via DMA
+ * @enc_offset: offset of cipher data
+ * @enc_size: data to be passed to cipher engine
+ * @enc_iv: cipher IV
+ * @auth_offset: offset of the authentication data
+ * @auth_size: size of the authentication data
+ * @auth_iv: authentication IV
+ * @type: algorithm type for the request
+ * @cmdl: command label pointer
+ * @base: pointer to the base request
+ * @ctx: pointer to the algorithm context data
+ * @enc: true if this is an encode request
+ * @src: source data
+ * @dst: destination data
+ * @callback: DMA callback for the request
+ * @mdata_size: metadata size passed to DMA
+ */
+struct sa_req {
+ struct device *dev;
+ u16 size;
+ u8 enc_offset;
+ u16 enc_size;
+ u8 *enc_iv;
+ u8 auth_offset;
+ u16 auth_size;
+ u8 *auth_iv;
+ u32 type;
+ u32 *cmdl;
+ struct crypto_async_request *base;
+ struct sa_tfm_ctx *ctx;
+ bool enc;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ dma_async_tx_callback callback;
+ u16 mdata_size;
+};
+
+/*
+ * Mode Control Instructions for various key lengths (128, 192, 256 bits),
+ * for CBC (Cipher Block Chaining) mode encryption
+ */
+static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
+ { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various key lengths (128, 192, 256 bits),
+ * for CBC (Cipher Block Chaining) mode decryption
+ */
+static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
+ { 0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various key lengths (128, 192, 256 bits),
+ * for CBC (Cipher Block Chaining) mode encryption, no-IV variant
+ */
+static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
+ { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various key lengths (128, 192, 256 bits),
+ * for CBC (Cipher Block Chaining) mode decryption, no-IV variant
+ */
+static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
+ { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various key lengths (128, 192, 256 bits),
+ * for ECB (Electronic Code Book) mode encryption
+ */
+static u8 mci_ecb_enc_array[3][MODE_CONTROL_BYTES] = {
+ { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various key lengths (128, 192, 256 bits),
+ * for ECB (Electronic Code Book) mode decryption
+ */
+static u8 mci_ecb_dec_array[3][MODE_CONTROL_BYTES] = {
+ { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for the 3DES algorithm:
+ * CBC (Cipher Block Chaining) and ECB modes,
+ * encryption and decryption variants
+ */
+static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
+ 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+};
+
+static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
+ 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+};
+
+static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
+ 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+};
+
+static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
+ 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+};
+
+/*
+ * Perform 16-byte (128-bit) swizzling.
+ * SA2UL expects the security context in little-endian order across its
+ * 128-bit (16-byte) bus, so swap 16 bytes at a time, from the higher
+ * to the lower address.
+ */
+static void sa_swiz_128(u8 *in, u16 len)
+{
+ u8 data[16];
+ int i, j;
+
+ for (i = 0; i < len; i += 16) {
+ memcpy(data, &in[i], 16);
+ for (j = 0; j < 16; j++)
+ in[i + j] = data[15 - j];
+ }
+}
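
A minimal userspace sketch of the same swizzle, for illustration only; it reverses each 16-byte group in place, so applying it twice restores the input:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void swiz_128(uint8_t *in, size_t len)
{
	uint8_t tmp[16];
	size_t i, j;

	for (i = 0; i + 16 <= len; i += 16) {
		memcpy(tmp, &in[i], 16);
		for (j = 0; j < 16; j++)
			in[i + j] = tmp[15 - j];
	}
}

int main(void)
{
	uint8_t buf[16];
	int i;

	for (i = 0; i < 16; i++)
		buf[i] = i;		/* 00 01 ... 0f */
	swiz_128(buf, sizeof(buf));	/* now 0f 0e ... 00 */
	swiz_128(buf, sizeof(buf));	/* back to 00 01 ... 0f */
	printf("round-trip ok: %d\n", buf[0] == 0 && buf[15] == 15);
	return 0;
}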
+
+/* Prepare the ipad and opad from the key (HMAC step 1, RFC 2104) */
+static void prepare_kiopad(u8 *k_ipad, u8 *k_opad, const u8 *key, u16 key_sz)
+{
+ int i;
+
+ for (i = 0; i < key_sz; i++) {
+ k_ipad[i] = key[i] ^ 0x36;
+ k_opad[i] = key[i] ^ 0x5c;
+ }
+
+	/* Key is zero-padded, so the rest is the pad constant itself */
+ for (; i < SHA1_BLOCK_SIZE; i++) {
+ k_ipad[i] = 0x36;
+ k_opad[i] = 0x5c;
+ }
+}
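
The 0x36 and 0x5c bytes are the standard HMAC inner and outer pads; for reference, the construction being set up is (RFC 2104):

    HMAC(K, m) = H((K' XOR opad) || H((K' XOR ipad) || m))

with K' the key zero-padded to the hash block size. sa_prepare_iopads() below hashes a single block of each padded key and exports the partial state, so the engine can resume from those intermediate digests instead of redoing the pad block for every request.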
+
+static void sa_export_shash(struct shash_desc *hash, int block_size,
+ int digest_size, __be32 *out)
+{
+ union {
+ struct sha1_state sha1;
+ struct sha256_state sha256;
+ struct sha512_state sha512;
+ } sha;
+ void *state;
+ u32 *result;
+ int i;
+
+ switch (digest_size) {
+ case SHA1_DIGEST_SIZE:
+ state = &sha.sha1;
+ result = sha.sha1.state;
+ break;
+ case SHA256_DIGEST_SIZE:
+ state = &sha.sha256;
+ result = sha.sha256.state;
+ break;
+ default:
+ dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
+ digest_size);
+ return;
+ }
+
+ crypto_shash_export(hash, state);
+
+ for (i = 0; i < digest_size >> 2; i++)
+ out[i] = cpu_to_be32(result[i]);
+}
+
+static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
+ u16 key_sz, __be32 *ipad, __be32 *opad)
+{
+ SHASH_DESC_ON_STACK(shash, data->ctx->shash);
+ int block_size = crypto_shash_blocksize(data->ctx->shash);
+ int digest_size = crypto_shash_digestsize(data->ctx->shash);
+ u8 k_ipad[SHA1_BLOCK_SIZE];
+ u8 k_opad[SHA1_BLOCK_SIZE];
+
+ shash->tfm = data->ctx->shash;
+
+ prepare_kiopad(k_ipad, k_opad, key, key_sz);
+
+ memzero_explicit(ipad, block_size);
+ memzero_explicit(opad, block_size);
+
+ crypto_shash_init(shash);
+ crypto_shash_update(shash, k_ipad, block_size);
+ sa_export_shash(shash, block_size, digest_size, ipad);
+
+ crypto_shash_init(shash);
+ crypto_shash_update(shash, k_opad, block_size);
+
+ sa_export_shash(shash, block_size, digest_size, opad);
+}
+
+/* Derive the inverse key used in AES-CBC decryption operation */
+static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
+{
+ struct crypto_aes_ctx ctx;
+ int key_pos;
+
+ if (aes_expandkey(&ctx, key, key_sz)) {
+ dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
+ return -EINVAL;
+ }
+
+	/* workaround to get the right inverse for AES_KEYSIZE_192 keys */
+ if (key_sz == AES_KEYSIZE_192) {
+ ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
+ ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
+ }
+
+	/* Based on the crypto_aes_expand_key() logic */
+ switch (key_sz) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ key_pos = key_sz + 24;
+ break;
+
+ case AES_KEYSIZE_256:
+ key_pos = key_sz + 24 - 4;
+ break;
+
+ default:
+ dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
+ return -EINVAL;
+ }
+
+ memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
+ return 0;
+}
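
A quick check of the word arithmetic above (key_enc is a u32 array, so key_pos counts 32-bit words, as I read it):

    AES-128: 44-word schedule, last round key = words 40..43;
             key_pos = 16 + 24 = 40, and 16 bytes (words 40..43) are copied.
    AES-192: 52-word schedule ends at word 51; key_pos = 24 + 24 = 48 and
             24 bytes (words 48..53) are copied, which is why the workaround
             above synthesizes words 52 and 53.
    AES-256: 60-word schedule; key_pos = 32 + 24 - 4 = 52, and 32 bytes
             cover words 52..59, the last two round keys.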
+
+/* Set Security context for the encryption engine */
+static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
+ u8 enc, u8 *sc_buf)
+{
+ const u8 *mci = NULL;
+
+ /* Set Encryption mode selector to crypto processing */
+ sc_buf[0] = SA_CRYPTO_PROCESSING;
+
+ if (enc)
+ mci = ad->mci_enc;
+ else
+ mci = ad->mci_dec;
+ /* Set the mode control instructions in security context */
+ if (mci)
+ memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
+
+ /* For AES-CBC decryption get the inverse key */
+ if (ad->inv_key && !enc) {
+ if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
+ return -EINVAL;
+ /* For all other cases: key is used */
+ } else {
+ memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
+ }
+
+ return 0;
+}
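
Putting the encryption-context offsets together: byte 0 is the mode selector, bytes 1..27 carry the MCI, and SC_ENC_KEY_OFFSET = 1 + 27 + 4 = 32 places the key after the MCI plus four reserved bytes, while SC_ENC_AUX1_OFFSET = 1 + 27 + 4 + 32 = 64 follows a 32-byte key slot.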
+
+/* Set Security context for the authentication engine */
+static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
+ u8 *sc_buf)
+{
+ __be32 ipad[64], opad[64];
+
+ /* Set Authentication mode selector to hash processing */
+ sc_buf[0] = SA_HASH_PROCESSING;
+ /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
+ sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
+ sc_buf[1] |= ad->auth_ctrl;
+
+ /* Copy the keys or ipad/opad */
+ if (ad->keyed_mac) {
+ ad->prep_iopad(ad, key, key_sz, ipad, opad);
+
+ /* Copy ipad to AuthKey */
+ memcpy(&sc_buf[32], ipad, ad->hash_size);
+ /* Copy opad to Aux-1 */
+ memcpy(&sc_buf[64], opad, ad->hash_size);
+ } else {
+ /* basic hash */
+ sc_buf[1] |= SA_BASIC_HASH;
+ }
+}
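
For reference, the authentication-context bytes written above:

    sc_buf[0]     SA_HASH_PROCESSING (mode selector)
    sc_buf[1]     SA_UPLOAD_HASH_TO_TLR | auth_ctrl, plus SA_BASIC_HASH
                  for an unkeyed hash
    sc_buf[32..]  ipad intermediate digest (keyed MAC, the AuthKey slot)
    sc_buf[64..]  opad intermediate digest (keyed MAC, the Aux-1 slot)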
+
+static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
+{
+ int j;
+
+ for (j = 0; j < ((size16) ? 4 : 2); j++) {
+ *out = cpu_to_be32(*((u32 *)iv));
+ iv += 4;
+ out++;
+ }
+}
+
+/* Format general command label */
+static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
+ struct sa_cmdl_upd_info *upd_info)
+{
+ u8 enc_offset = 0, auth_offset = 0, total = 0;
+ u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
+ u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
+ u32 *word_ptr = (u32 *)cmdl;
+ int i;
+
+ /* Clear the command label */
+ memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
+
+	/* Initialize the command update structure */
+ memzero_explicit(upd_info, sizeof(*upd_info));
+
+ if (cfg->enc_eng_id && cfg->auth_eng_id) {
+ if (cfg->enc) {
+ auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
+ enc_next_eng = cfg->auth_eng_id;
+
+ if (cfg->iv_size)
+ auth_offset += cfg->iv_size;
+ } else {
+ enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
+ auth_next_eng = cfg->enc_eng_id;
+ }
+ }
+
+ if (cfg->enc_eng_id) {
+ upd_info->flags |= SA_CMDL_UPD_ENC;
+ upd_info->enc_size.index = enc_offset >> 2;
+ upd_info->enc_offset.index = upd_info->enc_size.index + 1;
+ /* Encryption command label */
+ cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
+
+ /* Encryption modes requiring IV */
+ if (cfg->iv_size) {
+ upd_info->flags |= SA_CMDL_UPD_ENC_IV;
+ upd_info->enc_iv.index =
+ (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
+ upd_info->enc_iv.size = cfg->iv_size;
+
+ cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
+ SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
+
+ cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
+ (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
+ total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
+ } else {
+ cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
+ SA_CMDL_HEADER_SIZE_BYTES;
+ total += SA_CMDL_HEADER_SIZE_BYTES;
+ }
+ }
+
+ if (cfg->auth_eng_id) {
+ upd_info->flags |= SA_CMDL_UPD_AUTH;
+ upd_info->auth_size.index = auth_offset >> 2;
+ upd_info->auth_offset.index = upd_info->auth_size.index + 1;
+ cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
+ cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
+ SA_CMDL_HEADER_SIZE_BYTES;
+ total += SA_CMDL_HEADER_SIZE_BYTES;
+ }
+
+ total = roundup(total, 8);
+
+ for (i = 0; i < total / 4; i++)
+ word_ptr[i] = swab32(word_ptr[i]);
+
+ return total;
+}
+
+/* Update Command label */
+static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
+ struct sa_cmdl_upd_info *upd_info)
+{
+ int i = 0, j;
+
+ if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
+ cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
+ cmdl[upd_info->enc_size.index] |= req->enc_size;
+ cmdl[upd_info->enc_offset.index] &=
+ ~SA_CMDL_SOP_BYPASS_LEN_MASK;
+ cmdl[upd_info->enc_offset.index] |=
+ ((u32)req->enc_offset <<
+ __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
+
+ if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
+ __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
+ u32 *enc_iv = (u32 *)req->enc_iv;
+
+ for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
+ data[j] = cpu_to_be32(*enc_iv);
+ enc_iv++;
+ }
+ }
+ }
+
+ if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
+ cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
+ cmdl[upd_info->auth_size.index] |= req->auth_size;
+ cmdl[upd_info->auth_offset.index] &=
+ ~SA_CMDL_SOP_BYPASS_LEN_MASK;
+ cmdl[upd_info->auth_offset.index] |=
+ ((u32)req->auth_offset <<
+ __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
+ if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
+ sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
+ req->auth_iv,
+ (upd_info->auth_iv.size > 8));
+ }
+ if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
+ int offset = (req->auth_size & 0xF) ? 4 : 0;
+
+ memcpy(&cmdl[upd_info->aux_key_info.index],
+ &upd_info->aux_key[offset], 16);
+ }
+ }
+}
+
+/* Format SWINFO words to be sent to SA */
+static
+void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
+ u8 cmdl_present, u8 cmdl_offset, u8 flags,
+ u8 hash_size, u32 *swinfo)
+{
+ swinfo[0] = sc_id;
+ swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
+ if (likely(cmdl_present))
+ swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
+ __ffs(SA_SW0_CMDL_INFO_MASK));
+ swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));
+
+ swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
+ swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
+ swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
+ swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
+}
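
A standalone sketch of the SW0 packing above, runnable in userspace; the sample field values are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define FLAGS_SHIFT   16          /* __ffs(SA_SW0_FLAGS_MASK = 0x000F0000) */
#define CMDL_SHIFT    20          /* __ffs(SA_SW0_CMDL_INFO_MASK = 0x01F00000) */
#define CMDL_PRESENT  (1u << 4)   /* SA_SW0_CMDL_PRESENT */
#define ENG_ID_SHIFT  25          /* __ffs(SA_SW0_ENG_ID_MASK = 0x3E000000) */
#define DEST_INFO     (1u << 30)  /* SA_SW0_DEST_INFO_PRESENT */

int main(void)
{
	uint32_t sc_id = 0x40, flags = 0x1, cmdl_offset = 0, eng_id = 2;
	uint32_t sw0 = sc_id;

	sw0 |= flags << FLAGS_SHIFT;
	sw0 |= (cmdl_offset | CMDL_PRESENT) << CMDL_SHIFT;
	sw0 |= eng_id << ENG_ID_SHIFT;
	sw0 |= DEST_INFO;

	printf("SW0 = 0x%08x\n", sw0);	/* prints SW0 = 0x45010040 */
	return 0;
}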
+
+/* Dump the security context */
+static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
+{
+#ifdef DEBUG
+ dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1, buf, SA_CTX_MAX_SZ, false);
+#endif
+}
+
+static
+int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
+ u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
+ struct algo_data *ad, u8 enc, u32 *swinfo)
+{
+ int enc_sc_offset = 0;
+ int auth_sc_offset = 0;
+ u8 *sc_buf = ctx->sc;
+ u16 sc_id = ctx->sc_id;
+ u8 first_engine = 0;
+
+ memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
+
+ if (ad->auth_eng.eng_id) {
+ if (enc)
+ first_engine = ad->enc_eng.eng_id;
+ else
+ first_engine = ad->auth_eng.eng_id;
+
+ enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
+ auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
+ sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
+ if (!ad->hash_size)
+ return -EINVAL;
+ ad->hash_size = roundup(ad->hash_size, 8);
+
+ } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
+ enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
+ first_engine = ad->enc_eng.eng_id;
+ sc_buf[1] = SA_SCCTL_FE_ENC;
+ ad->hash_size = ad->iv_out_size;
+ }
+
+ /* SCCTL Owner info: 0=host, 1=CP_ACE */
+ sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
+ memcpy(&sc_buf[2], &sc_id, 2);
+ sc_buf[4] = 0x0;
+ sc_buf[5] = PRIV_ID;
+ sc_buf[6] = PRIV;
+ sc_buf[7] = 0x0;
+
+ /* Prepare context for encryption engine */
+ if (ad->enc_eng.sc_size) {
+ if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
+ &sc_buf[enc_sc_offset]))
+ return -EINVAL;
+ }
+
+ /* Prepare context for authentication engine */
+ if (ad->auth_eng.sc_size)
+ sa_set_sc_auth(ad, auth_key, auth_key_sz,
+ &sc_buf[auth_sc_offset]);
+
+ /* Set the ownership of context to CP_ACE */
+ sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
+
+ /* swizzle the security context */
+ sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
+
+ sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
+ SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
+
+ sa_dump_sc(sc_buf, ctx->sc_phys);
+
+ return 0;
+}
+
+/* Free the per direction context memory */
+static void sa_free_ctx_info(struct sa_ctx_info *ctx,
+ struct sa_crypto_data *data)
+{
+ unsigned long bn;
+
+ bn = ctx->sc_id - data->sc_id_start;
+ spin_lock(&data->scid_lock);
+ __clear_bit(bn, data->ctx_bm);
+ data->sc_id--;
+ spin_unlock(&data->scid_lock);
+
+ if (ctx->sc) {
+ dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
+ ctx->sc = NULL;
+ }
+}
+
+static int sa_init_ctx_info(struct sa_ctx_info *ctx,
+ struct sa_crypto_data *data)
+{
+ unsigned long bn;
+ int err;
+
+ spin_lock(&data->scid_lock);
+ bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
+ __set_bit(bn, data->ctx_bm);
+ data->sc_id++;
+ spin_unlock(&data->scid_lock);
+
+ ctx->sc_id = (u16)(data->sc_id_start + bn);
+
+ ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
+ if (!ctx->sc) {
+ dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
+ err = -ENOMEM;
+ goto scid_rollback;
+ }
+
+ return 0;
+
+scid_rollback:
+ spin_lock(&data->scid_lock);
+ __clear_bit(bn, data->ctx_bm);
+ data->sc_id--;
+ spin_unlock(&data->scid_lock);
+
+ return err;
+}
+
+static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
+{
+ struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+
+ sa_free_ctx_info(&ctx->enc, data);
+ sa_free_ctx_info(&ctx->dec, data);
+
+ crypto_free_sync_skcipher(ctx->fallback.skcipher);
+}
+
+static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
+{
+ struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ int ret;
+
+ memzero_explicit(ctx, sizeof(*ctx));
+ ctx->dev_data = data;
+
+ ret = sa_init_ctx_info(&ctx->enc, data);
+ if (ret)
+ return ret;
+ ret = sa_init_ctx_info(&ctx->dec, data);
+ if (ret) {
+ sa_free_ctx_info(&ctx->enc, data);
+ return ret;
+ }
+
+ ctx->fallback.skcipher =
+ crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(ctx->fallback.skcipher)) {
+ dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
+ return PTR_ERR(ctx->fallback.skcipher);
+ }
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+ return 0;
+}
+
+static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen, struct algo_data *ad)
+{
+ struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int cmdl_len;
+ struct sa_cmdl_cfg cfg;
+ int ret;
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ ad->enc_eng.eng_id = SA_ENG_ID_EM1;
+ ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
+
+ memzero_explicit(&cfg, sizeof(cfg));
+ cfg.enc_eng_id = ad->enc_eng.eng_id;
+ cfg.iv_size = crypto_skcipher_ivsize(tfm);
+
+ crypto_sync_skcipher_clear_flags(ctx->fallback.skcipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->fallback.skcipher,
+ tfm->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback.skcipher, key, keylen);
+ if (ret)
+ return ret;
+
+ /* Setup Encryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
+ &ctx->enc.epib[1]))
+ goto badkey;
+
+ cmdl_len = sa_format_cmdl_gen(&cfg,
+ (u8 *)ctx->enc.cmdl,
+ &ctx->enc.cmdl_upd_info);
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ goto badkey;
+
+ ctx->enc.cmdl_size = cmdl_len;
+
+ /* Setup Decryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
+ &ctx->dec.epib[1]))
+ goto badkey;
+
+ cfg.enc_eng_id = ad->enc_eng.eng_id;
+ cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
+ &ctx->dec.cmdl_upd_info);
+
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ goto badkey;
+
+ ctx->dec.cmdl_size = cmdl_len;
+ ctx->iv_idx = ad->iv_idx;
+
+ return 0;
+
+badkey:
+ dev_err(sa_k3_dev, "%s: badkey\n", __func__);
+ return -EINVAL;
+}
+
+static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+ /* Convert the key size (16/24/32) to the key size index (0/1/2) */
+ int key_idx = (keylen >> 3) - 2;
+
+ if (key_idx >= 3)
+ return -EINVAL;
+
+ ad.mci_enc = mci_cbc_enc_array[key_idx];
+ ad.mci_dec = mci_cbc_dec_array[key_idx];
+ ad.inv_key = true;
+ ad.ealg_id = SA_EALG_ID_AES_CBC;
+ ad.iv_idx = 4;
+ ad.iv_out_size = 16;
+
+ return sa_cipher_setkey(tfm, key, keylen, &ad);
+}
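
A quick check of the index mapping used by these setkey helpers: (16 >> 3) - 2 = 0, (24 >> 3) - 2 = 1 and (32 >> 3) - 2 = 2, selecting the 128/192/256-bit row of the MCI tables. Note that key lengths below 16 bytes would produce a negative index that the key_idx >= 3 test alone does not reject; it is the explicit keylen check in sa_cipher_setkey() that turns those away.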
+
+static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+ /* Convert the key size (16/24/32) to the key size index (0/1/2) */
+ int key_idx = (keylen >> 3) - 2;
+
+ if (key_idx >= 3)
+ return -EINVAL;
+
+ ad.mci_enc = mci_ecb_enc_array[key_idx];
+ ad.mci_dec = mci_ecb_dec_array[key_idx];
+ ad.inv_key = true;
+ ad.ealg_id = SA_EALG_ID_AES_ECB;
+
+ return sa_cipher_setkey(tfm, key, keylen, &ad);
+}
+
+static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+
+ ad.mci_enc = mci_cbc_3des_enc_array;
+ ad.mci_dec = mci_cbc_3des_dec_array;
+ ad.ealg_id = SA_EALG_ID_3DES_CBC;
+ ad.iv_idx = 6;
+ ad.iv_out_size = 8;
+
+ return sa_cipher_setkey(tfm, key, keylen, &ad);
+}
+
+static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+
+ ad.mci_enc = mci_ecb_3des_enc_array;
+ ad.mci_dec = mci_ecb_3des_dec_array;
+
+ return sa_cipher_setkey(tfm, key, keylen, &ad);
+}
+
+static void sa_aes_dma_in_callback(void *data)
+{
+ struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct skcipher_request *req;
+ int sglen;
+ u32 *result;
+ __be32 *mdptr;
+ size_t ml, pl;
+ int i;
+ enum dma_data_direction dir_src;
+ bool diff_dst;
+
+ req = container_of(rxd->req, struct skcipher_request, base);
+ sglen = sg_nents_for_len(req->src, req->cryptlen);
+
+	diff_dst = (req->src != req->dst);
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+ if (req->iv) {
+ mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
+ &ml);
+ result = (u32 *)req->iv;
+
+ for (i = 0; i < (rxd->enc_iv_size / 4); i++)
+ result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
+ }
+
+ dma_unmap_sg(rxd->ddev, req->src, sglen, dir_src);
+ kfree(rxd->split_src_sg);
+
+ if (diff_dst) {
+ sglen = sg_nents_for_len(req->dst, req->cryptlen);
+
+ dma_unmap_sg(rxd->ddev, req->dst, sglen,
+ DMA_FROM_DEVICE);
+ kfree(rxd->split_dst_sg);
+ }
+
+ kfree(rxd);
+
+ skcipher_request_complete(req, 0);
+}
+
+static void
+sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
+{
+ u32 *out, *in;
+ int i;
+
+ for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
+ *out++ = *in++;
+
+ mdptr[4] = (0xFFFF << 16);
+ for (out = &mdptr[5], in = psdata, i = 0;
+ i < pslen / sizeof(u32); i++)
+ *out++ = *in++;
+}
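
As I read sa_prepare_tx_desc(), the metadata area ends up laid out as:

    mdptr[0..3]   EPIB words (epiblen / 4 of them; the marker lands at
                  index 4, so in practice the EPIB is four words)
    mdptr[4]      marker word with 0xFFFF in the upper 16 bits
    mdptr[5..]    command label plus PSDATA context words (pslen / 4)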
+
+static int sa_run(struct sa_req *req)
+{
+ struct sa_rx_data *rxd;
+ gfp_t gfp_flags;
+ u32 cmdl[SA_MAX_CMDL_WORDS];
+ struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
+ struct device *ddev;
+ struct dma_chan *dma_rx;
+ int sg_nents, src_nents, dst_nents;
+ int mapped_src_nents, mapped_dst_nents;
+ struct scatterlist *src, *dst;
+ size_t pl, ml, split_size;
+ struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
+ int ret;
+ struct dma_async_tx_descriptor *tx_out;
+ u32 *mdptr;
+ bool diff_dst;
+ enum dma_data_direction dir_src;
+
+ gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ rxd = kzalloc(sizeof(*rxd), gfp_flags);
+ if (!rxd)
+ return -ENOMEM;
+
+ if (req->src != req->dst) {
+ diff_dst = true;
+ dir_src = DMA_TO_DEVICE;
+ } else {
+ diff_dst = false;
+ dir_src = DMA_BIDIRECTIONAL;
+ }
+
+ /*
+	 * SA2UL selects the receive DMA channel based on the size of the
+	 * data passed to the engine. Within the transition range between
+	 * the two channels there is a window where the destination cannot
+	 * be determined, which must be avoided; the individual algorithm
+	 * implementations route such sizes to the SW fallback instead.
+ */
+ if (req->size >= 256)
+ dma_rx = pdata->dma_rx2;
+ else
+ dma_rx = pdata->dma_rx1;
+
+ ddev = dma_rx->device->dev;
+
+ memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
+
+ sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
+
+ if (req->type != CRYPTO_ALG_TYPE_AHASH) {
+ if (req->enc)
+ req->type |=
+ (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
+ else
+ req->type |=
+ (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
+ }
+
+ cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
+
+ /*
+ * Map the packets, first we check if the data fits into a single
+ * sg entry and use that if possible. If it does not fit, we check
+ * if we need to do sg_split to align the scatterlist data on the
+ * actual data size being processed by the crypto engine.
+ */
+ src = req->src;
+ sg_nents = sg_nents_for_len(src, req->size);
+
+ split_size = req->size;
+
+ if (sg_nents == 1 && split_size <= req->src->length) {
+ src = &rxd->rx_sg;
+ sg_init_table(src, 1);
+ sg_set_page(src, sg_page(req->src), split_size,
+ req->src->offset);
+ src_nents = 1;
+ dma_map_sg(ddev, src, sg_nents, dir_src);
+ } else {
+ mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents,
+ dir_src);
+ ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size,
+ &src, &src_nents, gfp_flags);
+ if (ret) {
+ src_nents = sg_nents;
+ src = req->src;
+ } else {
+ rxd->split_src_sg = src;
+ }
+ }
+
+ if (!diff_dst) {
+ dst_nents = src_nents;
+ dst = src;
+ } else {
+ dst_nents = sg_nents_for_len(req->dst, req->size);
+
+ if (dst_nents == 1 && split_size <= req->dst->length) {
+ dst = &rxd->tx_sg;
+ sg_init_table(dst, 1);
+ sg_set_page(dst, sg_page(req->dst), split_size,
+ req->dst->offset);
+ dst_nents = 1;
+ dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
+ mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
+ &split_size, &dst, &dst_nents,
+ gfp_flags);
+ if (ret) {
+ dst = req->dst;
+ } else {
+ rxd->split_dst_sg = dst;
+ }
+ }
+ }
+
+ if (unlikely(src_nents != sg_nents)) {
+ dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n");
+ ret = -EIO;
+ goto err_cleanup;
+ }
+
+ rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxd->tx_in) {
+ dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto err_cleanup;
+ }
+
+ rxd->req = (void *)req->base;
+ rxd->enc = req->enc;
+ rxd->ddev = ddev;
+ rxd->src = src;
+ rxd->dst = dst;
+ rxd->iv_idx = req->ctx->iv_idx;
+ rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
+ rxd->tx_in->callback = req->callback;
+ rxd->tx_in->callback_param = rxd;
+
+ tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
+ src_nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!tx_out) {
+ dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto err_cleanup;
+ }
+
+ /*
+ * Prepare metadata for DMA engine. This essentially describes the
+ * crypto algorithm to be used, data sizes, different keys etc.
+ */
+ mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
+
+ sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
+ sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
+ sa_ctx->epib);
+
+ ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
+ dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
+
+ dmaengine_submit(tx_out);
+ dmaengine_submit(rxd->tx_in);
+
+ dma_async_issue_pending(dma_rx);
+ dma_async_issue_pending(pdata->dma_tx);
+
+ return -EINPROGRESS;
+
+err_cleanup:
+ dma_unmap_sg(ddev, req->src, sg_nents, DMA_TO_DEVICE);
+ kfree(rxd->split_src_sg);
+
+ if (req->src != req->dst) {
+ dst_nents = sg_nents_for_len(req->dst, req->size);
+ dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE);
+ kfree(rxd->split_dst_sg);
+ }
+
+ kfree(rxd);
+
+ return ret;
+}
+
+static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
+{
+ struct sa_tfm_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct crypto_alg *alg = req->base.tfm->__crt_alg;
+ struct sa_req sa_req = { 0 };
+ int ret;
+
+ if (!req->cryptlen)
+ return 0;
+
+ if (req->cryptlen % alg->cra_blocksize)
+ return -EINVAL;
+
+ /* Use SW fallback if the data size is not supported */
+ if (req->cryptlen > SA_MAX_DATA_SZ ||
+ (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
+ req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback.skcipher);
+
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback.skcipher);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ if (enc)
+ ret = crypto_skcipher_encrypt(subreq);
+ else
+ ret = crypto_skcipher_decrypt(subreq);
+
+ skcipher_request_zero(subreq);
+ return ret;
+ }
+
+ sa_req.size = req->cryptlen;
+ sa_req.enc_size = req->cryptlen;
+ sa_req.src = req->src;
+ sa_req.dst = req->dst;
+ sa_req.enc_iv = iv;
+ sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
+ sa_req.enc = enc;
+ sa_req.callback = sa_aes_dma_in_callback;
+ sa_req.mdata_size = 44;
+ sa_req.base = &req->base;
+ sa_req.ctx = ctx;
+
+ return sa_run(&sa_req);
+}
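
The size test above mirrors the receive-channel note in sa_run(): lengths inside the rx1/rx2 transition window, and anything beyond the engine maximum, are routed to the software fallback. A minimal sketch of the predicate, with placeholder bounds since sa2ul.h is not shown in this hunk:

#include <stdbool.h>
#include <stddef.h>

/* Placeholder values; the real bounds are defined in sa2ul.h. */
#define SA_MAX_DATA_SZ        65535
#define SA_UNSAFE_DATA_SZ_MIN 240
#define SA_UNSAFE_DATA_SZ_MAX 256

static bool sa_needs_sw_fallback(size_t len)
{
	return len > SA_MAX_DATA_SZ ||
	       (len >= SA_UNSAFE_DATA_SZ_MIN && len <= SA_UNSAFE_DATA_SZ_MAX);
}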
+
+static int sa_encrypt(struct skcipher_request *req)
+{
+ return sa_cipher_run(req, req->iv, 1);
+}
+
+static int sa_decrypt(struct skcipher_request *req)
+{
+ return sa_cipher_run(req, req->iv, 0);
+}
+
+static void sa_sha_dma_in_callback(void *data)
+{
+ struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ unsigned int authsize;
+ int i, sg_nents;
+ size_t ml, pl;
+ u32 *result;
+ __be32 *mdptr;
+
+ req = container_of(rxd->req, struct ahash_request, base);
+ tfm = crypto_ahash_reqtfm(req);
+ authsize = crypto_ahash_digestsize(tfm);
+
+ mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
+ result = (u32 *)req->result;
+
+ for (i = 0; i < (authsize / 4); i++)
+ result[i] = be32_to_cpu(mdptr[i + 4]);
+
+ sg_nents = sg_nents_for_len(req->src, req->nbytes);
+ dma_unmap_sg(rxd->ddev, req->src, sg_nents, DMA_FROM_DEVICE);
+
+ kfree(rxd->split_src_sg);
+
+ kfree(rxd);
+
+ ahash_request_complete(req, 0);
+}
+
+static int zero_message_process(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int sa_digest_size = crypto_ahash_digestsize(tfm);
+
+ switch (sa_digest_size) {
+ case SHA1_DIGEST_SIZE:
+ memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
+ break;
+ case SHA256_DIGEST_SIZE:
+ memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
+ break;
+ case SHA512_DIGEST_SIZE:
+ memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
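
The constants copied above are the well-known digests of the empty message; <crypto/sha.h> exports them (sha1_zero_message_hash and friends) so drivers do not have to hard-code the values:

    SHA-1("")   = da39a3ee5e6b4b0d3255bfef95601890afd80709
    SHA-256("") = e3b0c44298fc1c149afbf4c8996fb92427ae41e4
                  649b934ca495991b7852b855
    SHA-512("") = cf83e1357eefb8bdf1542850d66d8007d620e405
                  0b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0
                  ff8318d2877eec2f63b931bd47417a81a538327a
                  f927da3e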
+
+static int sa_sha_run(struct ahash_request *req)
+{
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_req sa_req = { 0 };
+ size_t auth_len;
+
+ auth_len = req->nbytes;
+
+ if (!auth_len)
+ return zero_message_process(req);
+
+ if (auth_len > SA_MAX_DATA_SZ ||
+ (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
+ auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
+ struct ahash_request *subreq = &rctx->fallback_req;
+ int ret = 0;
+
+ ahash_request_set_tfm(subreq, ctx->fallback.ahash);
+ subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ crypto_ahash_init(subreq);
+
+ subreq->nbytes = auth_len;
+ subreq->src = req->src;
+ subreq->result = req->result;
+
+ ret |= crypto_ahash_update(subreq);
+
+ subreq->nbytes = 0;
+
+ ret |= crypto_ahash_final(subreq);
+
+ return ret;
+ }
+
+ sa_req.size = auth_len;
+ sa_req.auth_size = auth_len;
+ sa_req.src = req->src;
+ sa_req.dst = req->src;
+ sa_req.enc = true;
+ sa_req.type = CRYPTO_ALG_TYPE_AHASH;
+ sa_req.callback = sa_sha_dma_in_callback;
+ sa_req.mdata_size = 28;
+ sa_req.ctx = ctx;
+ sa_req.base = &req->base;
+
+ return sa_run(&sa_req);
+}
+
+static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
+{
+ int bs = crypto_shash_blocksize(ctx->shash);
+ int cmdl_len;
+ struct sa_cmdl_cfg cfg;
+
+ ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
+ ad->auth_eng.eng_id = SA_ENG_ID_AM1;
+ ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
+
+ memset(ctx->authkey, 0, bs);
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.aalg = ad->aalg_id;
+ cfg.enc_eng_id = ad->enc_eng.eng_id;
+ cfg.auth_eng_id = ad->auth_eng.eng_id;
+ cfg.iv_size = 0;
+ cfg.akey = NULL;
+ cfg.akey_len = 0;
+
+ /* Setup Encryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
+ &ctx->enc.epib[1]))
+ goto badkey;
+
+ cmdl_len = sa_format_cmdl_gen(&cfg,
+ (u8 *)ctx->enc.cmdl,
+ &ctx->enc.cmdl_upd_info);
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ goto badkey;
+
+ ctx->enc.cmdl_size = cmdl_len;
+
+ return 0;
+
+badkey:
+ dev_err(sa_k3_dev, "%s: badkey\n", __func__);
+ return -EINVAL;
+}
+
+static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+{
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+ int ret;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->dev_data = data;
+ ret = sa_init_ctx_info(&ctx->enc, data);
+ if (ret)
+ return ret;
+
+ if (alg_base) {
+ ctx->shash = crypto_alloc_shash(alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->shash)) {
+ dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
+ alg_base);
+ return PTR_ERR(ctx->shash);
+ }
+ /* for fallback */
+ ctx->fallback.ahash =
+ crypto_alloc_ahash(alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback.ahash)) {
+ dev_err(ctx->dev_data->dev,
+ "Could not load fallback driver\n");
+ return PTR_ERR(ctx->fallback.ahash);
+ }
+ }
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct sa_sha_req_ctx) +
+ crypto_ahash_reqsize(ctx->fallback.ahash));
+
+ return 0;
+}
+
+static int sa_sha_digest(struct ahash_request *req)
+{
+ return sa_sha_run(req);
+}
+
+static int sa_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ dev_dbg(sa_k3_dev, "init: digest size: %d, rctx=%llx\n",
+ crypto_ahash_digestsize(tfm), (u64)rctx);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int sa_sha_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int sa_sha_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int sa_sha_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int sa_sha_import(struct ahash_request *req, const void *in)
+{
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int sa_sha_export(struct ahash_request *req, void *out)
+{
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = &rctx->fallback_req;
+
+ ahash_request_set_tfm(subreq, ctx->fallback.ahash);
+ subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(subreq, out);
+}
+
+static int sa_sha1_cra_init(struct crypto_tfm *tfm)
+{
+ struct algo_data ad = { 0 };
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sa_sha_cra_init_alg(tfm, "sha1");
+
+ ad.aalg_id = SA_AALG_ID_SHA1;
+ ad.hash_size = SHA1_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
+
+ sa_sha_setup(ctx, &ad);
+
+ return 0;
+}
+
+static int sa_sha256_cra_init(struct crypto_tfm *tfm)
+{
+ struct algo_data ad = { 0 };
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sa_sha_cra_init_alg(tfm, "sha256");
+
+ ad.aalg_id = SA_AALG_ID_SHA2_256;
+ ad.hash_size = SHA256_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
+
+ sa_sha_setup(ctx, &ad);
+
+ return 0;
+}
+
+static int sa_sha512_cra_init(struct crypto_tfm *tfm)
+{
+ struct algo_data ad = { 0 };
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sa_sha_cra_init_alg(tfm, "sha512");
+
+ ad.aalg_id = SA_AALG_ID_SHA2_512;
+ ad.hash_size = SHA512_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
+
+ sa_sha_setup(ctx, &ad);
+
+ return 0;
+}
+
+static void sa_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+
+ if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
+ sa_free_ctx_info(&ctx->enc, data);
+
+ crypto_free_shash(ctx->shash);
+ crypto_free_ahash(ctx->fallback.ahash);
+}
+
+static void sa_aead_dma_in_callback(void *data)
+{
+ struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+ unsigned int start;
+ unsigned int authsize;
+ u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
+ size_t pl, ml;
+ int i, sglen;
+ int err = 0;
+ u16 auth_len;
+ u32 *mdptr;
+ bool diff_dst;
+ enum dma_data_direction dir_src;
+
+ req = container_of(rxd->req, struct aead_request, base);
+ tfm = crypto_aead_reqtfm(req);
+ start = req->assoclen + req->cryptlen;
+ authsize = crypto_aead_authsize(tfm);
+
+	diff_dst = (req->src != req->dst);
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+ mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
+ for (i = 0; i < (authsize / 4); i++)
+ mdptr[i + 4] = swab32(mdptr[i + 4]);
+
+ auth_len = req->assoclen + req->cryptlen;
+ if (!rxd->enc)
+ auth_len -= authsize;
+
+ sglen = sg_nents_for_len(rxd->src, auth_len);
+ dma_unmap_sg(rxd->ddev, rxd->src, sglen, dir_src);
+ kfree(rxd->split_src_sg);
+
+ if (diff_dst) {
+ sglen = sg_nents_for_len(rxd->dst, auth_len);
+ dma_unmap_sg(rxd->ddev, rxd->dst, sglen, DMA_FROM_DEVICE);
+ kfree(rxd->split_dst_sg);
+ }
+
+ if (rxd->enc) {
+ scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
+ 1);
+ } else {
+ start -= authsize;
+ scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
+ 0);
+
+ err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
+ }
+
+ kfree(rxd);
+
+ aead_request_complete(req, err);
+}
+
+static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
+ const char *fallback)
+{
+ struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+ int ret;
+
+ memzero_explicit(ctx, sizeof(*ctx));
+
+ ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->shash)) {
+ dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
+ return PTR_ERR(ctx->shash);
+ }
+
+ ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(ctx->fallback.aead)) {
+ dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
+ fallback);
+ return PTR_ERR(ctx->fallback.aead);
+ }
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->fallback.aead));
+
+ ret = sa_init_ctx_info(&ctx->enc, data);
+ if (ret)
+ return ret;
+
+ ret = sa_init_ctx_info(&ctx->dec, data);
+ if (ret) {
+ sa_free_ctx_info(&ctx->enc, data);
+ return ret;
+ }
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+
+ return ret;
+}
+
+static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
+{
+ return sa_cra_init_aead(tfm, "sha1",
+ "authenc(hmac(sha1-ce),cbc(aes-ce))");
+}
+
+static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
+{
+ return sa_cra_init_aead(tfm, "sha256",
+ "authenc(hmac(sha256-ce),cbc(aes-ce))");
+}
+
+static void sa_exit_tfm_aead(struct crypto_aead *tfm)
+{
+ struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+
+ crypto_free_shash(ctx->shash);
+ crypto_free_aead(ctx->fallback.aead);
+
+ sa_free_ctx_info(&ctx->enc, data);
+ sa_free_ctx_info(&ctx->dec, data);
+}
+
+/* AEAD algorithm configuration interface function */
+static int sa_aead_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen,
+ struct algo_data *ad)
+{
+ struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_authenc_keys keys;
+ int cmdl_len;
+ struct sa_cmdl_cfg cfg;
+ int key_idx;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ return -EINVAL;
+
+ /* Convert the key size (16/24/32) to the key size index (0/1/2) */
+ key_idx = (keys.enckeylen >> 3) - 2;
+ if (key_idx >= 3)
+ return -EINVAL;
+
+ ad->ctx = ctx;
+ ad->enc_eng.eng_id = SA_ENG_ID_EM1;
+ ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
+ ad->auth_eng.eng_id = SA_ENG_ID_AM1;
+ ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
+ ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
+ ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
+ ad->inv_key = true;
+ ad->keyed_mac = true;
+ ad->ealg_id = SA_EALG_ID_AES_CBC;
+ ad->prep_iopad = sa_prepare_iopads;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.enc = true;
+ cfg.aalg = ad->aalg_id;
+ cfg.enc_eng_id = ad->enc_eng.eng_id;
+ cfg.auth_eng_id = ad->auth_eng.eng_id;
+ cfg.iv_size = crypto_aead_ivsize(authenc);
+ cfg.akey = keys.authkey;
+ cfg.akey_len = keys.authkeylen;
+
+ /* Setup Encryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
+ keys.authkey, keys.authkeylen,
+ ad, 1, &ctx->enc.epib[1]))
+ return -EINVAL;
+
+ cmdl_len = sa_format_cmdl_gen(&cfg,
+ (u8 *)ctx->enc.cmdl,
+ &ctx->enc.cmdl_upd_info);
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ return -EINVAL;
+
+ ctx->enc.cmdl_size = cmdl_len;
+
+ /* Setup Decryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
+ keys.authkey, keys.authkeylen,
+ ad, 0, &ctx->dec.epib[1]))
+ return -EINVAL;
+
+ cfg.enc = false;
+ cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
+ &ctx->dec.cmdl_upd_info);
+
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ return -EINVAL;
+
+ ctx->dec.cmdl_size = cmdl_len;
+
+ crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(ctx->fallback.aead,
+ crypto_aead_get_flags(authenc) &
+ CRYPTO_TFM_REQ_MASK);
+	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
+}
+
+static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
+
+ return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
+}
+
+static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+
+ ad.ealg_id = SA_EALG_ID_AES_CBC;
+ ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
+ ad.hash_size = SHA1_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
+
+ return sa_aead_setkey(authenc, key, keylen, &ad);
+}
+
+static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+
+ ad.ealg_id = SA_EALG_ID_AES_CBC;
+ ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
+ ad.hash_size = SHA256_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
+
+ return sa_aead_setkey(authenc, key, keylen, &ad);
+}
+
+static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sa_req sa_req = { 0 };
+ size_t auth_size, enc_size;
+
+ enc_size = req->cryptlen;
+ auth_size = req->assoclen + req->cryptlen;
+
+ if (!enc) {
+ enc_size -= crypto_aead_authsize(tfm);
+ auth_size -= crypto_aead_authsize(tfm);
+ }
+
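+	/*
+	 * Sizes the engine cannot handle safely (above SA_MAX_DATA_SZ or
+	 * within the SA_UNSAFE_DATA_SZ_MIN..MAX window) go to the software
+	 * fallback AEAD allocated in sa_cra_init_aead().
+	 */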
+ if (auth_size > SA_MAX_DATA_SZ ||
+ (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
+ auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
+ struct aead_request *subreq = aead_request_ctx(req);
+ int ret;
+
+ aead_request_set_tfm(subreq, ctx->fallback.aead);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ ret = enc ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+ return ret;
+ }
+
+ sa_req.enc_offset = req->assoclen;
+ sa_req.enc_size = enc_size;
+ sa_req.auth_size = auth_size;
+ sa_req.size = auth_size;
+ sa_req.enc_iv = iv;
+ sa_req.type = CRYPTO_ALG_TYPE_AEAD;
+ sa_req.enc = enc;
+ sa_req.callback = sa_aead_dma_in_callback;
+ sa_req.mdata_size = 52;
+ sa_req.base = &req->base;
+ sa_req.ctx = ctx;
+ sa_req.src = req->src;
+ sa_req.dst = req->dst;
+
+ return sa_run(&sa_req);
+}
+
+/* AEAD algorithm encrypt interface function */
+static int sa_aead_encrypt(struct aead_request *req)
+{
+ return sa_aead_run(req, req->iv, 1);
+}
+
+/* AEAD algorithm decrypt interface function */
+static int sa_aead_decrypt(struct aead_request *req)
+{
+ return sa_aead_run(req, req->iv, 0);
+}
+
+static struct sa_alg_tmpl sa_algs[] = {
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-sa2ul",
+ .base.cra_priority = 30000,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = sa_cipher_cra_init,
+ .exit = sa_cipher_cra_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sa_aes_cbc_setkey,
+ .encrypt = sa_encrypt,
+ .decrypt = sa_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-sa2ul",
+ .base.cra_priority = 30000,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = sa_cipher_cra_init,
+ .exit = sa_cipher_cra_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sa_aes_ecb_setkey,
+ .encrypt = sa_encrypt,
+ .decrypt = sa_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-sa2ul",
+ .base.cra_priority = 30000,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = sa_cipher_cra_init,
+ .exit = sa_cipher_cra_exit,
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = sa_3des_cbc_setkey,
+ .encrypt = sa_encrypt,
+ .decrypt = sa_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-sa2ul",
+ .base.cra_priority = 30000,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = sa_cipher_cra_init,
+ .exit = sa_cipher_cra_exit,
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = sa_3des_ecb_setkey,
+ .encrypt = sa_encrypt,
+ .decrypt = sa_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sa2ul",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sa_sha1_cra_init,
+ .cra_exit = sa_sha_cra_exit,
+ },
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sa_sha_req_ctx) +
+ sizeof(struct sha1_state),
+ .init = sa_sha_init,
+ .update = sa_sha_update,
+ .final = sa_sha_final,
+ .finup = sa_sha_finup,
+ .digest = sa_sha_digest,
+ .export = sa_sha_export,
+ .import = sa_sha_import,
+ },
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-sa2ul",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sa_sha256_cra_init,
+ .cra_exit = sa_sha_cra_exit,
+ },
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sa_sha_req_ctx) +
+ sizeof(struct sha256_state),
+ .init = sa_sha_init,
+ .update = sa_sha_update,
+ .final = sa_sha_final,
+ .finup = sa_sha_finup,
+ .digest = sa_sha_digest,
+ .export = sa_sha_export,
+ .import = sa_sha_import,
+ },
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-sa2ul",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sa_sha512_cra_init,
+ .cra_exit = sa_sha_cra_exit,
+ },
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sa_sha_req_ctx) +
+ sizeof(struct sha512_state),
+ .init = sa_sha_init,
+ .update = sa_sha_update,
+ .final = sa_sha_final,
+ .finup = sa_sha_finup,
+ .digest = sa_sha_digest,
+ .export = sa_sha_export,
+ .import = sa_sha_import,
+ },
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(sha1),cbc(aes))-sa2ul",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_priority = 3000,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+
+ .init = sa_cra_init_aead_sha1,
+ .exit = sa_exit_tfm_aead,
+ .setkey = sa_aead_cbc_sha1_setkey,
+ .setauthsize = sa_aead_setauthsize,
+ .encrypt = sa_aead_encrypt,
+ .decrypt = sa_aead_decrypt,
+ },
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(sha256),cbc(aes))-sa2ul",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0,
+ .cra_priority = 3000,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+
+ .init = sa_cra_init_aead_sha256,
+ .exit = sa_exit_tfm_aead,
+ .setkey = sa_aead_cbc_sha256_setkey,
+ .setauthsize = sa_aead_setauthsize,
+ .encrypt = sa_aead_encrypt,
+ .decrypt = sa_aead_decrypt,
+ },
+ },
+};
+
+/* Register the algorithms in crypto framework */
+static void sa_register_algos(const struct device *dev)
+{
+ char *alg_name;
+ u32 type;
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
+ type = sa_algs[i].type;
+ if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
+ alg_name = sa_algs[i].alg.skcipher.base.cra_name;
+ err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
+ } else if (type == CRYPTO_ALG_TYPE_AHASH) {
+ alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
+ err = crypto_register_ahash(&sa_algs[i].alg.ahash);
+ } else if (type == CRYPTO_ALG_TYPE_AEAD) {
+ alg_name = sa_algs[i].alg.aead.base.cra_name;
+ err = crypto_register_aead(&sa_algs[i].alg.aead);
+ } else {
+			dev_err(dev,
+				"unsupported crypto algorithm (%d)\n",
+				sa_algs[i].type);
+ continue;
+ }
+
+ if (err)
+ dev_err(dev, "Failed to register '%s'\n", alg_name);
+ else
+ sa_algs[i].registered = true;
+ }
+}
+
+/* Unregister the algorithms in crypto framework */
+static void sa_unregister_algos(const struct device *dev)
+{
+ u32 type;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
+ type = sa_algs[i].type;
+ if (!sa_algs[i].registered)
+ continue;
+ if (type == CRYPTO_ALG_TYPE_SKCIPHER)
+ crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
+ else if (type == CRYPTO_ALG_TYPE_AHASH)
+ crypto_unregister_ahash(&sa_algs[i].alg.ahash);
+ else if (type == CRYPTO_ALG_TYPE_AEAD)
+ crypto_unregister_aead(&sa_algs[i].alg.aead);
+
+ sa_algs[i].registered = false;
+ }
+}
+
+static int sa_init_mem(struct sa_crypto_data *dev_data)
+{
+ struct device *dev = &dev_data->pdev->dev;
+ /* Setup dma pool for security context buffers */
+ dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
+ SA_CTX_MAX_SZ, 64, 0);
+ if (!dev_data->sc_pool) {
+		dev_err(dev, "Failed to create dma pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int sa_dma_init(struct sa_crypto_data *dd)
+{
+ int ret;
+ struct dma_slave_config cfg;
+
+ dd->dma_rx1 = NULL;
+ dd->dma_tx = NULL;
+ dd->dma_rx2 = NULL;
+
+ ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
+ if (ret)
+ return ret;
+
+ dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
+ if (IS_ERR(dd->dma_rx1)) {
+ if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER)
+ dev_err(dd->dev, "Unable to request rx1 DMA channel\n");
+ return PTR_ERR(dd->dma_rx1);
+ }
+
+ dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
+ if (IS_ERR(dd->dma_rx2)) {
+ dma_release_channel(dd->dma_rx1);
+ if (PTR_ERR(dd->dma_rx2) != -EPROBE_DEFER)
+ dev_err(dd->dev, "Unable to request rx2 DMA channel\n");
+ return PTR_ERR(dd->dma_rx2);
+ }
+
+ dd->dma_tx = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_tx)) {
+ if (PTR_ERR(dd->dma_tx) != -EPROBE_DEFER)
+ dev_err(dd->dev, "Unable to request tx DMA channel\n");
+ ret = PTR_ERR(dd->dma_tx);
+ goto err_dma_tx;
+ }
+
+ memzero_explicit(&cfg, sizeof(cfg));
+
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 4;
+ cfg.dst_maxburst = 4;
+
+	ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
+	if (ret) {
+		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+			ret);
+		goto err_dma_cfg;
+	}
+
+	ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
+	if (ret) {
+		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+			ret);
+		goto err_dma_cfg;
+	}
+
+	ret = dmaengine_slave_config(dd->dma_tx, &cfg);
+	if (ret) {
+		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+			ret);
+		goto err_dma_cfg;
+	}
+
+	return 0;
+
+	/* release every channel that was successfully requested */
+err_dma_cfg:
+	dma_release_channel(dd->dma_tx);
+err_dma_tx:
+	dma_release_channel(dd->dma_rx1);
+	dma_release_channel(dd->dma_rx2);
+
+	return ret;
+}
+
+static int sa_link_child(struct device *dev, void *data)
+{
+ struct device *parent = data;
+
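+	/*
+	 * Link each child populated from the DT node (e.g. the TRNG
+	 * device) to this device so probe and PM ordering follow the
+	 * parent.
+	 */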
+ device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
+
+ return 0;
+}
+
+static int sa_ul_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+	void __iomem *saul_base;
+ struct sa_crypto_data *dev_data;
+ u32 val;
+ int ret;
+
+ dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ sa_k3_dev = dev;
+ dev_data->dev = dev;
+ dev_data->pdev = pdev;
+ platform_set_drvdata(pdev, dev_data);
+ dev_set_drvdata(sa_k3_dev, dev_data);
+
+ pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
+			ret);
+		pm_runtime_put_noidle(dev);
+		pm_runtime_disable(dev);
+		return ret;
+	}
+
+	ret = sa_init_mem(dev_data);
+	if (ret)
+		goto disable_pm_runtime;
+
+ ret = sa_dma_init(dev_data);
+ if (ret)
+ goto disable_pm_runtime;
+
+ spin_lock_init(&dev_data->scid_lock);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	saul_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(saul_base)) {
+		ret = PTR_ERR(saul_base);
+		goto release_dma;
+	}
+
+ dev_data->base = saul_base;
+ val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
+ SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
+ SA_EEC_TRNG_EN;
+
+ writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+
+ sa_register_algos(dev);
+
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ goto release_dma;
+
+ device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
+
+ return 0;
+
+release_dma:
+ sa_unregister_algos(&pdev->dev);
+
+ dma_release_channel(dev_data->dma_rx2);
+ dma_release_channel(dev_data->dma_rx1);
+ dma_release_channel(dev_data->dma_tx);
+
+ dma_pool_destroy(dev_data->sc_pool);
+
+disable_pm_runtime:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int sa_ul_remove(struct platform_device *pdev)
+{
+ struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
+
+ sa_unregister_algos(&pdev->dev);
+
+ dma_release_channel(dev_data->dma_rx2);
+ dma_release_channel(dev_data->dma_rx1);
+ dma_release_channel(dev_data->dma_tx);
+
+ dma_pool_destroy(dev_data->sc_pool);
+
+ platform_set_drvdata(pdev, NULL);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id of_match[] = {
+ {.compatible = "ti,j721e-sa2ul",},
+ {.compatible = "ti,am654-sa2ul",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver sa_ul_driver = {
+ .probe = sa_ul_probe,
+ .remove = sa_ul_remove,
+ .driver = {
+ .name = "saul-crypto",
+ .of_match_table = of_match,
+ },
+};
+module_platform_driver(sa_ul_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
new file mode 100644
index 000000000000..7f7e3fe60d11
--- /dev/null
+++ b/drivers/crypto/sa2ul.h
@@ -0,0 +1,403 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * K3 SA2UL crypto accelerator driver
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Keerthy
+ * Vitaly Andrianov
+ * Tero Kristo
+ */
+
+#ifndef _K3_SA2UL_
+#define _K3_SA2UL_
+
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/hw_random.h>
+#include <crypto/aes.h>
+
+#define SA_ENGINE_ENABLE_CONTROL 0x1000
+
+struct sa_tfm_ctx;
+/*
+ * SA_ENGINE_ENABLE_CONTROL register bits
+ */
+#define SA_EEC_ENCSS_EN 0x00000001
+#define SA_EEC_AUTHSS_EN 0x00000002
+#define SA_EEC_TRNG_EN 0x00000008
+#define SA_EEC_PKA_EN 0x00000010
+#define SA_EEC_CTXCACH_EN 0x00000080
+#define SA_EEC_CPPI_PORT_IN_EN 0x00000200
+#define SA_EEC_CPPI_PORT_OUT_EN 0x00000800
+
+/*
+ * Encoding used to identify the type of crypto operation
+ * performed on the packet when the packet is returned
+ * by SA
+ */
+#define SA_REQ_SUBTYPE_ENC 0x0001
+#define SA_REQ_SUBTYPE_DEC 0x0002
+#define SA_REQ_SUBTYPE_SHIFT 16
+#define SA_REQ_SUBTYPE_MASK 0xffff
+
+/* Number of 32-bit words in EPIB */
+#define SA_DMA_NUM_EPIB_WORDS 4
+
+/* Number of 32-bit words in PS data */
+#define SA_DMA_NUM_PS_WORDS 16
+#define NKEY_SZ 3
+#define MCI_SZ 27
+
+/*
+ * Maximum number of simultaneous security contexts
+ * supported by the driver
+ */
+#define SA_MAX_NUM_CTX 512
+
+/*
+ * Assumption: CTX size is multiple of 32
+ */
+#define SA_CTX_SIZE_TO_DMA_SIZE(ctx_sz) \
+ ((ctx_sz) ? ((ctx_sz) / 32 - 1) : 0)
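+/* e.g. a 96-byte context: 96 / 32 - 1 = 2, i.e. SA_CTX_DMA_SIZE_96 below */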
+
+#define SA_CTX_ENC_KEY_OFFSET 32
+#define SA_CTX_ENC_AUX1_OFFSET 64
+#define SA_CTX_ENC_AUX2_OFFSET 96
+#define SA_CTX_ENC_AUX3_OFFSET 112
+#define SA_CTX_ENC_AUX4_OFFSET 128
+
+/* Next Engine Select code in CP_ACE */
+#define SA_ENG_ID_EM1	2	/* Enc/Dec engine with AES/DES core */
+#define SA_ENG_ID_EM2	3	/* Encryption/Decryption engine for pass 2 */
+#define SA_ENG_ID_AM1 4 /* Auth. engine with SHA1/MD5/SHA2 core */
+#define SA_ENG_ID_AM2 5 /* Authentication engine for pass 2 */
+#define SA_ENG_ID_OUTPORT2 20 /* Egress module 2 */
+
+/*
+ * Command Label Definitions
+ */
+#define SA_CMDL_OFFSET_NESC 0 /* Next Engine Select Code */
+#define SA_CMDL_OFFSET_LABEL_LEN 1 /* Engine Command Label Length */
+/* 16-bit Length of Data to be processed */
+#define SA_CMDL_OFFSET_DATA_LEN 2
+#define SA_CMDL_OFFSET_DATA_OFFSET	4	/* Start Data Offset */
+#define SA_CMDL_OFFSET_OPTION_CTRL1 5 /* Option Control Byte 1 */
+#define SA_CMDL_OFFSET_OPTION_CTRL2 6 /* Option Control Byte 2 */
+#define SA_CMDL_OFFSET_OPTION_CTRL3 7 /* Option Control Byte 3 */
+#define SA_CMDL_OFFSET_OPTION_BYTE 8
+
+#define SA_CMDL_HEADER_SIZE_BYTES 8
+
+#define SA_CMDL_OPTION_BYTES_MAX_SIZE 72
+#define SA_CMDL_MAX_SIZE_BYTES (SA_CMDL_HEADER_SIZE_BYTES + \
+ SA_CMDL_OPTION_BYTES_MAX_SIZE)
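+/* i.e. at most 8 + 72 = 80 bytes */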
+
+/* SWINFO word-0 flags */
+#define SA_SW_INFO_FLAG_EVICT 0x0001
+#define SA_SW_INFO_FLAG_TEAR 0x0002
+#define SA_SW_INFO_FLAG_NOPD 0x0004
+
+/*
+ * This type represents the various packet types to be processed
+ * by the PHP engine in SA.
+ * It is used to identify the corresponding PHP processing function.
+ */
+#define SA_CTX_PE_PKT_TYPE_3GPP_AIR 0 /* 3GPP Air Cipher */
+#define SA_CTX_PE_PKT_TYPE_SRTP 1 /* SRTP */
+#define SA_CTX_PE_PKT_TYPE_IPSEC_AH 2 /* IPSec Authentication Header */
+/* IPSec Encapsulating Security Payload */
+#define SA_CTX_PE_PKT_TYPE_IPSEC_ESP 3
+/* Indicates that it is in data mode; it may not be used by PHP */
+#define SA_CTX_PE_PKT_TYPE_NONE 4
+#define SA_CTX_ENC_TYPE1_SZ 64 /* Encryption SC with Key only */
+#define SA_CTX_ENC_TYPE2_SZ 96 /* Encryption SC with Key and Aux1 */
+
+#define SA_CTX_AUTH_TYPE1_SZ 64 /* Auth SC with Key only */
+#define SA_CTX_AUTH_TYPE2_SZ 96 /* Auth SC with Key and Aux1 */
+/* Size of security context for PHP engine */
+#define SA_CTX_PHP_PE_CTX_SZ 64
+
+#define SA_CTX_MAX_SZ (64 + SA_CTX_ENC_TYPE2_SZ + SA_CTX_AUTH_TYPE2_SZ)
+
+/*
+ * Encoding of F/E control in SCCTL
+ * Bit 0-1: Fetch PHP Bytes
+ * Bit 2-3: Fetch Encryption/Air Ciphering Bytes
+ * Bit 4-5: Fetch Authentication Bytes or Encr pass 2
+ * Bit 6-7: Evict PHP Bytes
+ *
+ * where 00 = 0 bytes
+ * 01 = 64 bytes
+ * 10 = 96 bytes
+ * 11 = 128 bytes
+ */
+#define SA_CTX_DMA_SIZE_0 0
+#define SA_CTX_DMA_SIZE_64 1
+#define SA_CTX_DMA_SIZE_96 2
+#define SA_CTX_DMA_SIZE_128 3
+
+/*
+ * Byte offset of the owner word in SCCTL
+ * in the security context
+ */
+#define SA_CTX_SCCTL_OWNER_OFFSET 0
+
+#define SA_CTX_ENC_KEY_OFFSET 32
+#define SA_CTX_ENC_AUX1_OFFSET 64
+#define SA_CTX_ENC_AUX2_OFFSET 96
+#define SA_CTX_ENC_AUX3_OFFSET 112
+#define SA_CTX_ENC_AUX4_OFFSET 128
+
+#define SA_SCCTL_FE_AUTH_ENC 0x65
+#define SA_SCCTL_FE_ENC 0x8D
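+/*
+ * Decoded per the F/E encoding above (bit pairs read from the LSB):
+ * 0x65: fetch 64 PHP, 64 encryption, 96 auth bytes; evict 64 PHP bytes.
+ * 0x8D: fetch 64 PHP, 128 encryption, 0 auth bytes; evict 96 PHP bytes.
+ */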
+
+#define SA_ALIGN_MASK (sizeof(u32) - 1)
+#define SA_ALIGNED __aligned(32)
+
+#define SA_AUTH_SW_CTRL_MD5 1
+#define SA_AUTH_SW_CTRL_SHA1 2
+#define SA_AUTH_SW_CTRL_SHA224 3
+#define SA_AUTH_SW_CTRL_SHA256 4
+#define SA_AUTH_SW_CTRL_SHA384 5
+#define SA_AUTH_SW_CTRL_SHA512 6
+
+/* SA2UL can only handle maximum data size of 64KB */
+#define SA_MAX_DATA_SZ U16_MAX
+
+/*
+ * SA2UL can produce unpredictable results with packet sizes that fall
+ * within the following range, so such sizes are avoided.
+ */
+#define SA_UNSAFE_DATA_SZ_MIN 240
+#define SA_UNSAFE_DATA_SZ_MAX 256
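+/*
+ * Requests whose size falls in this window (or exceeds SA_MAX_DATA_SZ)
+ * are routed to the software fallback; see sa_aead_run().
+ */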
+
+/**
+ * struct sa_crypto_data - Crypto driver instance data
+ * @base: Base address of the register space
+ * @pdev: Platform device pointer
+ * @sc_pool: security context pool
+ * @dev: Device pointer
+ * @scid_lock: secure context ID lock
+ * @sc_id_start: starting index for SC ID
+ * @sc_id_end: Ending index for SC ID
+ * @sc_id: Security Context ID
+ * @ctx_bm: Bitmap to keep track of Security context ID's
+ * @ctx: SA tfm context pointer
+ * @dma_rx1: Pointer to DMA rx channel for sizes < 256 Bytes
+ * @dma_rx2: Pointer to DMA rx channel for sizes > 256 Bytes
+ * @dma_tx: Pointer to DMA TX channel
+ */
+struct sa_crypto_data {
+ void __iomem *base;
+ struct platform_device *pdev;
+ struct dma_pool *sc_pool;
+ struct device *dev;
+ spinlock_t scid_lock; /* lock for SC-ID allocation */
+ /* Security context data */
+ u16 sc_id_start;
+ u16 sc_id_end;
+ u16 sc_id;
+ unsigned long ctx_bm[DIV_ROUND_UP(SA_MAX_NUM_CTX,
+ BITS_PER_LONG)];
+ struct sa_tfm_ctx *ctx;
+ struct dma_chan *dma_rx1;
+ struct dma_chan *dma_rx2;
+ struct dma_chan *dma_tx;
+};
+
+/**
+ * struct sa_cmdl_param_info: Command label parameters info
+ * @index: Index of the parameter in the command label format
+ * @offset: the offset of the parameter
+ * @size: Size of the parameter
+ */
+struct sa_cmdl_param_info {
+ u16 index;
+ u16 offset;
+ u16 size;
+};
+
+/* Maximum length of Auxiliary data in 32-bit words */
+#define SA_MAX_AUX_DATA_WORDS 8
+
+/**
+ * struct sa_cmdl_upd_info: Command label update info
+ * @flags: flags in command label
+ * @submode: Encryption submodes
+ * @enc_size: Size of first-pass encryption
+ * @enc_size2: Size of second-pass encryption
+ * @enc_offset: Encryption payload offset in the packet
+ * @enc_iv: Encryption initialization vector for pass 1
+ * @enc_iv2: Encryption initialization vector for pass 2
+ * @aad: Associated data
+ * @payload: Payload info
+ * @auth_size: Authentication size for pass 1
+ * @auth_size2: Authentication size for pass 2
+ * @auth_offset: Authentication payload offset
+ * @auth_iv: Authentication initialization vector
+ * @aux_key_info: Authentication aux key information
+ * @aux_key: Aux key for authentication
+ */
+struct sa_cmdl_upd_info {
+ u16 flags;
+ u16 submode;
+ struct sa_cmdl_param_info enc_size;
+ struct sa_cmdl_param_info enc_size2;
+ struct sa_cmdl_param_info enc_offset;
+ struct sa_cmdl_param_info enc_iv;
+ struct sa_cmdl_param_info enc_iv2;
+ struct sa_cmdl_param_info aad;
+ struct sa_cmdl_param_info payload;
+ struct sa_cmdl_param_info auth_size;
+ struct sa_cmdl_param_info auth_size2;
+ struct sa_cmdl_param_info auth_offset;
+ struct sa_cmdl_param_info auth_iv;
+ struct sa_cmdl_param_info aux_key_info;
+ u32 aux_key[SA_MAX_AUX_DATA_WORDS];
+};
+
+/*
+ * Number of 32-bit words appended after the command label
+ * in PSDATA to identify the crypto request context.
+ * word-0: Request type
+ * word-1: pointer to request
+ */
+#define SA_PSDATA_CTX_WORDS 4
+
+/* Maximum size of Command label in 32-bit words */
+#define SA_MAX_CMDL_WORDS (SA_DMA_NUM_PS_WORDS - SA_PSDATA_CTX_WORDS)
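+/* i.e. 16 - 4 = 12 words (48 bytes) for the command label */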
+
+/**
+ * struct sa_ctx_info: SA context information
+ * @sc: Pointer to security context
+ * @sc_phys: Security context physical address that is passed on to SA2UL
+ * @sc_id: Security context ID
+ * @cmdl_size: Command label size
+ * @cmdl: Command label for a particular iteration
+ * @cmdl_upd_info: structure holding command label update info
+ * @epib: Extended protocol information block words
+ */
+struct sa_ctx_info {
+ u8 *sc;
+ dma_addr_t sc_phys;
+ u16 sc_id;
+ u16 cmdl_size;
+ u32 cmdl[SA_MAX_CMDL_WORDS];
+ struct sa_cmdl_upd_info cmdl_upd_info;
+ /* Store Auxiliary data such as K2/K3 subkeys in AES-XCBC */
+ u32 epib[SA_DMA_NUM_EPIB_WORDS];
+};
+
+/**
+ * struct sa_tfm_ctx: TFM context structure
+ * @dev_data: struct sa_crypto_data pointer
+ * @enc: struct sa_ctx_info for encryption
+ * @dec: struct sa_ctx_info for decryption
+ * @auth: struct sa_ctx_info for authentication
+ * @keylen: encryption/decryption key length
+ * @iv_idx: Initialization vector index
+ * @key: encryption key
+ * @authkey: authentication key
+ * @shash: software hash transform used to prepare the HMAC ipads/opads
+ * @fallback: SW fallback algorithm
+ */
+struct sa_tfm_ctx {
+ struct sa_crypto_data *dev_data;
+ struct sa_ctx_info enc;
+ struct sa_ctx_info dec;
+ struct sa_ctx_info auth;
+ int keylen;
+ int iv_idx;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ u8 authkey[SHA512_BLOCK_SIZE];
+ struct crypto_shash *shash;
+ /* for fallback */
+ union {
+ struct crypto_sync_skcipher *skcipher;
+ struct crypto_ahash *ahash;
+ struct crypto_aead *aead;
+ } fallback;
+};
+
+/**
+ * struct sa_sha_req_ctx: Structure used for sha request
+ * @dev_data: struct sa_crypto_data pointer
+ * @cmdl: Complete command label with psdata and epib included
+ * @fallback_req: SW fallback request container
+ */
+struct sa_sha_req_ctx {
+ struct sa_crypto_data *dev_data;
+ u32 cmdl[SA_MAX_CMDL_WORDS + SA_PSDATA_CTX_WORDS];
+ struct ahash_request fallback_req;
+};
+
+enum sa_submode {
+ SA_MODE_GEN = 0,
+ SA_MODE_CCM,
+ SA_MODE_GCM,
+ SA_MODE_GMAC
+};
+
+/* Encryption algorithms */
+enum sa_ealg_id {
+ SA_EALG_ID_NONE = 0, /* No encryption */
+ SA_EALG_ID_NULL, /* NULL encryption */
+ SA_EALG_ID_AES_CTR, /* AES Counter mode */
+ SA_EALG_ID_AES_F8, /* AES F8 mode */
+ SA_EALG_ID_AES_CBC, /* AES CBC mode */
+ SA_EALG_ID_DES_CBC, /* DES CBC mode */
+ SA_EALG_ID_3DES_CBC, /* 3DES CBC mode */
+ SA_EALG_ID_CCM, /* Counter with CBC-MAC mode */
+ SA_EALG_ID_GCM, /* Galois Counter mode */
+ SA_EALG_ID_AES_ECB,
+ SA_EALG_ID_LAST
+};
+
+/* Authentication algorithms */
+enum sa_aalg_id {
+ SA_AALG_ID_NONE = 0, /* No Authentication */
+ SA_AALG_ID_NULL = SA_EALG_ID_LAST, /* NULL Authentication */
+ SA_AALG_ID_MD5, /* MD5 mode */
+ SA_AALG_ID_SHA1, /* SHA1 mode */
+ SA_AALG_ID_SHA2_224, /* 224-bit SHA2 mode */
+ SA_AALG_ID_SHA2_256, /* 256-bit SHA2 mode */
+ SA_AALG_ID_SHA2_512, /* 512-bit SHA2 mode */
+ SA_AALG_ID_HMAC_MD5, /* HMAC with MD5 mode */
+ SA_AALG_ID_HMAC_SHA1, /* HMAC with SHA1 mode */
+ SA_AALG_ID_HMAC_SHA2_224, /* HMAC with 224-bit SHA2 mode */
+ SA_AALG_ID_HMAC_SHA2_256, /* HMAC with 256-bit SHA2 mode */
+ SA_AALG_ID_GMAC, /* Galois Message Auth. Code mode */
+ SA_AALG_ID_CMAC, /* Cipher-based Mes. Auth. Code mode */
+ SA_AALG_ID_CBC_MAC, /* Cipher Block Chaining */
+ SA_AALG_ID_AES_XCBC /* AES Extended Cipher Block Chaining */
+};
+
+/*
+ * Mode control engine algorithms used to index the
+ * mode control instruction tables
+ */
+enum sa_eng_algo_id {
+ SA_ENG_ALGO_ECB = 0,
+ SA_ENG_ALGO_CBC,
+ SA_ENG_ALGO_CFB,
+ SA_ENG_ALGO_OFB,
+ SA_ENG_ALGO_CTR,
+ SA_ENG_ALGO_F8,
+ SA_ENG_ALGO_F8F9,
+ SA_ENG_ALGO_GCM,
+ SA_ENG_ALGO_GMAC,
+ SA_ENG_ALGO_CCM,
+ SA_ENG_ALGO_CMAC,
+ SA_ENG_ALGO_CBCMAC,
+ SA_NUM_ENG_ALGOS
+};
+
+/**
+ * struct sa_eng_info: Security accelerator engine info
+ * @eng_id: Engine ID
+ * @sc_size: security context size
+ */
+struct sa_eng_info {
+ u8 eng_id;
+ u16 sc_size;
+};
+
+#endif /* _K3_SA2UL_ */
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 466e30bd529c..0c8cb23ae708 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -146,11 +146,12 @@ struct sahara_ctx {
/* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
};
struct sahara_aes_reqctx {
unsigned long mode;
+ struct skcipher_request fallback_req; // keep at the end
};
/*
@@ -617,10 +618,10 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
/*
* The requested key size is not supported by HW, do a fallback.
*/
- crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ return crypto_skcipher_setkey(ctx->fallback, key, keylen);
}
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
@@ -651,21 +652,19 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
- int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ return crypto_skcipher_encrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, FLAGS_ENCRYPT);
@@ -673,21 +672,19 @@ static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
- int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
- err = crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ return crypto_skcipher_decrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, 0);
@@ -695,21 +692,19 @@ static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
- int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ return crypto_skcipher_encrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
@@ -717,21 +712,19 @@ static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
- int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
- err = crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ return crypto_skcipher_decrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, FLAGS_CBC);
@@ -742,14 +735,15 @@ static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
const char *name = crypto_tfm_alg_name(&tfm->base);
struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
- ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
+ ctx->fallback = crypto_alloc_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("Error allocating fallback algo %s\n", name);
return PTR_ERR(ctx->fallback);
}
- crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
+ crypto_skcipher_reqsize(ctx->fallback));
return 0;
}
@@ -758,7 +752,7 @@ static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(ctx->fallback);
+ crypto_free_skcipher(ctx->fallback);
}
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 9c6db7f698c4..7c547352a862 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2264,7 +2264,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha1-"
"cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -2285,7 +2286,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha1-"
"cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -2306,7 +2308,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha1-"
"cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -2330,7 +2333,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha1-"
"cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -2352,7 +2356,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha224-"
"cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
@@ -2373,7 +2378,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha224-"
"cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
@@ -2394,7 +2400,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha224-"
"cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
@@ -2418,7 +2425,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha224-"
"cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
@@ -2440,7 +2448,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha256-"
"cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -2461,7 +2470,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha256-"
"cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -2482,7 +2492,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha256-"
"cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -2506,7 +2517,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha256-"
"cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -2528,7 +2540,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha384-"
"cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
@@ -2549,7 +2562,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha384-"
"cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
@@ -2571,7 +2585,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha512-"
"cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -2592,7 +2607,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha512-"
"cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -2614,7 +2630,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-md5-"
"cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
@@ -2635,7 +2652,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-md5-"
"cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
@@ -2655,7 +2673,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-md5-"
"cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
@@ -2678,7 +2697,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-md5-"
"cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
@@ -2699,7 +2719,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "ecb-aes-talitos",
.base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = skcipher_aes_setkey,
@@ -2712,7 +2733,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-talitos",
.base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -2727,7 +2749,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "ctr-aes-talitos",
.base.cra_blocksize = 1,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -2742,7 +2765,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-talitos",
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = skcipher_des_setkey,
@@ -2755,7 +2779,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-talitos",
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -2770,7 +2795,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-3des-talitos",
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = skcipher_des3_setkey,
@@ -2784,7 +2810,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-3des-talitos",
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -2804,7 +2831,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-talitos",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2819,7 +2847,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2834,7 +2863,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha224",
.cra_driver_name = "sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2849,7 +2879,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2864,7 +2895,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha384",
.cra_driver_name = "sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2879,7 +2911,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha512",
.cra_driver_name = "sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2894,7 +2927,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "hmac-md5-talitos",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2909,7 +2943,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2924,7 +2959,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "hmac-sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2939,7 +2975,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2954,7 +2991,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "hmac-sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2969,7 +3007,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "hmac-sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index c24f2db8d5e8..a5ee8c2fb4e0 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -545,7 +545,7 @@ static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
*
* Initialize structures.
*/
-static int hash_init(struct ahash_request *req)
+static int ux500_hash_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
@@ -1359,7 +1359,7 @@ static int ahash_sha1_init(struct ahash_request *req)
ctx->config.oper_mode = HASH_OPER_MODE_HASH;
ctx->digestsize = SHA1_DIGEST_SIZE;
- return hash_init(req);
+ return ux500_hash_init(req);
}
static int ahash_sha256_init(struct ahash_request *req)
@@ -1372,7 +1372,7 @@ static int ahash_sha256_init(struct ahash_request *req)
ctx->config.oper_mode = HASH_OPER_MODE_HASH;
ctx->digestsize = SHA256_DIGEST_SIZE;
- return hash_init(req);
+ return ux500_hash_init(req);
}
static int ahash_sha1_digest(struct ahash_request *req)
@@ -1425,7 +1425,7 @@ static int hmac_sha1_init(struct ahash_request *req)
ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
ctx->digestsize = SHA1_DIGEST_SIZE;
- return hash_init(req);
+ return ux500_hash_init(req);
}
static int hmac_sha256_init(struct ahash_request *req)
@@ -1438,7 +1438,7 @@ static int hmac_sha256_init(struct ahash_request *req)
ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
ctx->digestsize = SHA256_DIGEST_SIZE;
- return hash_init(req);
+ return ux500_hash_init(req);
}
static int hmac_sha1_digest(struct ahash_request *req)
@@ -1515,7 +1515,7 @@ static struct hash_algo_template hash_algs[] = {
.conf.algorithm = HASH_ALGO_SHA1,
.conf.oper_mode = HASH_OPER_MODE_HASH,
.hash = {
- .init = hash_init,
+ .init = ux500_hash_init,
.update = ahash_update,
.final = ahash_final,
.digest = ahash_sha1_digest,
@@ -1538,7 +1538,7 @@ static struct hash_algo_template hash_algs[] = {
.conf.algorithm = HASH_ALGO_SHA256,
.conf.oper_mode = HASH_OPER_MODE_HASH,
.hash = {
- .init = hash_init,
+ .init = ux500_hash_init,
.update = ahash_update,
.final = ahash_final,
.digest = ahash_sha256_digest,
@@ -1561,7 +1561,7 @@ static struct hash_algo_template hash_algs[] = {
.conf.algorithm = HASH_ALGO_SHA1,
.conf.oper_mode = HASH_OPER_MODE_HMAC,
.hash = {
- .init = hash_init,
+ .init = ux500_hash_init,
.update = ahash_update,
.final = ahash_final,
.digest = hmac_sha1_digest,
@@ -1585,7 +1585,7 @@ static struct hash_algo_template hash_algs[] = {
.conf.algorithm = HASH_ALGO_SHA256,
.conf.oper_mode = HASH_OPER_MODE_HMAC,
.hash = {
- .init = hash_init,
+ .init = ux500_hash_init,
.update = ahash_update,
.final = ahash_final,
.digest = hmac_sha256_digest,
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index cb8a6ea2a4bc..b2601958282e 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -597,7 +597,8 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "virtio_crypto_aes_cbc",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx),
.base.cra_module = THIS_MODULE,
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index c8a962c62663..77e744eaedd0 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -498,11 +498,11 @@ free_vqs:
}
#endif
-static unsigned int features[] = {
+static const unsigned int features[] = {
/* none */
};
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
{ 0 },
};
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index cd11558893cd..27079354dbe9 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -364,6 +364,7 @@ static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_AEAD |
CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = ZYNQMP_AES_BLK_SIZE,
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 8e32345be0f7..f50828526331 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(bdev_dax_pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
- if (!blk_queue_dax(bdev->bd_queue))
+ if (!blk_queue_dax(bdev->bd_disk->queue))
return NULL;
return dax_get_by_host(bdev->bd_disk->disk_name);
}
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 8c31b0f2e28f..56efbeb7851e 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -293,7 +293,7 @@ static void devfreq_event_release_edev(struct device *dev)
/**
* devfreq_event_add_edev() - Add new devfreq-event device.
* @dev : the device owning the devfreq-event device being created
- * @desc : the devfreq-event device's decriptor which include essential
+ * @desc : the devfreq-event device's descriptor which include essential
* data for devfreq-event device.
*
* Note that this function add new devfreq-event device to devfreq-event class
@@ -385,7 +385,7 @@ static void devm_devfreq_event_release(struct device *dev, void *res)
/**
* devm_devfreq_event_add_edev() - Resource-managed devfreq_event_add_edev()
* @dev : the device owning the devfreq-event device being created
- * @desc : the devfreq-event device's decriptor which include essential
+ * @desc : the devfreq-event device's descriptor which include essential
* data for devfreq-event device.
*
* Note that this function manages automatically the memory of devfreq-event
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 52b9c3e141f3..561d91b2d3bf 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -49,6 +49,11 @@ static LIST_HEAD(devfreq_governor_list);
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);
+static const char timer_name[][DEVFREQ_NAME_LEN] = {
+ [DEVFREQ_TIMER_DEFERRABLE] = { "deferrable" },
+ [DEVFREQ_TIMER_DELAYED] = { "delayed" },
+};
+
/**
* find_device_devfreq() - find devfreq struct using device pointer
* @dev: device pointer used to lookup device devfreq.
@@ -454,7 +459,17 @@ void devfreq_monitor_start(struct devfreq *devfreq)
if (devfreq->governor->interrupt_driven)
return;
- INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+ switch (devfreq->profile->timer) {
+ case DEVFREQ_TIMER_DEFERRABLE:
+ INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+ break;
+ case DEVFREQ_TIMER_DELAYED:
+ INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
+ break;
+ default:
+ return;
+ }
+
if (devfreq->profile->polling_ms)
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
@@ -771,6 +786,11 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->data = data;
devfreq->nb.notifier_call = devfreq_notifier_call;
+	if (devfreq->profile->timer < 0
+		|| devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
+		mutex_unlock(&devfreq->lock);
+		err = -EINVAL;
+		goto err_out;
+	}
+
if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
mutex_unlock(&devfreq->lock);
err = set_freq_table(devfreq);
@@ -1260,18 +1280,20 @@ EXPORT_SYMBOL(devfreq_remove_governor);
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct devfreq *devfreq = to_devfreq(dev);
- return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
+ struct devfreq *df = to_devfreq(dev);
+ return sprintf(buf, "%s\n", dev_name(df->dev.parent));
}
static DEVICE_ATTR_RO(name);
static ssize_t governor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (!to_devfreq(dev)->governor)
+ struct devfreq *df = to_devfreq(dev);
+
+ if (!df->governor)
return -EINVAL;
- return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
+ return sprintf(buf, "%s\n", df->governor->name);
}
static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
@@ -1282,6 +1304,9 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
char str_governor[DEVFREQ_NAME_LEN + 1];
const struct devfreq_governor *governor, *prev_governor;
+ if (!df->governor)
+ return -EINVAL;
+
ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
if (ret != 1)
return -EINVAL;
@@ -1295,20 +1320,18 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
if (df->governor == governor) {
ret = 0;
goto out;
- } else if ((df->governor && df->governor->immutable) ||
- governor->immutable) {
+ } else if (df->governor->immutable || governor->immutable) {
ret = -EINVAL;
goto out;
}
- if (df->governor) {
- ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
- if (ret) {
- dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
- __func__, df->governor->name, ret);
- goto out;
- }
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+ if (ret) {
+ dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
+ __func__, df->governor->name, ret);
+ goto out;
}
+
prev_governor = df->governor;
df->governor = governor;
strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
@@ -1343,13 +1366,16 @@ static ssize_t available_governors_show(struct device *d,
struct devfreq *df = to_devfreq(d);
ssize_t count = 0;
+ if (!df->governor)
+ return -EINVAL;
+
mutex_lock(&devfreq_list_lock);
/*
* The devfreq with immutable governor (e.g., passive) shows
	 * only its own governor.
*/
- if (df->governor && df->governor->immutable) {
+ if (df->governor->immutable) {
count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
"%s ", df->governor_name);
/*
@@ -1383,27 +1409,37 @@ static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
unsigned long freq;
- struct devfreq *devfreq = to_devfreq(dev);
+ struct devfreq *df = to_devfreq(dev);
- if (devfreq->profile->get_cur_freq &&
- !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+ if (!df->profile)
+ return -EINVAL;
+
+ if (df->profile->get_cur_freq &&
+ !df->profile->get_cur_freq(df->dev.parent, &freq))
return sprintf(buf, "%lu\n", freq);
- return sprintf(buf, "%lu\n", devfreq->previous_freq);
+ return sprintf(buf, "%lu\n", df->previous_freq);
}
static DEVICE_ATTR_RO(cur_freq);
static ssize_t target_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
+ struct devfreq *df = to_devfreq(dev);
+
+ return sprintf(buf, "%lu\n", df->previous_freq);
}
static DEVICE_ATTR_RO(target_freq);
static ssize_t polling_interval_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
+ struct devfreq *df = to_devfreq(dev);
+
+ if (!df->profile)
+ return -EINVAL;
+
+ return sprintf(buf, "%d\n", df->profile->polling_ms);
}
static ssize_t polling_interval_store(struct device *dev,
@@ -1531,6 +1567,9 @@ static ssize_t available_frequencies_show(struct device *d,
ssize_t count = 0;
int i;
+ if (!df->profile)
+ return -EINVAL;
+
mutex_lock(&df->lock);
for (i = 0; i < df->profile->max_state; i++)
@@ -1551,49 +1590,53 @@ static DEVICE_ATTR_RO(available_frequencies);
static ssize_t trans_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct devfreq *devfreq = to_devfreq(dev);
+ struct devfreq *df = to_devfreq(dev);
ssize_t len;
int i, j;
- unsigned int max_state = devfreq->profile->max_state;
+ unsigned int max_state;
+
+ if (!df->profile)
+ return -EINVAL;
+ max_state = df->profile->max_state;
if (max_state == 0)
return sprintf(buf, "Not Supported.\n");
- mutex_lock(&devfreq->lock);
- if (!devfreq->stop_polling &&
- devfreq_update_status(devfreq, devfreq->previous_freq)) {
- mutex_unlock(&devfreq->lock);
+ mutex_lock(&df->lock);
+ if (!df->stop_polling &&
+ devfreq_update_status(df, df->previous_freq)) {
+ mutex_unlock(&df->lock);
return 0;
}
- mutex_unlock(&devfreq->lock);
+ mutex_unlock(&df->lock);
len = sprintf(buf, " From : To\n");
len += sprintf(buf + len, " :");
for (i = 0; i < max_state; i++)
len += sprintf(buf + len, "%10lu",
- devfreq->profile->freq_table[i]);
+ df->profile->freq_table[i]);
len += sprintf(buf + len, " time(ms)\n");
for (i = 0; i < max_state; i++) {
- if (devfreq->profile->freq_table[i]
- == devfreq->previous_freq) {
+ if (df->profile->freq_table[i]
+ == df->previous_freq) {
len += sprintf(buf + len, "*");
} else {
len += sprintf(buf + len, " ");
}
len += sprintf(buf + len, "%10lu:",
- devfreq->profile->freq_table[i]);
+ df->profile->freq_table[i]);
for (j = 0; j < max_state; j++)
len += sprintf(buf + len, "%10u",
- devfreq->stats.trans_table[(i * max_state) + j]);
+ df->stats.trans_table[(i * max_state) + j]);
len += sprintf(buf + len, "%10llu\n", (u64)
- jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
+ jiffies64_to_msecs(df->stats.time_in_state[i]));
}
len += sprintf(buf + len, "Total transition : %u\n",
- devfreq->stats.total_trans);
+ df->stats.total_trans);
return len;
}
@@ -1604,6 +1647,9 @@ static ssize_t trans_stat_store(struct device *dev,
struct devfreq *df = to_devfreq(dev);
int err, value;
+ if (!df->profile)
+ return -EINVAL;
+
if (df->profile->max_state == 0)
return count;
@@ -1625,6 +1671,69 @@ static ssize_t trans_stat_store(struct device *dev,
}
static DEVICE_ATTR_RW(trans_stat);
+static ssize_t timer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct devfreq *df = to_devfreq(dev);
+
+ if (!df->profile)
+ return -EINVAL;
+
+ return sprintf(buf, "%s\n", timer_name[df->profile->timer]);
+}
+
+static ssize_t timer_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ char str_timer[DEVFREQ_NAME_LEN + 1];
+ int timer = -1;
+ int ret = 0, i;
+
+ if (!df->governor || !df->profile)
+ return -EINVAL;
+
+ ret = sscanf(buf, "%16s", str_timer);
+ if (ret != 1)
+ return -EINVAL;
+
+ for (i = 0; i < DEVFREQ_TIMER_NUM; i++) {
+ if (!strncmp(timer_name[i], str_timer, DEVFREQ_NAME_LEN)) {
+ timer = i;
+ break;
+ }
+ }
+
+ if (timer < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (df->profile->timer == timer) {
+ ret = 0;
+ goto out;
+ }
+
+ mutex_lock(&df->lock);
+ df->profile->timer = timer;
+ mutex_unlock(&df->lock);
+
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+ if (ret) {
+ dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
+ __func__, df->governor->name, ret);
+ goto out;
+ }
+
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+ if (ret)
+ dev_warn(dev, "%s: Governor %s not started(%d)\n",
+ __func__, df->governor->name, ret);
+out:
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(timer);
+
static struct attribute *devfreq_attrs[] = {
&dev_attr_name.attr,
&dev_attr_governor.attr,
@@ -1636,6 +1745,7 @@ static struct attribute *devfreq_attrs[] = {
&dev_attr_min_freq.attr,
&dev_attr_max_freq.attr,
&dev_attr_trans_stat.attr,
+ &dev_attr_timer.attr,
NULL,
};
ATTRIBUTE_GROUPS(devfreq);
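
The new timer attribute sits next to governor and polling_interval under /sys/class/devfreq/<dev>/. Note that timer_store() stops and restarts the governor so the newly selected work item type takes effect immediately. A minimal userspace sketch (the device name is hypothetical) switching a device to the delayed timer:

#include <stdio.h>

int main(void)
{
	/* hypothetical devfreq device name */
	FILE *f = fopen("/sys/class/devfreq/dmc/timer", "w");

	if (!f)
		return 1;
	fputs("delayed\n", f);		/* or "deferrable" */
	return fclose(f) ? 1 : 0;
}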
@@ -1657,8 +1767,7 @@ static int devfreq_summary_show(struct seq_file *s, void *data)
unsigned long cur_freq, min_freq, max_freq;
unsigned int polling_ms;
- seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
- "dev_name",
+ seq_printf(s, "%-30s %-30s %-15s %10s %12s %12s %12s\n",
"dev",
"parent_dev",
"governor",
@@ -1666,10 +1775,9 @@ static int devfreq_summary_show(struct seq_file *s, void *data)
"cur_freq_Hz",
"min_freq_Hz",
"max_freq_Hz");
- seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
+ seq_printf(s, "%30s %30s %15s %10s %12s %12s %12s\n",
+ "------------------------------",
"------------------------------",
- "----------",
- "----------",
"---------------",
"----------",
"------------",
@@ -1692,14 +1800,13 @@ static int devfreq_summary_show(struct seq_file *s, void *data)
#endif
mutex_lock(&devfreq->lock);
- cur_freq = devfreq->previous_freq,
+ cur_freq = devfreq->previous_freq;
get_freq_range(devfreq, &min_freq, &max_freq);
- polling_ms = devfreq->profile->polling_ms,
+ polling_ms = devfreq->profile->polling_ms;
mutex_unlock(&devfreq->lock);
seq_printf(s,
- "%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
- dev_name(devfreq->dev.parent),
+ "%-30s %-30s %-15s %10d %12ld %12ld %12ld\n",
dev_name(&devfreq->dev),
p_devfreq ? dev_name(&p_devfreq->dev) : "null",
devfreq->governor_name,
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 24f04f78285b..027769e39f9b 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -95,18 +95,20 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
mutex_lock(&dmcfreq->lock);
- if (target_rate >= dmcfreq->odt_dis_freq)
- odt_enable = true;
-
- /*
- * This makes a SMC call to the TF-A to set the DDR PD (power-down)
- * timings and to enable or disable the ODT (on-die termination)
- * resistors.
- */
- arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
- dmcfreq->odt_pd_arg1,
- ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
- odt_enable, 0, 0, 0, &res);
+ if (dmcfreq->regmap_pmu) {
+ if (target_rate >= dmcfreq->odt_dis_freq)
+ odt_enable = true;
+
+ /*
+ * This makes a SMC call to the TF-A to set the DDR PD
+ * (power-down) timings and to enable or disable the
+ * ODT (on-die termination) resistors.
+ */
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
+ dmcfreq->odt_pd_arg1,
+ ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
+ odt_enable, 0, 0, 0, &res);
+ }
/*
* If frequency scaling from low to high, adjust voltage first.
@@ -371,13 +373,14 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
}
node = of_parse_phandle(np, "rockchip,pmu", 0);
- if (node) {
- data->regmap_pmu = syscon_node_to_regmap(node);
- of_node_put(node);
- if (IS_ERR(data->regmap_pmu)) {
- ret = PTR_ERR(data->regmap_pmu);
- goto err_edev;
- }
+ if (!node)
+ goto no_pmu;
+
+ data->regmap_pmu = syscon_node_to_regmap(node);
+ of_node_put(node);
+ if (IS_ERR(data->regmap_pmu)) {
+ ret = PTR_ERR(data->regmap_pmu);
+ goto err_edev;
}
regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
@@ -399,6 +402,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
goto err_edev;
};
+no_pmu:
arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
ROCKCHIP_SIP_CONFIG_DRAM_INIT,
0, 0, 0, 0, &res);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 64c8955e0cf1..c888ae4fec96 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -271,20 +271,12 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
/* request and cfg rings */
- tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc,
- tx_chn->udma_tchan_id, 0);
- if (!tx_chn->ringtx) {
- ret = -ENODEV;
- dev_err(dev, "Failed to get TX ring %u\n",
- tx_chn->udma_tchan_id);
- goto err;
- }
-
- tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc,
- -1, 0);
- if (!tx_chn->ringtxcq) {
- ret = -ENODEV;
- dev_err(dev, "Failed to get TXCQ ring\n");
+ ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
+ tx_chn->udma_tchan_id, -1,
+ &tx_chn->ringtx,
+ &tx_chn->ringtxcq);
+ if (ret) {
+ dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
goto err;
}
@@ -587,22 +579,16 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
}
/* request and cfg rings */
- flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc,
- flow_cfg->ring_rxq_id, 0);
- if (!flow->ringrx) {
- ret = -ENODEV;
- dev_err(dev, "Failed to get RX ring\n");
+ ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
+ flow_cfg->ring_rxq_id,
+ flow_cfg->ring_rxfdq0_id,
+ &flow->ringrxfdq,
+ &flow->ringrx);
+ if (ret) {
+ dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
goto err_rflow_put;
}
- flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
- flow_cfg->ring_rxfdq0_id, 0);
- if (!flow->ringrxfdq) {
- ret = -ENODEV;
- dev_err(dev, "Failed to get RXFDQ ring\n");
- goto err_ringrx_free;
- }
-
ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringrx %d\n", ret);
@@ -673,8 +659,6 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
err_ringrxfdq_free:
k3_ringacc_ring_free(flow->ringrxfdq);
-
-err_ringrx_free:
k3_ringacc_ring_free(flow->ringrx);
err_rflow_put:
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 6c879a734360..49d0d3af6311 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1418,17 +1418,12 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
if (ret)
return ret;
- uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
- uc->tchan->id, 0);
- if (!uc->tchan->t_ring) {
- ret = -EBUSY;
- goto err_tx_ring;
- }
-
- uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
- if (!uc->tchan->tc_ring) {
+ ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
+ &uc->tchan->t_ring,
+ &uc->tchan->tc_ring);
+ if (ret) {
ret = -EBUSY;
- goto err_txc_ring;
+ goto err_ring;
}
memset(&ring_cfg, 0, sizeof(ring_cfg));
@@ -1447,10 +1442,9 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
err_ringcfg:
k3_ringacc_ring_free(uc->tchan->tc_ring);
uc->tchan->tc_ring = NULL;
-err_txc_ring:
k3_ringacc_ring_free(uc->tchan->t_ring);
uc->tchan->t_ring = NULL;
-err_tx_ring:
+err_ring:
udma_put_tchan(uc);
return ret;
@@ -1499,16 +1493,11 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
rflow = uc->rflow;
fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
- rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
- if (!rflow->fd_ring) {
- ret = -EBUSY;
- goto err_rx_ring;
- }
-
- rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
- if (!rflow->r_ring) {
+ ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
+ &rflow->fd_ring, &rflow->r_ring);
+ if (ret) {
ret = -EBUSY;
- goto err_rxc_ring;
+ goto err_ring;
}
memset(&ring_cfg, 0, sizeof(ring_cfg));
@@ -1533,10 +1522,9 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
err_ringcfg:
k3_ringacc_ring_free(rflow->r_ring);
rflow->r_ring = NULL;
-err_rxc_ring:
k3_ringacc_ring_free(rflow->fd_ring);
rflow->fd_ring = NULL;
-err_rx_ring:
+err_ring:
udma_put_rflow(uc);
err_rflow:
udma_put_rchan(uc);
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 0e7ea3591b78..5e7593753799 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -275,6 +275,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
/* Error exit stack */
err_kobj_reg:
+ kobject_put(&edac_dev->kobj);
module_put(edac_dev->owner);
err_out:
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 5813e931f2f0..01ff71f7b645 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -950,6 +950,8 @@ static void edac_ue_error(struct edac_raw_error_desc *e)
e->other_detail);
}
+ edac_inc_ue_error(e);
+
if (edac_mc_get_panic_on_ue()) {
panic("UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n",
e->msg,
@@ -959,8 +961,6 @@ static void edac_ue_error(struct edac_raw_error_desc *e)
*e->other_detail ? " - " : "",
e->other_detail);
}
-
- edac_inc_ue_error(e);
}
static void edac_inc_csrow(struct edac_raw_error_desc *e, int row, int chan)
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 72c9eb9fdffb..53042af7262e 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -386,7 +386,7 @@ static int edac_pci_main_kobj_setup(void)
 /* Error unwind stack */
kobject_init_and_add_fail:
- kfree(edac_pci_top_main_kobj);
+ kobject_put(edac_pci_top_main_kobj);
kzalloc_fail:
module_put(THIS_MODULE);
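
Both EDAC fixes above apply the same kobject rule: once kobject_init_and_add() has been called, the object's lifetime belongs to the kobject core, so a failed setup must drop the reference with kobject_put(), which releases the object through the ktype's release() method, rather than kfree()ing it directly. A minimal sketch of the pattern (example_ktype is a hypothetical struct kobj_type):

static int example_setup(struct kobject *kobj, struct kobject *parent)
{
	int err;

	err = kobject_init_and_add(kobj, &example_ktype, parent, "example");
	if (err)
		/* a reference is held even on failure: put it, do not kfree() */
		kobject_put(kobj);
	return err;
}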
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index cb3dab56a875..da60c29468a7 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -15,9 +15,7 @@
#include "edac_module.h"
#include <ras/ras_event.h>
-struct ghes_edac_pvt {
- struct list_head list;
- struct ghes *ghes;
+struct ghes_pvt {
struct mem_ctl_info *mci;
/* Buffers for the error handling routine */
@@ -32,7 +30,16 @@ static refcount_t ghes_refcount = REFCOUNT_INIT(0);
* also provides the necessary (implicit) memory barrier for the SMP
* case to make the pointer visible on another CPU.
*/
-static struct ghes_edac_pvt *ghes_pvt;
+static struct ghes_pvt *ghes_pvt;
+
+/*
+ * This driver's representation of the system hardware, as collected
+ * from DMI.
+ */
+struct ghes_hw_desc {
+ int num_dimms;
+ struct dimm_info *dimms;
+} ghes_hw;
/* GHES registration mutex */
static DEFINE_MUTEX(ghes_reg_mutex);
@@ -74,136 +81,165 @@ struct memdev_dmi_entry {
u16 conf_mem_clk_speed;
} __attribute__((__packed__));
-struct ghes_edac_dimm_fill {
- struct mem_ctl_info *mci;
- unsigned int count;
-};
+static struct dimm_info *find_dimm_by_handle(struct mem_ctl_info *mci, u16 handle)
+{
+ struct dimm_info *dimm;
+
+ mci_for_each_dimm(mci, dimm) {
+ if (dimm->smbios_handle == handle)
+ return dimm;
+ }
+
+ return NULL;
+}
-static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
+static void dimm_setup_label(struct dimm_info *dimm, u16 handle)
{
- int *num_dimm = arg;
+ const char *bank = NULL, *device = NULL;
+
+ dmi_memdev_name(handle, &bank, &device);
- if (dh->type == DMI_ENTRY_MEM_DEVICE)
- (*num_dimm)++;
+	/* both strings must be non-NULL and non-empty */
+ if (bank && *bank && device && *device)
+ snprintf(dimm->label, sizeof(dimm->label), "%s %s", bank, device);
}
-static int get_dimm_smbios_index(struct mem_ctl_info *mci, u16 handle)
+static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry)
{
- struct dimm_info *dimm;
+ u16 rdr_mask = BIT(7) | BIT(13);
- mci_for_each_dimm(mci, dimm) {
- if (dimm->smbios_handle == handle)
- return dimm->idx;
+ if (entry->size == 0xffff) {
+ pr_info("Can't get DIMM%i size\n", dimm->idx);
+ dimm->nr_pages = MiB_TO_PAGES(32);/* Unknown */
+ } else if (entry->size == 0x7fff) {
+ dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
+ } else {
+ if (entry->size & BIT(15))
+ dimm->nr_pages = MiB_TO_PAGES((entry->size & 0x7fff) << 10);
+ else
+ dimm->nr_pages = MiB_TO_PAGES(entry->size);
+ }
+
+ switch (entry->memory_type) {
+ case 0x12:
+ if (entry->type_detail & BIT(13))
+ dimm->mtype = MEM_RDDR;
+ else
+ dimm->mtype = MEM_DDR;
+ break;
+ case 0x13:
+ if (entry->type_detail & BIT(13))
+ dimm->mtype = MEM_RDDR2;
+ else
+ dimm->mtype = MEM_DDR2;
+ break;
+ case 0x14:
+ dimm->mtype = MEM_FB_DDR2;
+ break;
+ case 0x18:
+ if (entry->type_detail & BIT(12))
+ dimm->mtype = MEM_NVDIMM;
+ else if (entry->type_detail & BIT(13))
+ dimm->mtype = MEM_RDDR3;
+ else
+ dimm->mtype = MEM_DDR3;
+ break;
+ case 0x1a:
+ if (entry->type_detail & BIT(12))
+ dimm->mtype = MEM_NVDIMM;
+ else if (entry->type_detail & BIT(13))
+ dimm->mtype = MEM_RDDR4;
+ else
+ dimm->mtype = MEM_DDR4;
+ break;
+ default:
+ if (entry->type_detail & BIT(6))
+ dimm->mtype = MEM_RMBS;
+ else if ((entry->type_detail & rdr_mask) == rdr_mask)
+ dimm->mtype = MEM_RDR;
+ else if (entry->type_detail & BIT(7))
+ dimm->mtype = MEM_SDR;
+ else if (entry->type_detail & BIT(9))
+ dimm->mtype = MEM_EDO;
+ else
+ dimm->mtype = MEM_UNKNOWN;
}
- return -1;
+ /*
+	 * Actually, we can only detect whether the memory has
+	 * checksum bits or not
+ */
+ if (entry->total_width == entry->data_width)
+ dimm->edac_mode = EDAC_NONE;
+ else
+ dimm->edac_mode = EDAC_SECDED;
+
+ dimm->dtype = DEV_UNKNOWN;
+	dimm->grain = 128;		/* Likely, worst case */
+
+ dimm_setup_label(dimm, entry->handle);
+
+ if (dimm->nr_pages) {
+ edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
+ dimm->idx, edac_mem_types[dimm->mtype],
+ PAGES_TO_MiB(dimm->nr_pages),
+ (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
+ edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
+ entry->memory_type, entry->type_detail,
+ entry->total_width, entry->data_width);
+ }
+
+ dimm->smbios_handle = entry->handle;
}
-static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
+static void enumerate_dimms(const struct dmi_header *dh, void *arg)
{
- struct ghes_edac_dimm_fill *dimm_fill = arg;
- struct mem_ctl_info *mci = dimm_fill->mci;
-
- if (dh->type == DMI_ENTRY_MEM_DEVICE) {
- struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
- struct dimm_info *dimm = edac_get_dimm(mci, dimm_fill->count, 0, 0);
- u16 rdr_mask = BIT(7) | BIT(13);
-
- if (entry->size == 0xffff) {
- pr_info("Can't get DIMM%i size\n",
- dimm_fill->count);
- dimm->nr_pages = MiB_TO_PAGES(32);/* Unknown */
- } else if (entry->size == 0x7fff) {
- dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
- } else {
- if (entry->size & BIT(15))
- dimm->nr_pages = MiB_TO_PAGES((entry->size & 0x7fff) << 10);
- else
- dimm->nr_pages = MiB_TO_PAGES(entry->size);
- }
+ struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
+ struct ghes_hw_desc *hw = (struct ghes_hw_desc *)arg;
+ struct dimm_info *d;
- switch (entry->memory_type) {
- case 0x12:
- if (entry->type_detail & BIT(13))
- dimm->mtype = MEM_RDDR;
- else
- dimm->mtype = MEM_DDR;
- break;
- case 0x13:
- if (entry->type_detail & BIT(13))
- dimm->mtype = MEM_RDDR2;
- else
- dimm->mtype = MEM_DDR2;
- break;
- case 0x14:
- dimm->mtype = MEM_FB_DDR2;
- break;
- case 0x18:
- if (entry->type_detail & BIT(12))
- dimm->mtype = MEM_NVDIMM;
- else if (entry->type_detail & BIT(13))
- dimm->mtype = MEM_RDDR3;
- else
- dimm->mtype = MEM_DDR3;
- break;
- case 0x1a:
- if (entry->type_detail & BIT(12))
- dimm->mtype = MEM_NVDIMM;
- else if (entry->type_detail & BIT(13))
- dimm->mtype = MEM_RDDR4;
- else
- dimm->mtype = MEM_DDR4;
- break;
- default:
- if (entry->type_detail & BIT(6))
- dimm->mtype = MEM_RMBS;
- else if ((entry->type_detail & rdr_mask) == rdr_mask)
- dimm->mtype = MEM_RDR;
- else if (entry->type_detail & BIT(7))
- dimm->mtype = MEM_SDR;
- else if (entry->type_detail & BIT(9))
- dimm->mtype = MEM_EDO;
- else
- dimm->mtype = MEM_UNKNOWN;
- }
+ if (dh->type != DMI_ENTRY_MEM_DEVICE)
+ return;
- /*
- * Actually, we can only detect if the memory has bits for
- * checksum or not
- */
- if (entry->total_width == entry->data_width)
- dimm->edac_mode = EDAC_NONE;
- else
- dimm->edac_mode = EDAC_SECDED;
+	/* Grow the array in chunks of 16 entries */
+ if (!hw->num_dimms || !(hw->num_dimms % 16)) {
+ struct dimm_info *new;
- dimm->dtype = DEV_UNKNOWN;
- dimm->grain = 128; /* Likely, worse case */
-
- /*
- * FIXME: It shouldn't be hard to also fill the DIMM labels
- */
-
- if (dimm->nr_pages) {
- edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
- dimm_fill->count, edac_mem_types[dimm->mtype],
- PAGES_TO_MiB(dimm->nr_pages),
- (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
- edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
- entry->memory_type, entry->type_detail,
- entry->total_width, entry->data_width);
+ new = krealloc(hw->dimms, (hw->num_dimms + 16) * sizeof(struct dimm_info),
+ GFP_KERNEL);
+ if (!new) {
+ WARN_ON_ONCE(1);
+ return;
}
- dimm->smbios_handle = entry->handle;
-
- dimm_fill->count++;
+ hw->dimms = new;
}
+
+ d = &hw->dimms[hw->num_dimms];
+ d->idx = hw->num_dimms;
+
+ assign_dmi_dimm_info(d, entry);
+
+ hw->num_dimms++;
+}
+
+static void ghes_scan_system(void)
+{
+ static bool scanned;
+
+ if (scanned)
+ return;
+
+ dmi_walk(enumerate_dimms, &ghes_hw);
+
+ scanned = true;
}
void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
struct edac_raw_error_desc *e;
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt;
+ struct ghes_pvt *pvt;
unsigned long flags;
char *p;
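
The enumerate_dimms() callback above grows its scratch array lazily, in chunks of 16, whenever the DIMM count hits a multiple of 16 (including zero). The same idiom as a standalone sketch:

static struct dimm_info *dimms;	/* hypothetical flat array */
static int num_dimms;

static struct dimm_info *next_dimm_slot(void)
{
	if (!(num_dimms % 16)) {		/* array full (or empty): grow */
		struct dimm_info *new;

		new = krealloc(dimms, (num_dimms + 16) * sizeof(*new),
			       GFP_KERNEL);
		if (!new)
			return NULL;
		dimms = new;
	}
	return &dimms[num_dimms++];
}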
@@ -228,7 +264,6 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
memset(e, 0, sizeof (*e));
e->error_count = 1;
e->grain = 1;
- strcpy(e->label, "unknown label");
e->msg = pvt->msg;
e->other_detail = pvt->other_detail;
e->top_layer = -1;
@@ -345,7 +380,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
const char *bank = NULL, *device = NULL;
- int index = -1;
+ struct dimm_info *dimm;
dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
if (bank != NULL && device != NULL)
@@ -354,13 +389,18 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
mem_err->mem_dev_handle);
- index = get_dimm_smbios_index(mci, mem_err->mem_dev_handle);
- if (index >= 0)
- e->top_layer = index;
+ dimm = find_dimm_by_handle(mci, mem_err->mem_dev_handle);
+ if (dimm) {
+ e->top_layer = dimm->idx;
+ strcpy(e->label, dimm->label);
+ }
}
if (p > e->location)
*(p - 1) = '\0';
+ if (!*e->label)
+ strcpy(e->label, "unknown memory");
+
/* All other fields are mapped on e->other_detail */
p = pvt->other_detail;
p += snprintf(p, sizeof(pvt->other_detail),
@@ -455,13 +495,12 @@ static struct acpi_platform_list plat_list[] = {
int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
bool fake = false;
- int rc = 0, num_dimm = 0;
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt;
+ struct ghes_pvt *pvt;
struct edac_mc_layer layers[1];
- struct ghes_edac_dimm_fill dimm_fill;
unsigned long flags;
int idx = -1;
+ int rc = 0;
if (IS_ENABLED(CONFIG_X86)) {
/* Check if safe to enable on this system */
@@ -481,20 +520,19 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
if (refcount_inc_not_zero(&ghes_refcount))
goto unlock;
- /* Get the number of DIMMs */
- dmi_walk(ghes_edac_count_dimms, &num_dimm);
+ ghes_scan_system();
/* Check if we've got a bogus BIOS */
- if (num_dimm == 0) {
+ if (!ghes_hw.num_dimms) {
fake = true;
- num_dimm = 1;
+ ghes_hw.num_dimms = 1;
}
layers[0].type = EDAC_MC_LAYER_ALL_MEM;
- layers[0].size = num_dimm;
+ layers[0].size = ghes_hw.num_dimms;
layers[0].is_virt_csrow = true;
- mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_pvt));
if (!mci) {
pr_info("Can't allocate memory for EDAC data\n");
rc = -ENOMEM;
@@ -502,7 +540,6 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
}
pvt = mci->pvt_info;
- pvt->ghes = ghes;
pvt->mci = mci;
mci->pdev = dev;
@@ -523,13 +560,34 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
pr_info("If you find incorrect reports, please contact your hardware vendor\n");
pr_info("to correct its BIOS.\n");
- pr_info("This system has %d DIMM sockets.\n", num_dimm);
+ pr_info("This system has %d DIMM sockets.\n", ghes_hw.num_dimms);
}
if (!fake) {
- dimm_fill.count = 0;
- dimm_fill.mci = mci;
- dmi_walk(ghes_edac_dmidecode, &dimm_fill);
+ struct dimm_info *src, *dst;
+ int i = 0;
+
+ mci_for_each_dimm(mci, dst) {
+ src = &ghes_hw.dimms[i];
+
+ dst->idx = src->idx;
+ dst->smbios_handle = src->smbios_handle;
+ dst->nr_pages = src->nr_pages;
+ dst->mtype = src->mtype;
+ dst->edac_mode = src->edac_mode;
+ dst->dtype = src->dtype;
+ dst->grain = src->grain;
+
+ /*
+ * If no src->label, preserve default label assigned
+ * from EDAC core.
+ */
+ if (strlen(src->label))
+ memcpy(dst->label, src->label, sizeof(src->label));
+
+ i++;
+ }
+
} else {
struct dimm_info *dimm = edac_get_dimm(mci, 0, 0, 0);
@@ -542,7 +600,7 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
rc = edac_mc_add_mc(mci);
if (rc < 0) {
- pr_info("Can't register at EDAC core\n");
+ pr_info("Can't register with the EDAC core\n");
edac_mc_free(mci);
rc = -ENODEV;
goto unlock;
@@ -556,6 +614,11 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
refcount_set(&ghes_refcount, 1);
unlock:
+
+ /* Not needed anymore */
+ kfree(ghes_hw.dimms);
+ ghes_hw.dimms = NULL;
+
mutex_unlock(&ghes_reg_mutex);
return rc;
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 9b0044cd21cd..c8d11da85bec 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -135,9 +135,11 @@ static struct res_config i10nm_cfg1 = {
};
static const struct x86_cpu_id i10nm_cpuids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &i10nm_cfg0),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &i10nm_cfg0),
- X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &i10nm_cfg1),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -264,10 +266,6 @@ static int __init i10nm_init(void)
cfg = (struct res_config *)id->driver_data;
- /* Newer steppings have different offset for ATOM_TREMONT_D/ICELAKE_X */
- if (boot_cpu_data.x86_stepping >= 4)
- cfg->busno_cfg_offset = 0xd0;
-
rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
if (rc)
return rc;
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 2b5401db56ad..325aedf46ff2 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1094,6 +1094,9 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
if (m->status & MCI_STATUS_ADDRV)
pr_emerg(HW_ERR "Error Addr: 0x%016llx\n", m->addr);
+ if (m->ppin)
+ pr_emerg(HW_ERR "PPIN: 0x%016llx\n", m->ppin);
+
if (boot_cpu_has(X86_FEATURE_SMCA)) {
pr_emerg(HW_ERR "IPID: 0x%016llx", m->ipid);
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index c1f2e6deb021..fd363746f5b0 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -1432,6 +1432,7 @@ static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, vo
static struct notifier_block pnd2_mce_dec = {
.notifier_call = pnd2_mce_check_error,
+ .priority = MCE_PRIO_EDAC,
};
#ifdef CONFIG_EDAC_DEBUG
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index b907a0f4ece6..2c7db95df326 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -164,7 +164,7 @@ static struct res_config skx_cfg = {
};
static const struct x86_cpu_id skx_cpuids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &skx_cfg),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 54fdc39cd0bc..7dde21b18b04 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1099,7 +1099,7 @@ static void context_tasklet(unsigned long data)
static int context_add_buffer(struct context *ctx)
{
struct descriptor_buffer *desc;
- dma_addr_t uninitialized_var(bus_addr);
+ dma_addr_t bus_addr;
int offset;
/*
@@ -1289,7 +1289,7 @@ static int at_context_queue_packet(struct context *ctx,
struct fw_packet *packet)
{
struct fw_ohci *ohci = ctx->ohci;
- dma_addr_t d_bus, uninitialized_var(payload_bus);
+ dma_addr_t d_bus, payload_bus;
struct driver_data *driver_data;
struct descriptor *d, *last;
__le32 *header;
@@ -2445,7 +2445,7 @@ static int ohci_set_config_rom(struct fw_card *card,
{
struct fw_ohci *ohci;
__be32 *next_config_rom;
- dma_addr_t uninitialized_var(next_config_rom_bus);
+ dma_addr_t next_config_rom_bus;
ohci = fw_ohci(card);
@@ -2933,10 +2933,10 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
int type, int channel, size_t header_size)
{
struct fw_ohci *ohci = fw_ohci(card);
- struct iso_context *uninitialized_var(ctx);
- descriptor_callback_t uninitialized_var(callback);
- u64 *uninitialized_var(channels);
- u32 *uninitialized_var(mask), uninitialized_var(regs);
+ struct iso_context *ctx;
+ descriptor_callback_t callback;
+ u64 *channels;
+ u32 *mask, regs;
int index, ret = -EBUSY;
spin_lock_irq(&ohci->lock);
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 1cad32b38b29..6f9cbc4aef22 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o
scmi-bus-y = bus.o
-scmi-driver-y = driver.o
+scmi-driver-y = driver.o notify.o
scmi-transport-y = shmem.o
scmi-transport-$(CONFIG_MAILBOX) += mailbox.o
-scmi-transport-$(CONFIG_ARM_PSCI_FW) += smc.o
+scmi-transport-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smc.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index ce7d9203e41b..9853bd3c4d45 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -5,7 +5,15 @@
* Copyright (C) 2018 ARM Ltd.
*/
+#define pr_fmt(fmt) "SCMI Notifications BASE - " fmt
+
+#include <linux/scmi_protocol.h>
+
#include "common.h"
+#include "notify.h"
+
+#define SCMI_BASE_NUM_SOURCES 1
+#define SCMI_BASE_MAX_CMD_ERR_COUNT 1024
enum scmi_base_protocol_cmd {
BASE_DISCOVER_VENDOR = 0x3,
@@ -19,16 +27,25 @@ enum scmi_base_protocol_cmd {
BASE_RESET_AGENT_CONFIGURATION = 0xb,
};
-enum scmi_base_protocol_notify {
- BASE_ERROR_EVENT = 0x0,
-};
-
struct scmi_msg_resp_base_attributes {
u8 num_protocols;
u8 num_agents;
__le16 reserved;
};
+struct scmi_msg_base_error_notify {
+ __le32 event_control;
+#define BASE_TP_NOTIFY_ALL BIT(0)
+};
+
+struct scmi_base_error_notify_payld {
+ __le32 agent_id;
+ __le32 error_status;
+#define IS_FATAL_ERROR(x) ((x) & BIT(31))
+#define ERROR_CMD_COUNT(x) FIELD_GET(GENMASK(9, 0), (x))
+ __le64 msg_reports[SCMI_BASE_MAX_CMD_ERR_COUNT];
+};
+
/**
* scmi_base_attributes_get() - gets the implementation details
* that are associated with the base protocol.
@@ -222,6 +239,83 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
return ret;
}
+static int scmi_base_error_notify(const struct scmi_handle *handle, bool enable)
+{
+ int ret;
+ u32 evt_cntl = enable ? BASE_TP_NOTIFY_ALL : 0;
+ struct scmi_xfer *t;
+ struct scmi_msg_base_error_notify *cfg;
+
+ ret = scmi_xfer_get_init(handle, BASE_NOTIFY_ERRORS,
+ SCMI_PROTOCOL_BASE, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->event_control = cpu_to_le32(evt_cntl);
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_base_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_base_error_notify(handle, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] ret:%d\n", evt_id, ret);
+
+ return ret;
+}
+
+static void *scmi_base_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ int i;
+ const struct scmi_base_error_notify_payld *p = payld;
+ struct scmi_base_error_report *r = report;
+
+ /*
+	 * The BaseError notification payload is variable in size, up to a
+	 * maximum length determined by the struct pointed to by p.
+	 * payld_sz is instead the effective length of this particular
+	 * notification payload, so it cannot be greater than the maximum
+	 * size allowed by the struct pointed to by p.
+ */
+ if (evt_id != SCMI_EVENT_BASE_ERROR_EVENT || sizeof(*p) < payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->fatal = IS_FATAL_ERROR(le32_to_cpu(p->error_status));
+ r->cmd_count = ERROR_CMD_COUNT(le32_to_cpu(p->error_status));
+ for (i = 0; i < r->cmd_count; i++)
+ r->reports[i] = le64_to_cpu(p->msg_reports[i]);
+ *src_id = 0;
+
+ return r;
+}
+
+static const struct scmi_event base_events[] = {
+ {
+ .id = SCMI_EVENT_BASE_ERROR_EVENT,
+ .max_payld_sz = sizeof(struct scmi_base_error_notify_payld),
+ .max_report_sz = sizeof(struct scmi_base_error_report) +
+ SCMI_BASE_MAX_CMD_ERR_COUNT * sizeof(u64),
+ },
+};
+
+static const struct scmi_event_ops base_event_ops = {
+ .set_notify_enabled = scmi_base_set_notify_enabled,
+ .fill_custom_report = scmi_base_fill_custom_report,
+};
+
int scmi_base_protocol_init(struct scmi_handle *h)
{
int id, ret;
@@ -256,6 +350,12 @@ int scmi_base_protocol_init(struct scmi_handle *h)
dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols,
rev->num_agents);
+ scmi_register_protocol_events(handle, SCMI_PROTOCOL_BASE,
+ (4 * SCMI_PROTO_QUEUE_SZ),
+ &base_event_ops, base_events,
+ ARRAY_SIZE(base_events),
+ SCMI_BASE_NUM_SOURCES);
+
for (id = 0; id < rev->num_agents; id++) {
scmi_base_discover_agent_get(handle, id, name);
dev_dbg(dev, "Agent %d: %s\n", id, name);
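
Kernel users consume these events through standard notifier blocks; the report delivered for SCMI_EVENT_BASE_ERROR_EVENT is the struct scmi_base_error_report filled in by scmi_base_fill_custom_report() above. A hedged sketch of a consumer callback (the exact registration entry point is an assumption of this sketch; this series exposes it through the handle's notification ops):

static int base_err_cb(struct notifier_block *nb, unsigned long evt_id,
		       void *data)
{
	struct scmi_base_error_report *r = data;

	pr_warn("SCMI base error from agent %u (fatal=%d, %u cmds)\n",
		r->agent_id, r->fatal, r->cmd_count);
	return NOTIFY_OK;
}

static struct notifier_block base_err_nb = {
	.notifier_call = base_err_cb,
};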
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 4c2227662b26..75e39882746e 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -5,6 +5,8 @@
* Copyright (C) 2018 ARM Ltd.
*/
+#include <linux/sort.h>
+
#include "common.h"
enum scmi_clock_protocol_cmd {
@@ -121,11 +123,23 @@ static int scmi_clock_attributes_get(const struct scmi_handle *handle,
return ret;
}
+static int rate_cmp_func(const void *_r1, const void *_r2)
+{
+ const u64 *r1 = _r1, *r2 = _r2;
+
+ if (*r1 < *r2)
+ return -1;
+ else if (*r1 == *r2)
+ return 0;
+ else
+ return 1;
+}
+
static int
scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
struct scmi_clock_info *clk)
{
- u64 *rate;
+ u64 *rate = NULL;
int ret, cnt;
bool rate_discrete = false;
u32 tot_rate_cnt = 0, rates_flag;
@@ -184,8 +198,10 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
*/
} while (num_returned && num_remaining);
- if (rate_discrete)
+ if (rate_discrete && rate) {
clk->list.num_rates = tot_rate_cnt;
+ sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
+ }
clk->rate_discrete = rate_discrete;
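
The explicit three-way comparator passed to sort() here is deliberate: the common `return *r1 - *r2;` shortcut is unsafe for u64 values, because the difference is truncated to the comparator's int return type and can flip sign. For illustration only:

static int bad_rate_cmp(const void *_r1, const void *_r2)
{
	const u64 *r1 = _r1, *r2 = _r2;

	/* 0x100000000ULL - 1 truncates to (int)-1: reports r1 < r2 */
	return *r1 - *r2;
}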
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 31fe5a22a011..c113e578cc6c 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -6,6 +6,8 @@
*
* Copyright (C) 2018 ARM Ltd.
*/
+#ifndef _SCMI_COMMON_H
+#define _SCMI_COMMON_H
#include <linux/bitfield.h>
#include <linux/completion.h>
@@ -235,3 +237,5 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
+
+#endif /* _SCMI_COMMON_H */
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 7483cacf63f9..03ec74242c14 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include "common.h"
+#include "notify.h"
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
@@ -208,7 +209,9 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
struct device *dev = cinfo->dev;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct scmi_xfers_info *minfo = &info->rx_minfo;
+ ktime_t ts;
+ ts = ktime_get_boottime();
xfer = scmi_xfer_get(cinfo->handle, minfo);
if (IS_ERR(xfer)) {
dev_err(dev, "failed to get free message slot (%ld)\n",
@@ -221,6 +224,8 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
scmi_dump_header_dbg(dev, &xfer->hdr);
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
+ scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
+ xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
@@ -392,8 +397,7 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
info->desc->ops->mark_txdone(cinfo, ret);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
- xfer->hdr.protocol_id, xfer->hdr.seq,
- xfer->hdr.status);
+ xfer->hdr.protocol_id, xfer->hdr.seq, ret);
return ret;
}
@@ -789,6 +793,9 @@ static int scmi_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (scmi_notification_init(handle))
+ dev_err(dev, "SCMI Notifications NOT available.\n");
+
ret = scmi_base_protocol_init(handle);
if (ret) {
dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
@@ -831,6 +838,8 @@ static int scmi_remove(struct platform_device *pdev)
struct scmi_info *info = platform_get_drvdata(pdev);
struct idr *idr = &info->tx_idr;
+ scmi_notification_exit(&info->handle);
+
mutex_lock(&scmi_list_mutex);
if (info->users)
ret = -EBUSY;
@@ -901,7 +910,7 @@ ATTRIBUTE_GROUPS(versions);
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
-#ifdef CONFIG_ARM_PSCI_FW
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
{ /* Sentinel */ },
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
new file mode 100644
index 000000000000..4731daaacd19
--- /dev/null
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -0,0 +1,1526 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Notification support
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+/**
+ * DOC: Theory of operation
+ *
+ * SCMI Protocol specification allows the platform to signal events to
+ * interested agents via notification messages: this is an implementation
+ * of the dispatch and delivery of such notifications to the interested users
+ * inside the Linux kernel.
+ *
+ * An SCMI Notification core instance is initialized for each active platform
+ * instance identified by means of the usual &struct scmi_handle.
+ *
+ * Each SCMI Protocol implementation, during its initialization, registers with
+ * this core its set of supported events using scmi_register_protocol_events():
+ * all the needed descriptors are stored in the &struct registered_protocols and
+ * &struct registered_events arrays.
+ *
+ * Kernel users interested in some specific event can register their callbacks
+ * providing the usual notifier_block descriptor, since this core implements
+ * events' delivery using the standard Kernel notification chains machinery.
+ *
+ * Given the number of possible events defined by SCMI and the extensibility
+ * of the SCMI Protocol itself, the underlying notification chains are created
+ * and destroyed dynamically on demand depending on the number of users
+ * effectively registered for an event, so that no support structures or chains
+ * are allocated until at least one user has registered a notifier_block for
+ * such event. Similarly, events' generation itself is enabled at the platform
+ * level only after at least one user has registered, and it is shut down
+ * after the last user for that event has gone.
+ *
+ * All user-provided callbacks and allocated notification-chains are stored in
+ * the @registered_events_handlers hashtable. Callbacks' registration requests
+ * for events still to be registered are instead kept in the dedicated common
+ * hashtable @pending_events_handlers.
+ *
+ * An event is uniquely identified by the tuple (proto_id, evt_id, src_id)
+ * and is served by its own dedicated notification chain; information contained
+ * in such tuples is used, in a few different ways, to generate the needed
+ * hash-keys.
+ *
+ * Here proto_id and evt_id are simply the protocol_id and message_id numbers
+ * as described in the SCMI Protocol specification, while src_id represents an
+ * optional, protocol dependent, source identifier (like domain_id, perf_id
+ * or sensor_id and so forth).
+ *
+ * Upon reception of a notification message from the platform the SCMI RX ISR
+ * passes the received message payload and some ancillary information (including
+ * an arrival timestamp in nanoseconds) to the core via @scmi_notify() which
+ * pushes the event-data itself on a protocol-dedicated kfifo queue for further
+ * deferred processing as specified in @scmi_events_dispatcher().
+ *
+ * Each protocol has its own dedicated work_struct and worker which, once
+ * kicked by the ISR, takes care to empty its own dedicated queue, delivering
+ * the queued items into the proper notification-chain: notification
+ * processing can proceed concurrently on distinct workers only for events
+ * belonging to different protocols, while delivery of events within the same
+ * protocol is still strictly ordered by time of arrival.
+ *
+ * Events' information is then extracted from the SCMI Notification messages and
+ * conveyed, converted into a custom per-event report struct, as the void *data
+ * param to the user callback provided by the registered notifier_block, so
+ * that, from the user's perspective, the callback is invoked as:
+ *
+ * int user_cb(struct notifier_block *nb, unsigned long event_id, void *report)
+ *
+ */
+
+#define dev_fmt(fmt) "SCMI Notifications - " fmt
+#define pr_fmt(fmt) "SCMI Notifications - " fmt
+
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/kfifo.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/refcount.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "notify.h"
+
+#define SCMI_MAX_PROTO 256
+
+#define PROTO_ID_MASK GENMASK(31, 24)
+#define EVT_ID_MASK GENMASK(23, 16)
+#define SRC_ID_MASK GENMASK(15, 0)
+
+/*
+ * Builds an unsigned 32bit key from the given input tuple to be used
+ * as a key in hashtables.
+ */
+#define MAKE_HASH_KEY(p, e, s) \
+ (FIELD_PREP(PROTO_ID_MASK, (p)) | \
+ FIELD_PREP(EVT_ID_MASK, (e)) | \
+ FIELD_PREP(SRC_ID_MASK, (s)))
+
+#define MAKE_ALL_SRCS_KEY(p, e) MAKE_HASH_KEY((p), (e), SRC_ID_MASK)
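
A worked example of the key layout (IDs hypothetical): proto_id occupies bits [31:24], evt_id bits [23:16] and src_id bits [15:0], so:

	u32 key = MAKE_HASH_KEY(0x15, 0x1, 0x3);	/* 0x15010003 */
	u32 all = MAKE_ALL_SRCS_KEY(0x15, 0x1);		/* 0x1501ffff: src field saturated */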
+
+/*
+ * Assumes that the stored obj includes its own hash-key in a field named 'key':
+ * with this simplification this macro can be equally used for all the objects'
+ * types hashed by this implementation.
+ *
+ * @__ht: The hashtable name
+ * @__obj: A pointer to the object type to be retrieved from the hashtable;
+ *	  it will be used as a cursor while scanning the hashtable and it
+ *	  will possibly be left as NULL when @__k is not found
+ * @__k: The key to search for
+ */
+#define KEY_FIND(__ht, __obj, __k) \
+({ \
+ typeof(__k) k_ = __k; \
+ typeof(__obj) obj_; \
+ \
+ hash_for_each_possible((__ht), obj_, hash, k_) \
+ if (obj_->key == k_) \
+ break; \
+ __obj = obj_; \
+})
+
+#define KEY_XTRACT_PROTO_ID(key) FIELD_GET(PROTO_ID_MASK, (key))
+#define KEY_XTRACT_EVT_ID(key) FIELD_GET(EVT_ID_MASK, (key))
+#define KEY_XTRACT_SRC_ID(key) FIELD_GET(SRC_ID_MASK, (key))
+
+/*
+ * A set of macros used to access safely @registered_protocols and
+ * @registered_events arrays; these are fixed in size and each entry is possibly
+ * populated at protocols' registration time and then only read but NEVER
+ * modified or removed.
+ */
+#define SCMI_GET_PROTO(__ni, __pid) \
+({ \
+ typeof(__ni) ni_ = __ni; \
+ struct scmi_registered_events_desc *__pd = NULL; \
+ \
+ if (ni_) \
+ __pd = READ_ONCE(ni_->registered_protocols[(__pid)]); \
+ __pd; \
+})
+
+#define SCMI_GET_REVT_FROM_PD(__pd, __eid) \
+({ \
+ typeof(__pd) pd_ = __pd; \
+ typeof(__eid) eid_ = __eid; \
+ struct scmi_registered_event *__revt = NULL; \
+ \
+ if (pd_ && eid_ < pd_->num_events) \
+ __revt = READ_ONCE(pd_->registered_events[eid_]); \
+ __revt; \
+})
+
+#define SCMI_GET_REVT(__ni, __pid, __eid) \
+({ \
+ struct scmi_registered_event *__revt; \
+ struct scmi_registered_events_desc *__pd; \
+ \
+ __pd = SCMI_GET_PROTO((__ni), (__pid)); \
+ __revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid)); \
+ __revt; \
+})
+
+/* A couple of utility macros to limit cruft when calling protocols' helpers */
+#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state) \
+({ \
+ typeof(revt) r = revt; \
+ r->proto->ops->set_notify_enabled(r->proto->ni->handle, \
+ (eid), (sid), (state)); \
+})
+
+#define REVT_NOTIFY_ENABLE(revt, eid, sid) \
+ REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true)
+
+#define REVT_NOTIFY_DISABLE(revt, eid, sid) \
+ REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false)
+
+#define REVT_FILL_REPORT(revt, ...) \
+({ \
+ typeof(revt) r = revt; \
+ r->proto->ops->fill_custom_report(r->proto->ni->handle, \
+ __VA_ARGS__); \
+})
+
+#define SCMI_PENDING_HASH_SZ 4
+#define SCMI_REGISTERED_HASH_SZ 6
+
+struct scmi_registered_events_desc;
+
+/**
+ * struct scmi_notify_instance - Represents an instance of the notification
+ * core
+ * @gid: GroupID used for devres
+ * @handle: A reference to the platform instance
+ * @init_work: A work item to perform final initializations of pending handlers
+ * @notify_wq: A reference to the allocated Kernel cmwq
+ * @pending_mtx: A mutex to protect @pending_events_handlers
+ * @registered_protocols: A statically allocated array containing pointers to
+ * all the registered protocol-level specific information
+ * related to events' handling
+ * @pending_events_handlers: A hashtable containing all pending events'
+ * handlers descriptors
+ *
+ * Each platform instance, represented by a handle, has its own instance of
+ * the notification subsystem represented by this structure.
+ */
+struct scmi_notify_instance {
+ void *gid;
+ struct scmi_handle *handle;
+ struct work_struct init_work;
+ struct workqueue_struct *notify_wq;
+ /* lock to protect pending_events_handlers */
+ struct mutex pending_mtx;
+ struct scmi_registered_events_desc **registered_protocols;
+ DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ);
+};
+
+/**
+ * struct events_queue - Describes a queue and its associated worker
+ * @sz: Size in bytes of the related kfifo
+ * @kfifo: A dedicated Kernel kfifo descriptor
+ * @notify_work: A custom work item bound to this queue
+ * @wq: A reference to the associated workqueue
+ *
+ * Each protocol has its own dedicated events_queue descriptor.
+ */
+struct events_queue {
+ size_t sz;
+ struct kfifo kfifo;
+ struct work_struct notify_work;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * struct scmi_event_header - A utility header
+ * @timestamp: The timestamp, in nanoseconds (boottime), which was associated
+ * to this event as soon as it entered the SCMI RX ISR
+ * @payld_sz: Effective size of the embedded message payload which follows
+ * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol)
+ * @payld: A reference to the embedded event payload
+ *
+ * This header is prepended to each received event message payload before
+ * queueing it on the related &struct events_queue.
+ */
+struct scmi_event_header {
+ ktime_t timestamp;
+ size_t payld_sz;
+ unsigned char evt_id;
+ unsigned char payld[];
+};
+
+struct scmi_registered_event;
+
+/**
+ * struct scmi_registered_events_desc - Protocol Specific information
+ * @id: Protocol ID
+ * @ops: Protocol specific and event-related operations
+ * @equeue: The embedded per-protocol events_queue
+ * @ni: A reference to the initialized instance descriptor
+ * @eh: A reference to a pre-allocated buffer to be used as a scratch area by the
+ * deferred worker when fetching data from the kfifo
+ * @eh_sz: Size of the pre-allocated buffer @eh
+ * @in_flight: A reference to an in flight &struct scmi_registered_event
+ * @num_events: Number of events in @registered_events
+ * @registered_events: A dynamically allocated array holding all the registered
+ * events' descriptors, whose fixed-size is determined at
+ * compile time.
+ * @registered_mtx: A mutex to protect @registered_events_handlers
+ * @registered_events_handlers: A hashtable containing all events' handlers
+ * descriptors registered for this protocol
+ *
+ * All protocols that register at least one event have their protocol-specific
+ * information stored here, together with the embedded allocated events_queue.
+ * These descriptors are stored in the @registered_protocols array at protocol
+ * registration time.
+ *
+ * Once these descriptors are successfully registered, they are NEVER again
+ * removed or modified since protocols do not unregister ever, so that, once
+ * we safely grab a NON-NULL reference from the array we can keep it and use it.
+ */
+struct scmi_registered_events_desc {
+ u8 id;
+ const struct scmi_event_ops *ops;
+ struct events_queue equeue;
+ struct scmi_notify_instance *ni;
+ struct scmi_event_header *eh;
+ size_t eh_sz;
+ void *in_flight;
+ int num_events;
+ struct scmi_registered_event **registered_events;
+ /* mutex to protect registered_events_handlers */
+ struct mutex registered_mtx;
+ DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
+};
+
+/**
+ * struct scmi_registered_event - Event Specific Information
+ * @proto: A reference to the associated protocol descriptor
+ * @evt: A reference to the associated event descriptor (as provided at
+ * registration time)
+ * @report: A pre-allocated buffer used by the deferred worker to fill a
+ * customized event report
+ * @num_sources: The number of possible sources for this event as stated at
+ * events' registration time
+ * @sources: A reference to a dynamically allocated array used to refcount the
+ * events' enable requests for all the existing sources
+ * @sources_mtx: A mutex to serialize the access to @sources
+ *
+ * All registered events are represented by one of these structures that are
+ * stored in the @registered_events array at protocol registration time.
+ *
+ * Once these descriptors are successfully registered, they are NEVER again
+ * removed or modified since protocols do not unregister ever, so that once we
+ * safely grab a NON-NULL reference from the table we can keep it and use it.
+ */
+struct scmi_registered_event {
+ struct scmi_registered_events_desc *proto;
+ const struct scmi_event *evt;
+ void *report;
+ u32 num_sources;
+ refcount_t *sources;
+ /* locking to serialize the access to sources */
+ struct mutex sources_mtx;
+};
+
+/**
+ * struct scmi_event_handler - Event handler information
+ * @key: The used hashkey
+ * @users: A reference count for number of active users for this handler
+ * @r_evt: A reference to the associated registered event; when this is NULL
+ *	   this handler is pending, which means that it identifies a set of
+ *	   callbacks intended to be attached to an event which is not yet
+ *	   known or registered by any protocol at that point in time
+ * @chain: The notification chain dedicated to this specific event tuple
+ * @hash: The hlist_node used for collision handling
+ * @enabled: A boolean which records if event's generation has been already
+ * enabled for this handler as a whole
+ *
+ * This structure collects all the information needed to process a received
+ * event identified by the tuple (proto_id, evt_id, src_id).
+ * These descriptors are stored in a per-protocol @registered_events_handlers
+ * table using as a key a value derived from that tuple.
+ */
+struct scmi_event_handler {
+ u32 key;
+ refcount_t users;
+ struct scmi_registered_event *r_evt;
+ struct blocking_notifier_head chain;
+ struct hlist_node hash;
+ bool enabled;
+};
+
+#define IS_HNDL_PENDING(hndl) (!(hndl)->r_evt)
+
+static struct scmi_event_handler *
+scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
+static void scmi_put_active_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl);
+static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl);
+
+/**
+ * scmi_lookup_and_call_event_chain() - Lookup the proper chain and call it
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The key to use to lookup the related notification chain
+ * @report: The customized event-specific report to pass down to the callbacks
+ * as their *data parameter.
+ */
+static inline void
+scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni,
+ u32 evt_key, void *report)
+{
+ int ret;
+ struct scmi_event_handler *hndl;
+
+ /*
+ * Here ensure the event handler cannot vanish while using it.
+	 * It is legitimate, though, for a handler not to be found at all here,
+ * e.g. when it has been unregistered by the user after some events had
+ * already been queued.
+ */
+ hndl = scmi_get_active_handler(ni, evt_key);
+ if (!hndl)
+ return;
+
+ ret = blocking_notifier_call_chain(&hndl->chain,
+ KEY_XTRACT_EVT_ID(evt_key),
+ report);
+ /* Notifiers are NOT supposed to cut the chain ... */
+ WARN_ON_ONCE(ret & NOTIFY_STOP_MASK);
+
+ scmi_put_active_handler(ni, hndl);
+}
+
+/**
+ * scmi_process_event_header() - Dequeue and process an event header
+ * @eq: The queue to use
+ * @pd: The protocol descriptor to use
+ *
+ * Read an event header from the protocol queue into the dedicated scratch
+ * buffer and look for a matching registered event; if an anomalously
+ * sized read is detected, just flush the queue.
+ *
+ * Return:
+ * * a reference to the matching registered event when found
+ * * ERR_PTR(-EINVAL) when NO registered event could be found
+ * * NULL when the queue is empty
+ */
+static inline struct scmi_registered_event *
+scmi_process_event_header(struct events_queue *eq,
+ struct scmi_registered_events_desc *pd)
+{
+ unsigned int outs;
+ struct scmi_registered_event *r_evt;
+
+ outs = kfifo_out(&eq->kfifo, pd->eh,
+ sizeof(struct scmi_event_header));
+ if (!outs)
+ return NULL;
+ if (outs != sizeof(struct scmi_event_header)) {
+ dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n");
+ kfifo_reset_out(&eq->kfifo);
+ return NULL;
+ }
+
+ r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id);
+ if (!r_evt)
+ r_evt = ERR_PTR(-EINVAL);
+
+ return r_evt;
+}
+
+/**
+ * scmi_process_event_payload() - Dequeue and process an event payload
+ * @eq: The queue to use
+ * @pd: The protocol descriptor to use
+ * @r_evt: The registered event descriptor to use
+ *
+ * Read an event payload from the protocol queue into the dedicated scratch
+ * buffer, fill a custom report, then look for matching event handlers and
+ * call them; skip any unknown event (as marked by scmi_process_event_header())
+ * and, if an anomalously sized read is detected, just flush the queue.
+ *
+ * Return: False when the queue is empty
+ */
+static inline bool
+scmi_process_event_payload(struct events_queue *eq,
+ struct scmi_registered_events_desc *pd,
+ struct scmi_registered_event *r_evt)
+{
+ u32 src_id, key;
+ unsigned int outs;
+ void *report = NULL;
+
+ outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz);
+ if (!outs)
+ return false;
+
+ /* Any in-flight event has now been officially processed */
+ pd->in_flight = NULL;
+
+ if (outs != pd->eh->payld_sz) {
+ dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n");
+ kfifo_reset_out(&eq->kfifo);
+ return false;
+ }
+
+ if (IS_ERR(r_evt)) {
+ dev_warn(pd->ni->handle->dev,
+ "SKIP UNKNOWN EVT - proto:%X evt:%d\n",
+ pd->id, pd->eh->evt_id);
+ return true;
+ }
+
+ report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp,
+ pd->eh->payld, pd->eh->payld_sz,
+ r_evt->report, &src_id);
+ if (!report) {
+ dev_err(pd->ni->handle->dev,
+ "report not available - proto:%X evt:%d\n",
+ pd->id, pd->eh->evt_id);
+ return true;
+ }
+
+ /* At first search for a generic ALL src_ids handler... */
+ key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id);
+ scmi_lookup_and_call_event_chain(pd->ni, key, report);
+
+ /* ...then search for any specific src_id */
+ key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id);
+ scmi_lookup_and_call_event_chain(pd->ni, key, report);
+
+ return true;
+}
+
+/**
+ * scmi_events_dispatcher() - Common worker logic for all work items.
+ * @work: The work item to use, which is associated to a dedicated events_queue
+ *
+ * Logic:
+ * 1. dequeue one pending RX notification (queued in SCMI RX ISR context)
+ * 2. generate a custom event report from the received event message
+ * 3. look up any registered ALL_SRC_IDs handler:
+ *    -> call the related notification chain passing in the report
+ * 4. look up any registered specific SRC_ID handler:
+ *    -> call the related notification chain passing in the report
+ *
+ * Note that:
+ * * a dedicated per-protocol kfifo queue is used: in this way an anomalous
+ * flood of events cannot saturate other protocols' queues.
+ * * each per-protocol queue is associated to a distinct work_item, which
+ * means, in turn, that:
+ * + all protocols can process their dedicated queues concurrently
+ * (since notify_wq:max_active != 1)
+ * + in any case at most one worker instance is allowed to run on the same queue
+ * concurrently: this ensures that we can have only one concurrent
+ * reader/writer on the associated kfifo, so that we can use it lock-less
+ *
+ * Context: Process context.
+ */
+static void scmi_events_dispatcher(struct work_struct *work)
+{
+ struct events_queue *eq;
+ struct scmi_registered_events_desc *pd;
+ struct scmi_registered_event *r_evt;
+
+ eq = container_of(work, struct events_queue, notify_work);
+ pd = container_of(eq, struct scmi_registered_events_desc, equeue);
+ /*
+ * In order to keep the queue lock-less and the number of memcopies
+ * to the bare minimum needed, the dispatcher accounts for the
+ * possibility of per-protocol in-flight events: i.e. an event whose
+ * reception could end up being split across two subsequent runs of this
+ * worker, first the header, then the payload.
+ */
+ do {
+ if (!pd->in_flight) {
+ r_evt = scmi_process_event_header(eq, pd);
+ if (!r_evt)
+ break;
+ pd->in_flight = r_evt;
+ } else {
+ r_evt = pd->in_flight;
+ }
+ } while (scmi_process_event_payload(eq, pd, r_evt));
+}
+
+/**
+ * scmi_notify() - Queues a notification for further deferred processing
+ * @handle: The handle identifying the platform instance from which the
+ * dispatched event is generated
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID (msgID)
+ * @buf: Event Message Payload (without the header)
+ * @len: Event Message Payload size
+ * @ts: RX Timestamp in nanoseconds (boottime)
+ *
+ * Context: Called in interrupt context to queue a received event for
+ * deferred processing.
+ *
+ * Return: 0 on Success
+ */
+int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
+ const void *buf, size_t len, ktime_t ts)
+{
+ struct scmi_registered_event *r_evt;
+ struct scmi_event_header eh;
+ struct scmi_notify_instance *ni;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return 0;
+ ni = handle->notify_priv;
+
+ r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
+ if (!r_evt)
+ return -EINVAL;
+
+ if (len > r_evt->evt->max_payld_sz) {
+ dev_err(handle->dev, "discard badly sized message\n");
+ return -EINVAL;
+ }
+ if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) {
+ dev_warn(handle->dev,
+ "queue full, dropping proto_id:%d evt_id:%d ts:%lld\n",
+ proto_id, evt_id, ktime_to_ns(ts));
+ return -ENOMEM;
+ }
+
+ eh.timestamp = ts;
+ eh.evt_id = evt_id;
+ eh.payld_sz = len;
+ /*
+ * Header and payload are enqueued with two distinct kfifo_in() (so non
+ * atomic), but this situation is handled properly on the consumer side
+ * with in-flight events tracking.
+ */
+ kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh));
+ kfifo_in(&r_evt->proto->equeue.kfifo, buf, len);
+ /*
+ * The return value is deliberately ignored here: we just want to ensure
+ * that a work item is queued whenever some items have been pushed onto
+ * the kfifo:
+ * - if the work was already queued, queueing a new one simply fails,
+ * since it is not needed
+ * - if the work was not queued already, it will be now, even if it was
+ * in fact already running: this behavior avoids any possible race when
+ * this function pushes new items onto the kfifos after the related
+ * executing worker had already determined the kfifo to be empty and
+ * was terminating.
+ */
+ queue_work(r_evt->proto->equeue.wq,
+ &r_evt->proto->equeue.notify_work);
+
+ return 0;
+}
+
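+/*
+ * A minimal sketch of how a transport RX path is expected to feed this
+ * function; the surrounding routine, its arguments and the way the payload
+ * is obtained are purely illustrative assumptions, only the scmi_notify()
+ * call itself reflects the real API:
+ *
+ *	static void dummy_transport_rx(const struct scmi_handle *handle,
+ *				       u8 proto_id, u8 evt_id,
+ *				       const void *payld, size_t len)
+ *	{
+ *		ktime_t ts = ktime_get_boottime();
+ *
+ *		scmi_notify(handle, proto_id, evt_id, payld, len, ts);
+ *	}
+ *
+ * with the timestamp taken as early as possible in the real RX path.
+ */
+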
+/**
+ * scmi_kfifo_free() - Devres action helper to free the kfifo
+ * @kfifo: The kfifo to free
+ */
+static void scmi_kfifo_free(void *kfifo)
+{
+ kfifo_free((struct kfifo *)kfifo);
+}
+
+/**
+ * scmi_initialize_events_queue() - Allocate/Initialize a kfifo buffer
+ * @ni: A reference to the notification instance to use
+ * @equeue: The events_queue to initialize
+ * @sz: Size of the kfifo buffer to allocate
+ *
+ * Allocate a buffer for the kfifo and initialize it.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_initialize_events_queue(struct scmi_notify_instance *ni,
+ struct events_queue *equeue, size_t sz)
+{
+ int ret;
+
+ if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL))
+ return -ENOMEM;
+ /* Size could have been rounded up to a power of two */
+ equeue->sz = kfifo_size(&equeue->kfifo);
+
+ ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free,
+ &equeue->kfifo);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&equeue->notify_work, scmi_events_dispatcher);
+ equeue->wq = ni->notify_wq;
+
+ return ret;
+}
+
+/**
+ * scmi_allocate_registered_events_desc() - Allocate a registered events'
+ * descriptor
+ * @ni: A reference to the &struct scmi_notify_instance notification instance
+ * to use
+ * @proto_id: Protocol ID
+ * @queue_sz: Size of the associated queue to allocate
+ * @eh_sz: Size of the event header scratch area to pre-allocate
+ * @num_events: Number of events to support (size of @registered_events)
+ * @ops: Pointer to a struct holding references to protocol specific helpers
+ * needed during events handling
+ *
+ * It is supposed to be called only once for each protocol at protocol
+ * initialization time, so it warns if the requested protocol is found already
+ * registered.
+ *
+ * Return: The allocated and registered descriptor on Success
+ */
+static struct scmi_registered_events_desc *
+scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
+ u8 proto_id, size_t queue_sz, size_t eh_sz,
+ int num_events,
+ const struct scmi_event_ops *ops)
+{
+ int ret;
+ struct scmi_registered_events_desc *pd;
+
+ /* Ensure protocols are up to date */
+ smp_rmb();
+ if (WARN_ON(ni->registered_protocols[proto_id]))
+ return ERR_PTR(-EINVAL);
+
+ pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+ pd->id = proto_id;
+ pd->ops = ops;
+ pd->ni = ni;
+
+ ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz);
+ if (ret)
+ return ERR_PTR(ret);
+
+ pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL);
+ if (!pd->eh)
+ return ERR_PTR(-ENOMEM);
+ pd->eh_sz = eh_sz;
+
+ pd->registered_events = devm_kcalloc(ni->handle->dev, num_events,
+ sizeof(char *), GFP_KERNEL);
+ if (!pd->registered_events)
+ return ERR_PTR(-ENOMEM);
+ pd->num_events = num_events;
+
+ /* Initialize per protocol handlers table */
+ mutex_init(&pd->registered_mtx);
+ hash_init(pd->registered_events_handlers);
+
+ return pd;
+}
+
+/**
+ * scmi_register_protocol_events() - Register Protocol Events with the core
+ * @handle: The handle identifying the platform instance against which the
+ * protocol's events are registered
+ * @proto_id: Protocol ID
+ * @queue_sz: Size in bytes of the associated queue to be allocated
+ * @ops: Protocol specific event-related operations
+ * @evt: Event descriptor array
+ * @num_events: Number of events in @evt array
+ * @num_sources: Number of possible sources for this protocol on this
+ * platform.
+ *
+ * Used by SCMI Protocols initialization code to register with the notification
+ * core the list of supported events and their descriptors: takes care to
+ * pre-allocate and store all needed descriptors, scratch buffers and event
+ * queues.
+ *
+ * Return: 0 on Success
+ */
+int scmi_register_protocol_events(const struct scmi_handle *handle,
+ u8 proto_id, size_t queue_sz,
+ const struct scmi_event_ops *ops,
+ const struct scmi_event *evt, int num_events,
+ int num_sources)
+{
+ int i;
+ size_t payld_sz = 0;
+ struct scmi_registered_events_desc *pd;
+ struct scmi_notify_instance *ni;
+
+ if (!ops || !evt)
+ return -EINVAL;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return -ENOMEM;
+ ni = handle->notify_priv;
+
+ /* Attach to the notification main devres group */
+ if (!devres_open_group(ni->handle->dev, ni->gid, GFP_KERNEL))
+ return -ENOMEM;
+
+ for (i = 0; i < num_events; i++)
+ payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
+ payld_sz += sizeof(struct scmi_event_header);
+
+ pd = scmi_allocate_registered_events_desc(ni, proto_id, queue_sz,
+ payld_sz, num_events, ops);
+ if (IS_ERR(pd))
+ goto err;
+
+ for (i = 0; i < num_events; i++, evt++) {
+ struct scmi_registered_event *r_evt;
+
+ r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
+ GFP_KERNEL);
+ if (!r_evt)
+ goto err;
+ r_evt->proto = pd;
+ r_evt->evt = evt;
+
+ r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
+ sizeof(refcount_t), GFP_KERNEL);
+ if (!r_evt->sources)
+ goto err;
+ r_evt->num_sources = num_sources;
+ mutex_init(&r_evt->sources_mtx);
+
+ r_evt->report = devm_kzalloc(ni->handle->dev,
+ evt->max_report_sz, GFP_KERNEL);
+ if (!r_evt->report)
+ goto err;
+
+ pd->registered_events[i] = r_evt;
+ /* Ensure events are updated */
+ smp_wmb();
+ dev_dbg(handle->dev, "registered event - %lX\n",
+ MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id));
+ }
+
+ /* Register protocol and events...it will never be removed */
+ ni->registered_protocols[proto_id] = pd;
+ /* Ensure protocols are updated */
+ smp_wmb();
+
+ devres_close_group(ni->handle->dev, ni->gid);
+
+ /*
+ * Finalize any pending event handlers which could have been waiting for
+ * this protocol's events registration.
+ */
+ schedule_work(&ni->init_work);
+
+ return 0;
+
+err:
+ dev_warn(handle->dev, "Proto:%X - Registration Failed !\n", proto_id);
+ /* A failing protocol registration does not trigger full failure */
+ devres_close_group(ni->handle->dev, ni->gid);
+
+ return -ENOMEM;
+}
+
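+/*
+ * A compressed sketch of the expected protocol-side usage; the FOO protocol,
+ * its event ID and its payload/report structures are hypothetical
+ * placeholders, while the call itself mirrors the real signature above
+ * (see perf.c, power.c, reset.c and sensors.c for the actual users):
+ *
+ *	static const struct scmi_event foo_events[] = {
+ *		{
+ *			.id = SCMI_EVENT_FOO_CHANGED,
+ *			.max_payld_sz = sizeof(struct scmi_foo_notify_payld),
+ *			.max_report_sz = sizeof(struct scmi_foo_report),
+ *		},
+ *	};
+ *
+ *	scmi_register_protocol_events(handle, SCMI_PROTOCOL_FOO,
+ *				      SCMI_PROTO_QUEUE_SZ, &foo_event_ops,
+ *				      foo_events, ARRAY_SIZE(foo_events),
+ *				      num_foo_sources);
+ */
+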
+/**
+ * scmi_allocate_event_handler() - Allocate Event handler
+ * @ni: A reference to the notification instance to use
+ * @evt_key: 32bit key uniquely bound to the event identified by the tuple
+ * (proto_id, evt_id, src_id)
+ *
+ * Allocate an event handler and related notification chain associated with
+ * the provided event handler key.
+ * Note that, at this point, a related registered_event is still to be
+ * associated to this handler descriptor (hndl->r_evt == NULL), so the handler
+ * is initialized as pending.
+ *
+ * Context: Assumes to be called with @pending_mtx already acquired.
+ * Return: the freshly allocated structure on Success
+ */
+static struct scmi_event_handler *
+scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ struct scmi_event_handler *hndl;
+
+ hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
+ if (!hndl)
+ return NULL;
+ hndl->key = evt_key;
+ BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
+ refcount_set(&hndl->users, 1);
+ /* New handlers are created pending */
+ hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);
+
+ return hndl;
+}
+
+/**
+ * scmi_free_event_handler() - Free the provided Event handler
+ * @hndl: The event handler structure to free
+ *
+ * Context: Assumes to be called with proper locking acquired depending
+ * on the situation.
+ */
+static void scmi_free_event_handler(struct scmi_event_handler *hndl)
+{
+ hash_del(&hndl->hash);
+ kfree(hndl);
+}
+
+/**
+ * scmi_bind_event_handler() - Helper to attempt binding a handler to an event
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to bind
+ *
+ * If an associated registered event is found, move the handler from the pending
+ * into the registered table.
+ *
+ * Context: Assumes to be called with @pending_mtx already acquired.
+ *
+ * Return: 0 on Success
+ */
+static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_event *r_evt;
+
+ r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
+ KEY_XTRACT_EVT_ID(hndl->key));
+ if (!r_evt)
+ return -EINVAL;
+
+ /* Remove from pending and insert into registered */
+ hash_del(&hndl->hash);
+ hndl->r_evt = r_evt;
+ mutex_lock(&r_evt->proto->registered_mtx);
+ hash_add(r_evt->proto->registered_events_handlers,
+ &hndl->hash, hndl->key);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+
+ return 0;
+}
+
+/**
+ * scmi_valid_pending_handler() - Helper to check pending status of handlers
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to check
+ *
+ * A handler is considered pending when its r_evt == NULL, because the related
+ * event was still unknown at handler's registration time; however, since all
+ * protocols register their supported events once and for all at protocol
+ * initialization time, a pending handler cannot be considered valid anymore if
+ * the underlying event (which it is waiting for) belongs to an already
+ * initialized and registered protocol.
+ *
+ * Return: 0 on Success
+ */
+static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_events_desc *pd;
+
+ if (!IS_HNDL_PENDING(hndl))
+ return -EINVAL;
+
+ pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key));
+ if (pd)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * scmi_register_event_handler() - Register whenever possible an Event handler
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to register
+ *
+ * First try to bind the event handler to its associated event, then check if
+ * it is at least a valid pending handler: if it is neither bound nor valid,
+ * an error is returned.
+ *
+ * Valid pending incomplete bindings will be periodically retried by a dedicated
+ * worker which is kicked each time a new protocol completes its own
+ * registration phase.
+ *
+ * Context: Assumes to be called with @pending_mtx acquired.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_register_event_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ int ret;
+
+ ret = scmi_bind_event_handler(ni, hndl);
+ if (!ret) {
+ dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n",
+ hndl->key);
+ } else {
+ ret = scmi_valid_pending_handler(ni, hndl);
+ if (!ret)
+ dev_dbg(ni->handle->dev,
+ "registered PENDING handler - key:%X\n",
+ hndl->key);
+ }
+
+ return ret;
+}
+
+/**
+ * __scmi_event_handler_get_ops() - Utility to get or create an event handler
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The event key to use
+ * @create: A boolean flag to specify if a handler must be created when
+ * not already existent
+ *
+ * Search for the desired handler matching the key in both the per-protocol
+ * registered table and the common pending table:
+ * * if found, adjust the users refcount
+ * * if not found and @create is true, create and register the new handler:
+ * handler could end up being registered as pending if no matching event
+ * could be found.
+ *
+ * A handler is guaranteed to reside in one and only one of the tables at
+ * any one time; to ensure this the whole search and create is performed
+ * holding the @pending_mtx lock, with @registered_mtx additionally acquired
+ * if needed.
+ *
+ * Note that when a nested acquisition of these mutexes is needed the locking
+ * order is always (same as in @init_work):
+ * 1. pending_mtx
+ * 2. registered_mtx
+ *
+ * Events generation is NOT enabled right after creation within this routine
+ * since at creation time we usually want to have everything set up and ready
+ * before events really start flowing.
+ *
+ * Return: A properly refcounted handler on Success, NULL on Failure
+ */
+static inline struct scmi_event_handler *
+__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
+ u32 evt_key, bool create)
+{
+ struct scmi_registered_event *r_evt;
+ struct scmi_event_handler *hndl = NULL;
+
+ r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
+ KEY_XTRACT_EVT_ID(evt_key));
+
+ mutex_lock(&ni->pending_mtx);
+ /* Search registered events at first ... if possible at all */
+ if (r_evt) {
+ mutex_lock(&r_evt->proto->registered_mtx);
+ hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
+ hndl, evt_key);
+ if (hndl)
+ refcount_inc(&hndl->users);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+ }
+
+ /* ...then amongst pending. */
+ if (!hndl) {
+ hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
+ if (hndl)
+ refcount_inc(&hndl->users);
+ }
+
+ /* Create if still not found and required */
+ if (!hndl && create) {
+ hndl = scmi_allocate_event_handler(ni, evt_key);
+ if (hndl && scmi_register_event_handler(ni, hndl)) {
+ dev_dbg(ni->handle->dev,
+ "purging UNKNOWN handler - key:%X\n",
+ hndl->key);
+ /* this hndl can be only a pending one */
+ scmi_put_handler_unlocked(ni, hndl);
+ hndl = NULL;
+ }
+ }
+ mutex_unlock(&ni->pending_mtx);
+
+ return hndl;
+}
+
+static struct scmi_event_handler *
+scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ return __scmi_event_handler_get_ops(ni, evt_key, false);
+}
+
+static struct scmi_event_handler *
+scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ return __scmi_event_handler_get_ops(ni, evt_key, true);
+}
+
+/**
+ * scmi_get_active_handler() - Helper to get active handlers only
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The event key to use
+ *
+ * Search for the desired handler matching the key only in the per-protocol
+ * table of registered handlers: this is called only from the dispatching path,
+ * so we want to be as quick as possible and do not care about pending
+ * handlers.
+ *
+ * Return: A properly refcounted active handler, or NULL if not found
+ */
+static struct scmi_event_handler *
+scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ struct scmi_registered_event *r_evt;
+ struct scmi_event_handler *hndl = NULL;
+
+ r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
+ KEY_XTRACT_EVT_ID(evt_key));
+ if (r_evt) {
+ mutex_lock(&r_evt->proto->registered_mtx);
+ hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
+ hndl, evt_key);
+ if (hndl)
+ refcount_inc(&hndl->users);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+ }
+
+ return hndl;
+}
+
+/**
+ * __scmi_enable_evt() - Enable/disable events generation
+ * @r_evt: The registered event to act upon
+ * @src_id: The src_id to act upon
+ * @enable: The action to perform: true->Enable, false->Disable
+ *
+ * Takes care of proper refcounting while performing enable/disable: handles
+ * the special case of ALL sources requests by itself.
+ * Returns successfully if at least one of the required src_ids has been
+ * successfully enabled/disabled.
+ *
+ * Return: 0 on Success
+ */
+static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
+ u32 src_id, bool enable)
+{
+ int retvals = 0;
+ u32 num_sources;
+ refcount_t *sid;
+
+ if (src_id == SRC_ID_MASK) {
+ src_id = 0;
+ num_sources = r_evt->num_sources;
+ } else if (src_id < r_evt->num_sources) {
+ num_sources = 1;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&r_evt->sources_mtx);
+ if (enable) {
+ for (; num_sources; src_id++, num_sources--) {
+ int ret = 0;
+
+ sid = &r_evt->sources[src_id];
+ if (refcount_read(sid) == 0) {
+ ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
+ src_id);
+ if (!ret)
+ refcount_set(sid, 1);
+ } else {
+ refcount_inc(sid);
+ }
+ retvals += !ret;
+ }
+ } else {
+ for (; num_sources; src_id++, num_sources--) {
+ sid = &r_evt->sources[src_id];
+ if (refcount_dec_and_test(sid))
+ REVT_NOTIFY_DISABLE(r_evt,
+ r_evt->evt->id, src_id);
+ }
+ retvals = 1;
+ }
+ mutex_unlock(&r_evt->sources_mtx);
+
+ return retvals ? 0 : -EINVAL;
+}
+
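+/*
+ * A worked example of the refcounting performed above, assuming two distinct
+ * users acting on the same hypothetical src_id 0 of a registered event:
+ *
+ *	__scmi_enable_evt(r_evt, 0, true)	refcount 0 -> 1, platform ENABLE
+ *	__scmi_enable_evt(r_evt, 0, true)	refcount 1 -> 2, no command sent
+ *	__scmi_enable_evt(r_evt, 0, false)	refcount 2 -> 1, no command sent
+ *	__scmi_enable_evt(r_evt, 0, false)	refcount 1 -> 0, platform DISABLE
+ */
+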
+static int scmi_enable_events(struct scmi_event_handler *hndl)
+{
+ int ret = 0;
+
+ if (!hndl->enabled) {
+ ret = __scmi_enable_evt(hndl->r_evt,
+ KEY_XTRACT_SRC_ID(hndl->key), true);
+ if (!ret)
+ hndl->enabled = true;
+ }
+
+ return ret;
+}
+
+static int scmi_disable_events(struct scmi_event_handler *hndl)
+{
+ int ret = 0;
+
+ if (hndl->enabled) {
+ ret = __scmi_enable_evt(hndl->r_evt,
+ KEY_XTRACT_SRC_ID(hndl->key), false);
+ if (!ret)
+ hndl->enabled = false;
+ }
+
+ return ret;
+}
+
+/**
+ * scmi_put_handler_unlocked() - Put an event handler
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to act upon
+ *
+ * After having got exclusive access to the registered handlers hashtable,
+ * update the refcount and, if @hndl is no longer in use by anyone:
+ * * ask for events' generation disabling
+ * * unregister and free the handler itself
+ *
+ * Context: Assumes all the proper locking has been managed by the caller.
+ */
+static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ if (refcount_dec_and_test(&hndl->users)) {
+ if (!IS_HNDL_PENDING(hndl))
+ scmi_disable_events(hndl);
+ scmi_free_event_handler(hndl);
+ }
+}
+
+static void scmi_put_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_event *r_evt = hndl->r_evt;
+
+ mutex_lock(&ni->pending_mtx);
+ if (r_evt)
+ mutex_lock(&r_evt->proto->registered_mtx);
+
+ scmi_put_handler_unlocked(ni, hndl);
+
+ if (r_evt)
+ mutex_unlock(&r_evt->proto->registered_mtx);
+ mutex_unlock(&ni->pending_mtx);
+}
+
+static void scmi_put_active_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_event *r_evt = hndl->r_evt;
+
+ mutex_lock(&r_evt->proto->registered_mtx);
+ scmi_put_handler_unlocked(ni, hndl);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+}
+
+/**
+ * scmi_event_handler_enable_events() - Enable events associated with a handler
+ * @hndl: The Event handler to act upon
+ *
+ * Return: 0 on Success
+ */
+static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
+{
+ if (scmi_enable_events(hndl)) {
+ pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * scmi_register_notifier() - Register a notifier_block for an event
+ * @handle: The handle identifying the platform instance against which the
+ * callback is registered
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID, when NULL register for events coming from ALL possible
+ * sources
+ * @nb: A standard notifier block to register for the specified event
+ *
+ * Generic helper to register a notifier_block against a protocol event.
+ *
+ * A notifier_block @nb will be registered for each distinct event identified
+ * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain
+ * so that:
+ *
+ * (proto_X, evt_Y, src_Z) --> chain_X_Y_Z
+ *
+ * @src_id meaning is protocol specific and identifies the origin of the event
+ * (like domain_id, sensor_id and so forth).
+ *
+ * @src_id can be NULL to signify that the caller is interested in receiving
+ * notifications from ALL the available sources for that protocol OR simply that
+ * the protocol does not support distinct sources.
+ *
+ * As soon as one user for the specified tuple appears, a handler is created,
+ * and that specific event's generation is enabled at the platform level, unless
+ * an associated registered event is found missing, meaning that the needed
+ * protocol is still to be initialized and the handler has just been registered
+ * as still pending.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_register_notifier(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, u32 *src_id,
+ struct notifier_block *nb)
+{
+ int ret = 0;
+ u32 evt_key;
+ struct scmi_event_handler *hndl;
+ struct scmi_notify_instance *ni;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return -ENODEV;
+ ni = handle->notify_priv;
+
+ evt_key = MAKE_HASH_KEY(proto_id, evt_id,
+ src_id ? *src_id : SRC_ID_MASK);
+ hndl = scmi_get_or_create_handler(ni, evt_key);
+ if (!hndl)
+ return -EINVAL;
+
+ blocking_notifier_chain_register(&hndl->chain, nb);
+
+ /* Enable events for non-pending handlers */
+ if (!IS_HNDL_PENDING(hndl)) {
+ ret = scmi_event_handler_enable_events(hndl);
+ if (ret)
+ scmi_put_handler(ni, hndl);
+ }
+
+ return ret;
+}
+
+/**
+ * scmi_unregister_notifier() - Unregister a notifier_block for an event
+ * @handle: The handle identifying the platform instance against which the
+ * callback is unregistered
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID
+ * @nb: The notifier_block to unregister
+ *
+ * Takes care to unregister the provided @nb from the notification chain
+ * associated to the specified event and, if there are no more users for the
+ * event handler, also frees the associated event handler structures
+ * (this could possibly cause disabling of the event's generation at the
+ * platform level).
+ *
+ * Return: 0 on Success
+ */
+static int scmi_unregister_notifier(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, u32 *src_id,
+ struct notifier_block *nb)
+{
+ u32 evt_key;
+ struct scmi_event_handler *hndl;
+ struct scmi_notify_instance *ni;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return -ENODEV;
+ ni = handle->notify_priv;
+
+ evt_key = MAKE_HASH_KEY(proto_id, evt_id,
+ src_id ? *src_id : SRC_ID_MASK);
+ hndl = scmi_get_handler(ni, evt_key);
+ if (!hndl)
+ return -EINVAL;
+
+ /*
+ * Note that this chain unregistration call is safe on its own,
+ * being internally protected by an rwsem.
+ */
+ blocking_notifier_chain_unregister(&hndl->chain, nb);
+ scmi_put_handler(ni, hndl);
+
+ /*
+ * This balances the initial get issued in @scmi_register_notifier.
+ * If this notifier_block happened to be the last known user callback
+ * for this event, the handler is here freed and the event's generation
+ * stopped.
+ *
+ * Note that an ongoing concurrent lookup on the delivery workqueue
+ * path could still be holding the refcount at 1 even after this routine
+ * completes: in such a case it will be the final put on the delivery
+ * path which finally frees this unused handler.
+ */
+ scmi_put_handler(ni, hndl);
+
+ return 0;
+}
+
+/**
+ * scmi_protocols_late_init() - Worker for late initialization
+ * @work: The work item to use associated to the proper SCMI instance
+ *
+ * This kicks in whenever a new protocol has completed its own registration via
+ * scmi_register_protocol_events(): it is in charge of scanning the table of
+ * pending handlers (registered by users while the related protocol was still
+ * not initialized) and finalizing their initialization whenever possible;
+ * invalid pending handlers are purged at this point in time.
+ */
+static void scmi_protocols_late_init(struct work_struct *work)
+{
+ int bkt;
+ struct scmi_event_handler *hndl;
+ struct scmi_notify_instance *ni;
+ struct hlist_node *tmp;
+
+ ni = container_of(work, struct scmi_notify_instance, init_work);
+
+ /* Ensure protocols and events are up to date */
+ smp_rmb();
+
+ mutex_lock(&ni->pending_mtx);
+ hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) {
+ int ret;
+
+ ret = scmi_bind_event_handler(ni, hndl);
+ if (!ret) {
+ dev_dbg(ni->handle->dev,
+ "finalized PENDING handler - key:%X\n",
+ hndl->key);
+ ret = scmi_event_handler_enable_events(hndl);
+ } else {
+ ret = scmi_valid_pending_handler(ni, hndl);
+ }
+ if (ret) {
+ dev_dbg(ni->handle->dev,
+ "purging PENDING handler - key:%X\n",
+ hndl->key);
+ /* this hndl can be only a pending one */
+ scmi_put_handler_unlocked(ni, hndl);
+ }
+ }
+ mutex_unlock(&ni->pending_mtx);
+}
+
+/*
+ * notify_ops are attached to the handle so that they can be accessed
+ * directly from an scmi_driver to register its own notifiers.
+ */
+static struct scmi_notify_ops notify_ops = {
+ .register_event_notifier = scmi_register_notifier,
+ .unregister_event_notifier = scmi_unregister_notifier,
+};
+
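+/*
+ * A minimal registration sketch from an scmi_driver's perspective; the
+ * notifier callback and its use of the PERF limits event are illustrative
+ * assumptions built on the ops above and on the report structure defined
+ * for the PERF protocol:
+ *
+ *	static int dummy_limits_cb(struct notifier_block *nb,
+ *				   unsigned long event, void *data)
+ *	{
+ *		struct scmi_perf_limits_report *r = data;
+ *
+ *		pr_info("PERF limits changed on domain:%u\n", r->domain_id);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block dummy_nb = {
+ *		.notifier_call = dummy_limits_cb,
+ *	};
+ *
+ *	u32 src_id = 0;
+ *
+ *	handle->notify_ops->register_event_notifier(handle,
+ *			SCMI_PROTOCOL_PERF,
+ *			SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ *			&src_id, &dummy_nb);
+ *
+ * and, symmetrically, unregister_event_notifier() called with the same tuple
+ * and notifier_block balances the registration above.
+ */
+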
+/**
+ * scmi_notification_init() - Initializes Notification Core Support
+ * @handle: The handle identifying the platform instance to initialize
+ *
+ * This function lays out all the basic resources needed by the notification
+ * core instance identified by the provided handle: once done, all of the
+ * SCMI Protocols can register their events with the core during their own
+ * initializations.
+ *
+ * Note that failing to initialize the core notifications support does not
+ * cause the whole SCMI Protocols stack to fail its initialization.
+ *
+ * SCMI Notification Initialization happens in 2 steps:
+ * * initialization: basic common allocations (this function)
+ * * registration: protocols asynchronously come to life and register their
+ * own supported list of events with the core; this causes
+ * further per-protocol allocations
+ *
+ * Any user callback registration attempt referring to a still unregistered
+ * event will be registered as pending and finalized later (if possible)
+ * by the scmi_protocols_late_init() work.
+ * This allows for lazy initialization of SCMI Protocols due to late (or
+ * missing) SCMI drivers' modules loading.
+ *
+ * Return: 0 on Success
+ */
+int scmi_notification_init(struct scmi_handle *handle)
+{
+ void *gid;
+ struct scmi_notify_instance *ni;
+
+ gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
+ if (!gid)
+ return -ENOMEM;
+
+ ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL);
+ if (!ni)
+ goto err;
+
+ ni->gid = gid;
+ ni->handle = handle;
+
+ ni->notify_wq = alloc_workqueue("scmi_notify",
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
+ 0);
+ if (!ni->notify_wq)
+ goto err;
+
+ ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
+ sizeof(char *), GFP_KERNEL);
+ if (!ni->registered_protocols)
+ goto err;
+
+ mutex_init(&ni->pending_mtx);
+ hash_init(ni->pending_events_handlers);
+
+ INIT_WORK(&ni->init_work, scmi_protocols_late_init);
+
+ handle->notify_ops = &notify_ops;
+ handle->notify_priv = ni;
+ /* Ensure handle is up to date */
+ smp_wmb();
+
+ dev_info(handle->dev, "Core Enabled.\n");
+
+ devres_close_group(handle->dev, ni->gid);
+
+ return 0;
+
+err:
+ dev_warn(handle->dev, "Initialization Failed.\n");
+ devres_release_group(handle->dev, NULL);
+ return -ENOMEM;
+}
+
+/**
+ * scmi_notification_exit() - Shutdown and clean Notification core
+ * @handle: The handle identifying the platform instance to shutdown
+ */
+void scmi_notification_exit(struct scmi_handle *handle)
+{
+ struct scmi_notify_instance *ni;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return;
+ ni = handle->notify_priv;
+
+ handle->notify_priv = NULL;
+ /* Ensure handle is up to date */
+ smp_wmb();
+
+ /* Destroy while letting pending work complete */
+ destroy_workqueue(ni->notify_wq);
+
+ devres_release_group(ni->handle->dev, ni->gid);
+}
diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h
new file mode 100644
index 000000000000..3485f20fa70e
--- /dev/null
+++ b/drivers/firmware/arm_scmi/notify.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * notification header file containing some definitions, structures
+ * and function prototypes related to SCMI Notification handling.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef _SCMI_NOTIFY_H
+#define _SCMI_NOTIFY_H
+
+#include <linux/device.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define SCMI_PROTO_QUEUE_SZ 4096
+
+/**
+ * struct scmi_event - Describes an event to be supported
+ * @id: Event ID
+ * @max_payld_sz: Max possible size for the payload of a notification message
+ * @max_report_sz: Max possible size for the report of a notification message
+ *
+ * Each SCMI protocol, during its initialization phase, can describe the events
+ * it wishes to support in an array of struct scmi_event and pass it to the
+ * core using scmi_register_protocol_events().
+ */
+struct scmi_event {
+ u8 id;
+ size_t max_payld_sz;
+ size_t max_report_sz;
+};
+
+/**
+ * struct scmi_event_ops - Protocol helpers called by the notification core.
+ * @set_notify_enabled: Enable/disable the required evt_id/src_id notifications
+ * using the proper custom protocol commands.
+ * Return 0 on Success
+ * @fill_custom_report: Fill a custom event report from the provided
+ * event message payld, identifying the event
+ * specific src_id.
+ * Return NULL on failure, otherwise @report fully
+ * populated
+ *
+ * Context: Helpers described in &struct scmi_event_ops are called only in
+ * process context.
+ */
+struct scmi_event_ops {
+ int (*set_notify_enabled)(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enabled);
+ void *(*fill_custom_report)(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id);
+};
+
+int scmi_notification_init(struct scmi_handle *handle);
+void scmi_notification_exit(struct scmi_handle *handle);
+
+int scmi_register_protocol_events(const struct scmi_handle *handle,
+ u8 proto_id, size_t queue_sz,
+ const struct scmi_event_ops *ops,
+ const struct scmi_event *evt, int num_events,
+ int num_sources);
+int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
+ const void *buf, size_t len, ktime_t ts);
+
+#endif /* _SCMI_NOTIFY_H */
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index eadc171e254b..3e1e87012c95 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -5,15 +5,19 @@
* Copyright (C) 2018 ARM Ltd.
*/
+#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
+
#include <linux/bits.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/scmi_protocol.h>
#include <linux/sort.h>
#include "common.h"
+#include "notify.h"
enum scmi_performance_protocol_cmd {
PERF_DOMAIN_ATTRIBUTES = 0x3,
@@ -27,11 +31,6 @@ enum scmi_performance_protocol_cmd {
PERF_DESCRIBE_FASTCHANNEL = 0xb,
};
-enum scmi_performance_protocol_notify {
- PERFORMANCE_LIMITS_CHANGED = 0x0,
- PERFORMANCE_LEVEL_CHANGED = 0x1,
-};
-
struct scmi_opp {
u32 perf;
u32 power;
@@ -86,6 +85,19 @@ struct scmi_perf_notify_level_or_limits {
__le32 notify_enable;
};
+struct scmi_perf_limits_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 range_max;
+ __le32 range_min;
+};
+
+struct scmi_perf_level_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 performance_level;
+};
+
struct scmi_msg_resp_perf_describe_levels {
__le16 num_returned;
__le16 num_remaining;
@@ -158,6 +170,11 @@ struct scmi_perf_info {
struct perf_dom_info *dom_info;
};
+static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
+ PERF_NOTIFY_LIMITS,
+ PERF_NOTIFY_LEVEL,
+};
+
static int scmi_perf_attributes_get(const struct scmi_handle *handle,
struct scmi_perf_info *pi)
{
@@ -488,6 +505,29 @@ static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
return scmi_perf_mb_level_get(handle, domain, level, poll);
}
+static int scmi_perf_level_limits_notify(const struct scmi_handle *handle,
+ u32 domain, int message_id,
+ bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_perf_notify_level_or_limits *notify;
+
+ ret = scmi_xfer_get_init(handle, message_id, SCMI_PROTOCOL_PERF,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
{
if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
@@ -697,6 +737,17 @@ static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
return ret;
}
+static bool scmi_fast_switch_possible(const struct scmi_handle *handle,
+ struct device *dev)
+{
+ struct perf_dom_info *dom;
+ struct scmi_perf_info *pi = handle->perf_priv;
+
+ dom = pi->dom_info + scmi_dev_domain_id(dev);
+
+ return dom->fc_info && dom->fc_info->level_set_addr;
+}
+
static struct scmi_perf_ops perf_ops = {
.limits_set = scmi_perf_limits_set,
.limits_get = scmi_perf_limits_get,
@@ -708,6 +759,90 @@ static struct scmi_perf_ops perf_ops = {
.freq_set = scmi_dvfs_freq_set,
.freq_get = scmi_dvfs_freq_get,
.est_power_get = scmi_dvfs_est_power_get,
+ .fast_switch_possible = scmi_fast_switch_possible,
+};
+
+static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_perf_level_limits_notify(handle, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_perf_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
+ {
+ const struct scmi_perf_limits_notify_payld *p = payld;
+ struct scmi_perf_limits_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->range_max = le32_to_cpu(p->range_max);
+ r->range_min = le32_to_cpu(p->range_min);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
+ {
+ const struct scmi_perf_level_notify_payld *p = payld;
+ struct scmi_perf_level_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->performance_level = le32_to_cpu(p->performance_level);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return rep;
+}
+
+static const struct scmi_event perf_events[] = {
+ {
+ .id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
+ .max_report_sz = sizeof(struct scmi_perf_limits_report),
+ },
+ {
+ .id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
+ .max_report_sz = sizeof(struct scmi_perf_level_report),
+ },
+};
+
+static const struct scmi_event_ops perf_event_ops = {
+ .set_notify_enabled = scmi_perf_set_notify_enabled,
+ .fill_custom_report = scmi_perf_fill_custom_report,
};
static int scmi_perf_protocol_init(struct scmi_handle *handle)
@@ -742,6 +877,12 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)
scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
}
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_PERF, SCMI_PROTO_QUEUE_SZ,
+ &perf_event_ops, perf_events,
+ ARRAY_SIZE(perf_events),
+ pinfo->num_domains);
+
pinfo->version = version;
handle->perf_ops = &perf_ops;
handle->perf_priv = pinfo;
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index cf7f0312381b..46f213644c49 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -5,19 +5,18 @@
* Copyright (C) 2018 ARM Ltd.
*/
+#define pr_fmt(fmt) "SCMI Notifications POWER - " fmt
+
+#include <linux/scmi_protocol.h>
+
#include "common.h"
+#include "notify.h"
enum scmi_power_protocol_cmd {
POWER_DOMAIN_ATTRIBUTES = 0x3,
POWER_STATE_SET = 0x4,
POWER_STATE_GET = 0x5,
POWER_STATE_NOTIFY = 0x6,
- POWER_STATE_CHANGE_REQUESTED_NOTIFY = 0x7,
-};
-
-enum scmi_power_protocol_notify {
- POWER_STATE_CHANGED = 0x0,
- POWER_STATE_CHANGE_REQUESTED = 0x1,
};
struct scmi_msg_resp_power_attributes {
@@ -48,6 +47,12 @@ struct scmi_power_state_notify {
__le32 notify_enable;
};
+struct scmi_power_state_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 power_state;
+};
+
struct power_dom_info {
bool state_set_sync;
bool state_set_async;
@@ -186,6 +191,75 @@ static struct scmi_power_ops power_ops = {
.state_get = scmi_power_state_get,
};
+static int scmi_power_request_notify(const struct scmi_handle *handle,
+ u32 domain, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_power_state_notify *notify;
+
+ ret = scmi_xfer_get_init(handle, POWER_STATE_NOTIFY,
+ SCMI_PROTOCOL_POWER, sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_power_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_power_request_notify(handle, src_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLE - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_power_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_power_state_notify_payld *p = payld;
+ struct scmi_power_state_changed_report *r = report;
+
+ if (evt_id != SCMI_EVENT_POWER_STATE_CHANGED || sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power_state = le32_to_cpu(p->power_state);
+ *src_id = r->domain_id;
+
+ return r;
+}
+
+static const struct scmi_event power_events[] = {
+ {
+ .id = SCMI_EVENT_POWER_STATE_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_power_state_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_power_state_changed_report),
+ },
+};
+
+static const struct scmi_event_ops power_event_ops = {
+ .set_notify_enabled = scmi_power_set_notify_enabled,
+ .fill_custom_report = scmi_power_fill_custom_report,
+};
+
static int scmi_power_protocol_init(struct scmi_handle *handle)
{
int domain;
@@ -214,6 +288,12 @@ static int scmi_power_protocol_init(struct scmi_handle *handle)
scmi_power_domain_attributes_get(handle, domain, dom);
}
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_POWER, SCMI_PROTO_QUEUE_SZ,
+ &power_event_ops, power_events,
+ ARRAY_SIZE(power_events),
+ pinfo->num_domains);
+
pinfo->version = version;
handle->power_ops = &power_ops;
handle->power_priv = pinfo;
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index de73054554f3..3691bafca057 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -5,7 +5,12 @@
* Copyright (C) 2019 ARM Ltd.
*/
+#define pr_fmt(fmt) "SCMI Notifications RESET - " fmt
+
+#include <linux/scmi_protocol.h>
+
#include "common.h"
+#include "notify.h"
enum scmi_reset_protocol_cmd {
RESET_DOMAIN_ATTRIBUTES = 0x3,
@@ -13,10 +18,6 @@ enum scmi_reset_protocol_cmd {
RESET_NOTIFY = 0x5,
};
-enum scmi_reset_protocol_notify {
- RESET_ISSUED = 0x0,
-};
-
#define NUM_RESET_DOMAIN_MASK 0xffff
#define RESET_NOTIFY_ENABLE BIT(0)
@@ -40,6 +41,18 @@ struct scmi_msg_reset_domain_reset {
#define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE)
};
+struct scmi_msg_reset_notify {
+ __le32 id;
+ __le32 event_control;
+#define RESET_TP_NOTIFY_ALL BIT(0)
+};
+
+struct scmi_reset_issued_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 reset_state;
+};
+
struct reset_dom_info {
bool async_reset;
bool reset_notify;
@@ -190,6 +203,75 @@ static struct scmi_reset_ops reset_ops = {
.deassert = scmi_reset_domain_deassert,
};
+static int scmi_reset_notify(const struct scmi_handle *handle, u32 domain_id,
+ bool enable)
+{
+ int ret;
+ u32 evt_cntl = enable ? RESET_TP_NOTIFY_ALL : 0;
+ struct scmi_xfer *t;
+ struct scmi_msg_reset_notify *cfg;
+
+ ret = scmi_xfer_get_init(handle, RESET_NOTIFY,
+ SCMI_PROTOCOL_RESET, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(domain_id);
+ cfg->event_control = cpu_to_le32(evt_cntl);
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_reset_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_reset_notify(handle, src_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_reset_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_reset_issued_notify_payld *p = payld;
+ struct scmi_reset_issued_report *r = report;
+
+ if (evt_id != SCMI_EVENT_RESET_ISSUED || sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->reset_state = le32_to_cpu(p->reset_state);
+ *src_id = r->domain_id;
+
+ return r;
+}
+
+static const struct scmi_event reset_events[] = {
+ {
+ .id = SCMI_EVENT_RESET_ISSUED,
+ .max_payld_sz = sizeof(struct scmi_reset_issued_notify_payld),
+ .max_report_sz = sizeof(struct scmi_reset_issued_report),
+ },
+};
+
+static const struct scmi_event_ops reset_event_ops = {
+ .set_notify_enabled = scmi_reset_set_notify_enabled,
+ .fill_custom_report = scmi_reset_fill_custom_report,
+};
+
static int scmi_reset_protocol_init(struct scmi_handle *handle)
{
int domain;
@@ -218,6 +300,12 @@ static int scmi_reset_protocol_init(struct scmi_handle *handle)
scmi_reset_domain_attributes_get(handle, domain, dom);
}
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_RESET, SCMI_PROTO_QUEUE_SZ,
+ &reset_event_ops, reset_events,
+ ARRAY_SIZE(reset_events),
+ pinfo->num_domains);
+
pinfo->version = version;
handle->reset_ops = &reset_ops;
handle->reset_priv = pinfo;
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index bafbfe358f97..9e44479f0284 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -85,7 +85,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
for (i = 0; i < num_domains; i++, scmi_pd++) {
u32 state;
- domains[i] = &scmi_pd->genpd;
+ if (handle->power_ops->state_get(handle, i, &state)) {
+ dev_warn(dev, "failed to get state for domain %d\n", i);
+ continue;
+ }
scmi_pd->domain = i;
scmi_pd->handle = handle;
@@ -94,13 +97,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
- if (handle->power_ops->state_get(handle, i, &state)) {
- dev_warn(dev, "failed to get state for domain %d\n", i);
- continue;
- }
-
pm_genpd_init(&scmi_pd->genpd, NULL,
state == SCMI_POWER_STATE_GENERIC_OFF);
+
+ domains[i] = &scmi_pd->genpd;
}
scmi_pd_data->domains = domains;
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index db1b1ab303da..1af0ad362e82 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -5,7 +5,12 @@
* Copyright (C) 2018 ARM Ltd.
*/
+#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
+
+#include <linux/scmi_protocol.h>
+
#include "common.h"
+#include "notify.h"
enum scmi_sensor_protocol_cmd {
SENSOR_DESCRIPTION_GET = 0x3,
@@ -14,10 +19,6 @@ enum scmi_sensor_protocol_cmd {
SENSOR_READING_GET = 0x6,
};
-enum scmi_sensor_protocol_notify {
- SENSOR_TRIP_POINT_EVENT = 0x0,
-};
-
struct scmi_msg_resp_sensor_attributes {
__le16 num_sensors;
u8 max_requests;
@@ -71,6 +72,12 @@ struct scmi_msg_sensor_reading_get {
#define SENSOR_READ_ASYNC BIT(0)
};
+struct scmi_sensor_trip_notify_payld {
+ __le32 agent_id;
+ __le32 sensor_id;
+ __le32 trip_point_desc;
+};
+
struct sensors_info {
u32 version;
int num_sensors;
@@ -271,11 +278,57 @@ static int scmi_sensor_count_get(const struct scmi_handle *handle)
static struct scmi_sensor_ops sensor_ops = {
.count_get = scmi_sensor_count_get,
.info_get = scmi_sensor_info_get,
- .trip_point_notify = scmi_sensor_trip_point_notify,
.trip_point_config = scmi_sensor_trip_point_config,
.reading_get = scmi_sensor_reading_get,
};
+static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_sensor_trip_point_notify(handle, src_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_sensor_trip_notify_payld *p = payld;
+ struct scmi_sensor_trip_point_report *r = report;
+
+ if (evt_id != SCMI_EVENT_SENSOR_TRIP_POINT_EVENT ||
+ sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->sensor_id = le32_to_cpu(p->sensor_id);
+ r->trip_point_desc = le32_to_cpu(p->trip_point_desc);
+ *src_id = r->sensor_id;
+
+ return r;
+}
+
+static const struct scmi_event sensor_events[] = {
+ {
+ .id = SCMI_EVENT_SENSOR_TRIP_POINT_EVENT,
+ .max_payld_sz = sizeof(struct scmi_sensor_trip_notify_payld),
+ .max_report_sz = sizeof(struct scmi_sensor_trip_point_report),
+ },
+};
+
+static const struct scmi_event_ops sensor_event_ops = {
+ .set_notify_enabled = scmi_sensor_set_notify_enabled,
+ .fill_custom_report = scmi_sensor_fill_custom_report,
+};
+
static int scmi_sensors_protocol_init(struct scmi_handle *handle)
{
u32 version;
@@ -299,6 +352,12 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle)
scmi_sensor_description_get(handle, sinfo);
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_SENSOR, SCMI_PROTO_QUEUE_SZ,
+ &sensor_event_ops, sensor_events,
+ ARRAY_SIZE(sensor_events),
+ sinfo->num_sensors);
+
sinfo->version = version;
handle->sensor_ops = &sensor_ops;
handle->sensor_priv = sinfo;
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 49bc4b0e8428..a1537d123e38 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -21,6 +21,7 @@
*
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
+ * @shmem_lock: Lock to protect access to Tx/Rx shared memory area
* @func_id: smc/hvc call function id
*/
diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c
index a1b199de9006..e97a9c9d010c 100644
--- a/drivers/firmware/efi/embedded-firmware.c
+++ b/drivers/firmware/efi/embedded-firmware.c
@@ -37,9 +37,8 @@ static const struct dmi_system_id * const embedded_fw_table[] = {
static int __init efi_check_md_for_embedded_firmware(
efi_memory_desc_t *md, const struct efi_embedded_fw_desc *desc)
{
- struct sha256_state sctx;
struct efi_embedded_fw *fw;
- u8 sha256[32];
+ u8 hash[32];
u64 i, size;
u8 *map;
@@ -54,10 +53,8 @@ static int __init efi_check_md_for_embedded_firmware(
if (memcmp(map + i, desc->prefix, EFI_EMBEDDED_FW_PREFIX_LEN))
continue;
- sha256_init(&sctx);
- sha256_update(&sctx, map + i, desc->length);
- sha256_final(&sctx, sha256);
- if (memcmp(sha256, desc->sha256, 32) == 0)
+ sha256(map + i, desc->length, hash);
+ if (memcmp(hash, desc->sha256, 32) == 0)
break;
}
if ((i + desc->length) > size) {
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
index 08bc9ddfbdfb..b76acbade2a0 100644
--- a/drivers/firmware/imx/Makefile
+++ b/drivers/firmware/imx/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IMX_DSP) += imx-dsp.o
-obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o
+obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o
obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
index db655e87cdc8..d9dcc20945c6 100644
--- a/drivers/firmware/imx/imx-scu-irq.c
+++ b/drivers/firmware/imx/imx-scu-irq.c
@@ -10,6 +10,7 @@
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/mailbox_client.h>
+#include <linux/suspend.h>
#define IMX_SC_IRQ_FUNC_ENABLE 1
#define IMX_SC_IRQ_FUNC_STATUS 2
@@ -91,6 +92,7 @@ static void imx_scu_irq_work_handler(struct work_struct *work)
if (!irq_status)
continue;
+ pm_system_wakeup();
imx_scu_irq_notifier_call_chain(irq_status, &i);
}
}
diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/firmware/imx/imx-scu-soc.c
index 20d37eaeb5f2..2f32353de2c9 100644
--- a/drivers/soc/imx/soc-imx-scu.c
+++ b/drivers/firmware/imx/imx-scu-soc.c
@@ -10,9 +10,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
-#define IMX_SCU_SOC_DRIVER_NAME "imx-scu-soc"
-
-static struct imx_sc_ipc *soc_ipc_handle;
+static struct imx_sc_ipc *imx_sc_soc_ipc_handle;
struct imx_sc_msg_misc_get_soc_id {
struct imx_sc_rpc_msg hdr;
@@ -44,7 +42,7 @@ static int imx_scu_soc_uid(u64 *soc_uid)
hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
hdr->size = 1;
- ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
+ ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true);
if (ret) {
pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
return ret;
@@ -71,7 +69,7 @@ static int imx_scu_soc_id(void)
msg.data.req.control = IMX_SC_C_ID;
msg.data.req.resource = IMX_SC_R_SYSTEM;
- ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
+ ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true);
if (ret) {
pr_err("%s: get soc info failed, ret %d\n", __func__, ret);
return ret;
@@ -80,7 +78,7 @@ static int imx_scu_soc_id(void)
return msg.data.resp.id;
}
-static int imx_scu_soc_probe(struct platform_device *pdev)
+int imx_scu_soc_init(struct device *dev)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
@@ -88,11 +86,11 @@ static int imx_scu_soc_probe(struct platform_device *pdev)
u64 uid = 0;
u32 val;
- ret = imx_scu_get_handle(&soc_ipc_handle);
+ ret = imx_scu_get_handle(&imx_sc_soc_ipc_handle);
if (ret)
return ret;
- soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr),
+ soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr),
GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
@@ -115,73 +113,26 @@ static int imx_scu_soc_probe(struct platform_device *pdev)
/* format soc_id value passed from SCU firmware */
val = id & 0x1f;
- soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "0x%x", val);
+ soc_dev_attr->soc_id = devm_kasprintf(dev, GFP_KERNEL, "0x%x", val);
if (!soc_dev_attr->soc_id)
return -ENOMEM;
/* format revision value passed from SCU firmware */
val = (id >> 5) & 0xf;
val = (((val >> 2) + 1) << 4) | (val & 0x3);
- soc_dev_attr->revision = kasprintf(GFP_KERNEL,
- "%d.%d",
- (val >> 4) & 0xf,
- val & 0xf);
- if (!soc_dev_attr->revision) {
- ret = -ENOMEM;
- goto free_soc_id;
- }
+ soc_dev_attr->revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
+ (val >> 4) & 0xf, val & 0xf);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
- soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", uid);
- if (!soc_dev_attr->serial_number) {
- ret = -ENOMEM;
- goto free_revision;
- }
+ soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL,
+ "%016llX", uid);
+ if (!soc_dev_attr->serial_number)
+ return -ENOMEM;
soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- ret = PTR_ERR(soc_dev);
- goto free_serial_number;
- }
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
return 0;
-
-free_serial_number:
- kfree(soc_dev_attr->serial_number);
-free_revision:
- kfree(soc_dev_attr->revision);
-free_soc_id:
- kfree(soc_dev_attr->soc_id);
- return ret;
-}
-
-static struct platform_driver imx_scu_soc_driver = {
- .driver = {
- .name = IMX_SCU_SOC_DRIVER_NAME,
- },
- .probe = imx_scu_soc_probe,
-};
-
-static int __init imx_scu_soc_init(void)
-{
- struct platform_device *pdev;
- struct device_node *np;
- int ret;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx-scu");
- if (!np)
- return -ENODEV;
-
- of_node_put(np);
-
- ret = platform_driver_register(&imx_scu_soc_driver);
- if (ret)
- return ret;
-
- pdev = platform_device_register_simple(IMX_SCU_SOC_DRIVER_NAME,
- -1, NULL, 0);
- if (IS_ERR(pdev))
- platform_driver_unregister(&imx_scu_soc_driver);
-
- return PTR_ERR_OR_ZERO(pdev);
}
-device_initcall(imx_scu_soc_init);
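[Editor's note] The conversion above is a standard devm pattern: by tying each string to the device with devm_kasprintf(), the goto-based kfree() ladder disappears and every error path becomes a bare return. A minimal sketch of the pattern under the same assumption (the strings must not outlive the device); struct example_attr and example_fill() are hypothetical names, not part of the driver:

```c
#include <linux/device.h>

/* hypothetical container for the formatted strings */
struct example_attr {
	const char *soc_id;
	const char *revision;
};

static int example_fill(struct device *dev, struct example_attr *a,
			u32 id, u32 rev)
{
	/*
	 * devm_ allocations are freed automatically on driver detach,
	 * so no goto-based cleanup ladder is needed on early returns.
	 */
	a->soc_id = devm_kasprintf(dev, GFP_KERNEL, "0x%x", id);
	if (!a->soc_id)
		return -ENOMEM;

	a->revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
				     (rev >> 4) & 0xf, rev & 0xf);
	if (!a->revision)
		return -ENOMEM;

	return 0;
}
```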
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 2ab048222fe9..dca79caccd01 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -328,6 +328,10 @@ static int imx_scu_probe(struct platform_device *pdev)
imx_sc_ipc_handle = sc_ipc;
+ ret = imx_scu_soc_init(dev);
+ if (ret)
+ dev_warn(dev, "failed to initialize SoC info: %d\n", ret);
+
ret = imx_scu_enable_general_irq_channel(dev);
if (ret)
dev_warn(dev,
diff --git a/drivers/firmware/imx/rm.c b/drivers/firmware/imx/rm.c
new file mode 100644
index 000000000000..a12db6ff323b
--- /dev/null
+++ b/drivers/firmware/imx/rm.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2020 NXP
+ *
+ * File containing client-side RPC functions for the RM service. These
+ * functions are ported to clients that communicate with the SC.
+ */
+
+#include <linux/firmware/imx/svc/rm.h>
+
+struct imx_sc_msg_rm_rsrc_owned {
+ struct imx_sc_rpc_msg hdr;
+ u16 resource;
+} __packed __aligned(4);
+
+/*
+ * This function checks whether @resource is owned by the current partition
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource to check ownership of
+ *
+ * @return Returns false if not owned and true if owned.
+ */
+bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
+{
+ struct imx_sc_msg_rm_rsrc_owned msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_RM;
+ hdr->func = IMX_SC_RM_FUNC_IS_RESOURCE_OWNED;
+ hdr->size = 2;
+
+ msg.resource = resource;
+
+ /*
+ * The SCU firmware only returns 0 (not owned) or 1 (owned) for the
+ * resource-owned check, so the call always succeeds.
+ */
+ imx_scu_call_rpc(ipc, &msg, true);
+
+ return hdr->func;
+}
+EXPORT_SYMBOL(imx_sc_rm_is_resource_owned);
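[Editor's note] A hedged sketch of how a client might consume the new helper; example_check_owned() is hypothetical, while imx_scu_get_handle() and IMX_SC_R_M4_0_UART come from the surrounding SCU API:

```c
#include <linux/firmware/imx/sci.h>
#include <linux/firmware/imx/svc/rm.h>

static int example_check_owned(void)
{
	struct imx_sc_ipc *ipc;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	/* skip touching the M4 UART if another partition owns it */
	if (!imx_sc_rm_is_resource_owned(ipc, IMX_SC_R_M4_0_UART))
		return -EPERM;

	return 0;
}
```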
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index fb5523aa16ee..af3d6d9ead28 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -167,8 +167,18 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
/* CM40 SS */
- { "cm40_i2c", IMX_SC_R_M4_0_I2C, 1, 0 },
- { "cm40_intmux", IMX_SC_R_M4_0_INTMUX, 1, 0 },
+ { "cm40-i2c", IMX_SC_R_M4_0_I2C, 1, false, 0 },
+ { "cm40-intmux", IMX_SC_R_M4_0_INTMUX, 1, false, 0 },
+ { "cm40-pid", IMX_SC_R_M4_0_PID0, 5, true, 0},
+ { "cm40-mu-a1", IMX_SC_R_M4_0_MU_1A, 1, false, 0},
+ { "cm40-lpuart", IMX_SC_R_M4_0_UART, 1, false, 0},
+
+ /* CM41 SS */
+ { "cm41-i2c", IMX_SC_R_M4_1_I2C, 1, false, 0 },
+ { "cm41-intmux", IMX_SC_R_M4_1_INTMUX, 1, false, 0 },
+ { "cm41-pid", IMX_SC_R_M4_1_PID0, 5, true, 0},
+ { "cm41-mu-a1", IMX_SC_R_M4_1_MU_1A, 1, false, 0},
+ { "cm41-lpuart", IMX_SC_R_M4_1_UART, 1, false, 0},
};
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
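[Editor's note] The old cm40 entries initialized only four of the five fields positionally, so the boolean postfix field silently absorbed the trailing 0; besides the dash-style renames, the fix spells out all five values. A sketch of why designated initializers avoid this class of slip; struct pd_range mirrors the apparent imx_sc_pd_range layout (name, rsrc, num, postfix, start_from) and is illustrative only:

```c
#include <linux/types.h>

struct pd_range {		/* mirrors the apparent field layout */
	const char *name;
	u32 rsrc;
	u8 num;
	bool postfix;
	u8 start_from;
};

/* positional: easy to drop a field when the struct grows */
static const struct pd_range old_style = { "cm40-i2c", 42, 1, false, 0 };

/* designated: each value is tied to its field by name */
static const struct pd_range new_style = {
	.name		= "cm40-i2c",
	.rsrc		= 42,
	.num		= 1,
	.postfix	= false,
	.start_from	= 0,
};
```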
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 0e7233a20f34..8393bb3265cc 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -391,7 +391,7 @@ static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
- return qcom_scm_call(__scm->dev, &desc, NULL);
+ return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
static void qcom_scm_set_download_mode(bool enable)
@@ -650,7 +650,7 @@ int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
int ret;
- ret = qcom_scm_call(__scm->dev, &desc, &res);
+ ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
if (ret >= 0)
*val = res.result[0];
@@ -669,8 +669,7 @@ int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
.owner = ARM_SMCCC_OWNER_SIP,
};
-
- return qcom_scm_call(__scm->dev, &desc, NULL);
+ return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_io_writel);
@@ -1151,6 +1150,7 @@ static const struct of_device_id qcom_scm_dt_match[] = {
SCM_HAS_IFACE_CLK |
SCM_HAS_BUS_CLK)
},
+ { .compatible = "qcom,scm-msm8994" },
{ .compatible = "qcom,scm-msm8996" },
{ .compatible = "qcom,scm" },
{}
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 039e0f91dba8..6945c3c96637 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
fw_cfg_sel_ko, "%d", entry->select);
- if (err)
- goto err_register;
+ if (err) {
+ kobject_put(&entry->kobj);
+ return err;
+ }
/* add raw binary content access */
err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
@@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
err_add_raw:
kobject_del(&entry->kobj);
-err_register:
kfree(entry);
return err;
}
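[Editor's note] The fix above reflects the kobject rule that once kobject_init_and_add() has been called, the object's release path owns the memory: a failed add must be unwound with kobject_put(), never a direct kfree(). A minimal sketch of the pattern, assuming a ktype whose release() frees the containing object; all names here are hypothetical:

```c
#include <linux/kobject.h>
#include <linux/slab.h>

struct example_entry {
	struct kobject kobj;
};

static void example_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct example_entry, kobj));
}

static struct kobj_type example_ktype = {
	.release	= example_release,
	.sysfs_ops	= &kobj_sysfs_ops,
};

static int example_register(struct kobject *parent)
{
	struct example_entry *entry;
	int err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	err = kobject_init_and_add(&entry->kobj, &example_ktype,
				   parent, "demo");
	if (err) {
		/* refcount is live: drop it and let release() free entry */
		kobject_put(&entry->kobj);
		return err;
	}

	return 0;
}
```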
diff --git a/drivers/firmware/smccc/Kconfig b/drivers/firmware/smccc/Kconfig
index 27b675d76235..15e7466179a6 100644
--- a/drivers/firmware/smccc/Kconfig
+++ b/drivers/firmware/smccc/Kconfig
@@ -14,3 +14,12 @@ config HAVE_ARM_SMCCC_DISCOVERY
to add SMCCC discovery mechanism though the PSCI firmware
implementation of PSCI_FEATURES(SMCCC_VERSION) which returns
success on firmware compliant to SMCCC v1.1 and above.
+
+config ARM_SMCCC_SOC_ID
+ bool "SoC bus device for the ARM SMCCC SOC_ID"
+ depends on HAVE_ARM_SMCCC_DISCOVERY
+ default y
+ select SOC_BUS
+ help
+ Include support for the SoC bus on the ARM SMCCC firmware based
+ platforms providing some sysfs information about the SoC variant.
diff --git a/drivers/firmware/smccc/Makefile b/drivers/firmware/smccc/Makefile
index 6f369fe3f0b9..72ab84042832 100644
--- a/drivers/firmware/smccc/Makefile
+++ b/drivers/firmware/smccc/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
#
obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o
+obj-$(CONFIG_ARM_SMCCC_SOC_ID) += soc_id.o
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
new file mode 100644
index 000000000000..581aa5e9b077
--- /dev/null
+++ b/drivers/firmware/smccc/soc_id.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Arm Limited
+ */
+
+#define pr_fmt(fmt) "SMCCC: SOC_ID: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#define SMCCC_SOC_ID_JEP106_BANK_IDX_MASK GENMASK(30, 24)
+/*
+ * As per the SMC Calling Convention specification v1.2 (ARM DEN 0028C)
+ * Section 7.4, SMCCC_ARCH_SOC_ID bits[23:16] are the JEP-106
+ * identification code with a parity bit for the SiP. We can drop the
+ * parity bit.
+ */
+#define SMCCC_SOC_ID_JEP106_ID_CODE_MASK GENMASK(22, 16)
+#define SMCCC_SOC_ID_IMP_DEF_SOC_ID_MASK GENMASK(15, 0)
+
+#define JEP106_BANK_CONT_CODE(x) \
+ (u8)(FIELD_GET(SMCCC_SOC_ID_JEP106_BANK_IDX_MASK, (x)))
+#define JEP106_ID_CODE(x) \
+ (u8)(FIELD_GET(SMCCC_SOC_ID_JEP106_ID_CODE_MASK, (x)))
+#define IMP_DEF_SOC_ID(x) \
+ (u16)(FIELD_GET(SMCCC_SOC_ID_IMP_DEF_SOC_ID_MASK, (x)))
+
+static struct soc_device *soc_dev;
+static struct soc_device_attribute *soc_dev_attr;
+
+static int __init smccc_soc_init(void)
+{
+ struct arm_smccc_res res;
+ int soc_id_rev, soc_id_version;
+ static char soc_id_str[20], soc_id_rev_str[12];
+ static char soc_id_jep106_id_str[12];
+
+ if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
+ return 0;
+
+ if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE) {
+ pr_err("%s: invalid SMCCC conduit\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_SOC_ID, &res);
+
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+ pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
+ return 0;
+ }
+
+ if ((int)res.a0 < 0) {
+ pr_info("ARCH_FEATURES(ARCH_SOC_ID) returned error: %lx\n",
+ res.a0);
+ return -EINVAL;
+ }
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 0, &res);
+ if ((int)res.a0 < 0) {
+ pr_err("ARCH_SOC_ID(0) returned error: %lx\n", res.a0);
+ return -EINVAL;
+ }
+
+ soc_id_version = res.a0;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 1, &res);
+ if ((int)res.a0 < 0) {
+ pr_err("ARCH_SOC_ID(1) returned error: %lx\n", res.a0);
+ return -EINVAL;
+ }
+
+ soc_id_rev = res.a0;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ sprintf(soc_id_rev_str, "0x%08x", soc_id_rev);
+ sprintf(soc_id_jep106_id_str, "jep106:%02x%02x",
+ JEP106_BANK_CONT_CODE(soc_id_version),
+ JEP106_ID_CODE(soc_id_version));
+ sprintf(soc_id_str, "%s:%04x", soc_id_jep106_id_str,
+ IMP_DEF_SOC_ID(soc_id_version));
+
+ soc_dev_attr->soc_id = soc_id_str;
+ soc_dev_attr->revision = soc_id_rev_str;
+ soc_dev_attr->family = soc_id_jep106_id_str;
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ pr_info("ID = %s Revision = %s\n", soc_dev_attr->soc_id,
+ soc_dev_attr->revision);
+
+ return 0;
+}
+module_init(smccc_soc_init);
+
+static void __exit smccc_soc_exit(void)
+{
+ if (soc_dev)
+ soc_device_unregister(soc_dev);
+ kfree(soc_dev_attr);
+}
+module_exit(smccc_soc_exit);
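[Editor's note] A worked example of the field decoding above, using an invented SOC_ID version value; the macros are the ones defined in this file, example_decode() is hypothetical:

```c
static void example_decode(void)
{
	/* invented value: bank index 0x02, ID code 0x36, SoC ID 0x0001 */
	u32 ver = 0x02360001;

	u8 bank  = JEP106_BANK_CONT_CODE(ver);	/* bits[30:24] -> 0x02 */
	u8 code  = JEP106_ID_CODE(ver);		/* bits[22:16] -> 0x36 */
	u16 impl = IMP_DEF_SOC_ID(ver);		/* bits[15:0]  -> 0x0001 */

	/* soc_id becomes "jep106:0236:0001", family "jep106:0236" */
	pr_info("jep106:%02x%02x:%04x\n", bank, code, impl);
}
```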
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index 4379475c99ed..9378075d04e9 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -20,10 +20,16 @@
#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
+#define RSU_DCMF0_MASK GENMASK_ULL(31, 0)
+#define RSU_DCMF1_MASK GENMASK_ULL(63, 32)
+#define RSU_DCMF2_MASK GENMASK_ULL(31, 0)
+#define RSU_DCMF3_MASK GENMASK_ULL(63, 32)
#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
-#define INVALID_RETRY_COUNTER 0xFFFFFFFF
+#define INVALID_RETRY_COUNTER 0xFF
+#define INVALID_DCMF_VERSION 0xFF
+
typedef void (*rsu_callback)(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data);
@@ -35,11 +41,16 @@ typedef void (*rsu_callback)(struct stratix10_svc_client *client,
* @lock: a mutex to protect callback completion state
* @status.current_image: address of image currently running in flash
* @status.fail_image: address of failed image in flash
- * @status.version: the version number of RSU firmware
+ * @status.version: the interface version number of RSU firmware
* @status.state: the state of RSU system
* @status.error_details: error code
* @status.error_location: the error offset inside the image that failed
+ * @dcmf_version.dcmf0: Quartus dcmf0 version
+ * @dcmf_version.dcmf1: Quartus dcmf1 version
+ * @dcmf_version.dcmf2: Quartus dcmf2 version
+ * @dcmf_version.dcmf3: Quartus dcmf3 version
* @retry_counter: the current image's retry counter
+ * @max_retry: the preset max retry value
*/
struct stratix10_rsu_priv {
struct stratix10_svc_chan *chan;
@@ -54,7 +65,16 @@ struct stratix10_rsu_priv {
unsigned int error_details;
unsigned int error_location;
} status;
+
+ struct {
+ unsigned int dcmf0;
+ unsigned int dcmf1;
+ unsigned int dcmf2;
+ unsigned int dcmf3;
+ } dcmf_version;
+
unsigned int retry_counter;
+ unsigned int max_retry;
};
/**
@@ -109,7 +129,7 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
struct stratix10_rsu_priv *priv = client->priv;
if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
- dev_warn(client->dev, "Secure FW doesn't support notify\n");
+ dev_warn(client->dev, "FW doesn't support notify\n");
else if (data->status == BIT(SVC_STATUS_ERROR))
dev_err(client->dev, "Failure, returned status is %lu\n",
BIT(data->status));
@@ -119,7 +139,7 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
/**
* rsu_retry_callback() - Callback from Intel service layer for getting
- * the current image's retry counter from firmware
+ * the current image's retry counter from the firmware
* @client: pointer to client
* @data: pointer to callback data structure
*
@@ -136,7 +156,7 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
if (data->status == BIT(SVC_STATUS_OK))
priv->retry_counter = *counter;
else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
- dev_warn(client->dev, "Secure FW doesn't support retry\n");
+ dev_warn(client->dev, "FW doesn't support retry\n");
else
dev_err(client->dev, "Failed to get retry counter %lu\n",
BIT(data->status));
@@ -145,6 +165,57 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
}
/**
+ * rsu_max_retry_callback() - Callback from Intel service layer for getting
+ * the max retry value from the firmware
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for max retry.
+ */
+static void rsu_max_retry_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned int *max_retry = (unsigned int *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_OK))
+ priv->max_retry = *max_retry;
+ else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
+ dev_warn(client->dev, "FW doesn't support max retry\n");
+ else
+ dev_err(client->dev, "Failed to get max retry %lu\n",
+ BIT(data->status));
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_dcmf_version_callback() - Callback from Intel service layer for getting
+ * the DCMF version
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for the DCMF version number.
+ */
+static void rsu_dcmf_version_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned long long *value1 = (unsigned long long *)data->kaddr1;
+ unsigned long long *value2 = (unsigned long long *)data->kaddr2;
+
+ if (data->status == BIT(SVC_STATUS_OK)) {
+ priv->dcmf_version.dcmf0 = FIELD_GET(RSU_DCMF0_MASK, *value1);
+ priv->dcmf_version.dcmf1 = FIELD_GET(RSU_DCMF1_MASK, *value1);
+ priv->dcmf_version.dcmf2 = FIELD_GET(RSU_DCMF2_MASK, *value2);
+ priv->dcmf_version.dcmf3 = FIELD_GET(RSU_DCMF3_MASK, *value2);
+ } else
+ dev_err(client->dev, "failed to get DCMF version\n");
+
+ complete(&priv->completion);
+}
+
+/**
* rsu_send_msg() - send a message to Intel service layer
* @priv: pointer to rsu private data
* @command: RSU status or update command
@@ -282,6 +353,61 @@ static ssize_t retry_counter_show(struct device *dev,
return sprintf(buf, "0x%08x\n", priv->retry_counter);
}
+static ssize_t max_retry_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->max_retry);
+}
+
+static ssize_t dcmf0_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf0);
+}
+
+static ssize_t dcmf1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf1);
+}
+
+static ssize_t dcmf2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf2);
+}
+
+static ssize_t dcmf3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf3);
+}
+
static ssize_t reboot_image_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -290,7 +416,7 @@ static ssize_t reboot_image_store(struct device *dev,
unsigned long address;
int ret;
- if (priv == 0)
+ if (!priv)
return -ENODEV;
ret = kstrtoul(buf, 0, &address);
@@ -315,7 +441,7 @@ static ssize_t notify_store(struct device *dev,
unsigned long status;
int ret;
- if (priv == 0)
+ if (!priv)
return -ENODEV;
ret = kstrtoul(buf, 0, &status);
@@ -353,6 +479,11 @@ static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(error_location);
static DEVICE_ATTR_RO(error_details);
static DEVICE_ATTR_RO(retry_counter);
+static DEVICE_ATTR_RO(max_retry);
+static DEVICE_ATTR_RO(dcmf0);
+static DEVICE_ATTR_RO(dcmf1);
+static DEVICE_ATTR_RO(dcmf2);
+static DEVICE_ATTR_RO(dcmf3);
static DEVICE_ATTR_WO(reboot_image);
static DEVICE_ATTR_WO(notify);
@@ -364,6 +495,11 @@ static struct attribute *rsu_attrs[] = {
&dev_attr_error_location.attr,
&dev_attr_error_details.attr,
&dev_attr_retry_counter.attr,
+ &dev_attr_max_retry.attr,
+ &dev_attr_dcmf0.attr,
+ &dev_attr_dcmf1.attr,
+ &dev_attr_dcmf2.attr,
+ &dev_attr_dcmf3.attr,
&dev_attr_reboot_image.attr,
&dev_attr_notify.attr,
NULL
@@ -391,6 +527,11 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
priv->status.version = 0;
priv->status.state = 0;
priv->retry_counter = INVALID_RETRY_COUNTER;
+ priv->dcmf_version.dcmf0 = INVALID_DCMF_VERSION;
+ priv->dcmf_version.dcmf1 = INVALID_DCMF_VERSION;
+ priv->dcmf_version.dcmf2 = INVALID_DCMF_VERSION;
+ priv->dcmf_version.dcmf3 = INVALID_DCMF_VERSION;
+ priv->max_retry = INVALID_RETRY_COUNTER;
mutex_init(&priv->lock);
priv->chan = stratix10_svc_request_channel_byname(&priv->client,
@@ -412,12 +553,27 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
+ /* get DCMF version from firmware */
+ ret = rsu_send_msg(priv, COMMAND_RSU_DCMF_VERSION,
+ 0, rsu_dcmf_version_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting DCMF version %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
if (ret) {
dev_err(dev, "Error, getting RSU retry %i\n", ret);
stratix10_svc_free_channel(priv->chan);
}
+ ret = rsu_send_msg(priv, COMMAND_RSU_MAX_RETRY, 0,
+ rsu_max_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU max retry %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
return ret;
}
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index e0db8dbfc9d1..3aa489dba30a 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -305,9 +305,15 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
cb_data->status = BIT(SVC_STATUS_COMPLETED);
break;
case COMMAND_RSU_RETRY:
+ case COMMAND_RSU_MAX_RETRY:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
break;
+ case COMMAND_RSU_DCMF_VERSION:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = &res.a2;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
@@ -406,6 +412,16 @@ static int svc_normal_to_secure_thread(void *data)
a1 = 0;
a2 = 0;
break;
+ case COMMAND_RSU_MAX_RETRY:
+ a0 = INTEL_SIP_SMC_RSU_MAX_RETRY;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_RSU_DCMF_VERSION:
+ a0 = INTEL_SIP_SMC_RSU_DCMF_VERSION;
+ a1 = 0;
+ a2 = 0;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
@@ -474,6 +490,7 @@ static int svc_normal_to_secure_thread(void *data)
* doesn't support RSU notify or retry
*/
if ((pdata->command == COMMAND_RSU_RETRY) ||
+ (pdata->command == COMMAND_RSU_MAX_RETRY) ||
(pdata->command == COMMAND_RSU_NOTIFY)) {
cbdata->status =
BIT(SVC_STATUS_NO_SUPPORT);
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 636b40d4364d..c1bbba9ee93a 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -4,11 +4,14 @@
*/
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
+static DEFINE_MUTEX(bpmp_debug_lock);
+
struct seqbuf {
char *buf;
size_t pos;
@@ -96,6 +99,354 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
return filename;
}
+static int mrq_debug_open(struct tegra_bpmp *bpmp, const char *name,
+ uint32_t *fd, uint32_t *len, bool write)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(write ? CMD_DEBUG_OPEN_WO : CMD_DEBUG_OPEN_RO),
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ ssize_t sz_name;
+ int err = 0;
+
+ sz_name = strscpy(req.fop.name, name, sizeof(req.fop.name));
+ if (sz_name < 0) {
+ pr_err("File name too large: %s\n", name);
+ return -EINVAL;
+ }
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ *len = resp.fop.datalen;
+ *fd = resp.fop.fd;
+
+ return 0;
+}
+
+static int mrq_debug_close(struct tegra_bpmp *bpmp, uint32_t fd)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUG_CLOSE),
+ .frd = {
+ .fd = fd,
+ },
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err = 0;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mrq_debug_read(struct tegra_bpmp *bpmp, const char *name,
+ char *data, size_t sz_data, uint32_t *nbytes)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUG_READ),
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ uint32_t fd = 0, len = 0;
+ int remaining, err;
+
+ mutex_lock(&bpmp_debug_lock);
+ err = mrq_debug_open(bpmp, name, &fd, &len, 0);
+ if (err)
+ goto out;
+
+ if (len > sz_data) {
+ err = -EFBIG;
+ goto close;
+ }
+
+ req.frd.fd = fd;
+ remaining = len;
+
+ while (remaining > 0) {
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0) {
+ goto close;
+ } else if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ if (resp.frd.readlen > remaining) {
+ pr_err("%s: read data length invalid\n", __func__);
+ err = -EINVAL;
+ goto close;
+ }
+
+ memcpy(data, resp.frd.data, resp.frd.readlen);
+ data += resp.frd.readlen;
+ remaining -= resp.frd.readlen;
+ }
+
+ *nbytes = len;
+
+close:
+ err = mrq_debug_close(bpmp, fd);
+out:
+ mutex_unlock(&bpmp_debug_lock);
+ return err;
+}
+
+static int mrq_debug_write(struct tegra_bpmp *bpmp, const char *name,
+ uint8_t *data, size_t sz_data)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUG_WRITE)
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ uint32_t fd = 0, len = 0;
+ size_t remaining;
+ int err;
+
+ mutex_lock(&bpmp_debug_lock);
+ err = mrq_debug_open(bpmp, name, &fd, &len, 1);
+ if (err)
+ goto out;
+
+ if (sz_data > len) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ req.fwr.fd = fd;
+ remaining = sz_data;
+
+ while (remaining > 0) {
+ len = min(remaining, sizeof(req.fwr.data));
+ memcpy(req.fwr.data, data, len);
+ req.fwr.datalen = len;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0) {
+ goto close;
+ } else if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ data += req.fwr.datalen;
+ remaining -= req.fwr.datalen;
+ }
+
+close:
+ err = mrq_debug_close(bpmp, fd);
+out:
+ mutex_unlock(&bpmp_debug_lock);
+ return err;
+}
+
+static int bpmp_debug_show(struct seq_file *m, void *p)
+{
+ struct file *file = m->private;
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ char *databuf = NULL;
+ char fnamebuf[256];
+ const char *filename;
+ uint32_t nbytes = 0;
+ size_t len;
+ int err;
+
+ len = seq_get_buf(m, &databuf);
+ if (!databuf)
+ return -ENOMEM;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ err = mrq_debug_read(bpmp, filename, databuf, len, &nbytes);
+ if (!err)
+ seq_commit(m, nbytes);
+
+ return err;
+}
+
+static ssize_t bpmp_debug_store(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ char *databuf = NULL;
+ char fnamebuf[256];
+ const char *filename;
+ ssize_t err;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ databuf = kmalloc(count, GFP_KERNEL);
+ if (!databuf)
+ return -ENOMEM;
+
+ if (copy_from_user(databuf, buf, count)) {
+ err = -EFAULT;
+ goto free_ret;
+ }
+
+ err = mrq_debug_write(bpmp, filename, databuf, count);
+
+free_ret:
+ kfree(databuf);
+
+ return err ?: count;
+}
+
+static int bpmp_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open_size(file, bpmp_debug_show, file, SZ_256K);
+}
+
+static const struct file_operations bpmp_debug_fops = {
+ .open = bpmp_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = bpmp_debug_store,
+ .release = single_release,
+};
+
+static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
+ struct dentry *parent,
+ char *ppath)
+{
+ const size_t pathlen = SZ_256;
+ const size_t bufsize = SZ_16K;
+ uint32_t dsize, attrs = 0;
+ struct dentry *dentry;
+ struct seqbuf seqbuf;
+ char *buf, *pathbuf;
+ const char *name;
+ int err = 0;
+
+ if (!bpmp || !parent || !ppath)
+ return -EINVAL;
+
+ buf = kmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pathbuf = kzalloc(pathlen, GFP_KERNEL);
+ if (!pathbuf) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ err = mrq_debug_read(bpmp, ppath, buf, bufsize, &dsize);
+ if (err)
+ goto out;
+
+ seqbuf_init(&seqbuf, buf, dsize);
+
+ while (!seqbuf_eof(&seqbuf)) {
+ err = seqbuf_read_u32(&seqbuf, &attrs);
+ if (err)
+ goto out;
+
+ err = seqbuf_read_str(&seqbuf, &name);
+ if (err < 0)
+ goto out;
+
+ if (attrs & DEBUGFS_S_ISDIR) {
+ size_t len;
+
+ dentry = debugfs_create_dir(name, parent);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
+ }
+
+ len = strlen(ppath) + strlen(name) + 1;
+ if (len >= pathlen) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ strncpy(pathbuf, ppath, pathlen);
+ strncat(pathbuf, name, strlen(name));
+ strcat(pathbuf, "/");
+
+ err = bpmp_populate_debugfs_inband(bpmp, dentry,
+ pathbuf);
+ if (err < 0)
+ goto out;
+ } else {
+ umode_t mode;
+
+ mode = attrs & DEBUGFS_S_IRUSR ? 0400 : 0;
+ mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0;
+ dentry = debugfs_create_file(name, mode, parent, bpmp,
+ &bpmp_debug_fops);
+ if (!dentry) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+out:
+ kfree(pathbuf);
+ kfree(buf);
+
+ return err;
+}
+
static int mrq_debugfs_read(struct tegra_bpmp *bpmp,
dma_addr_t name, size_t sz_name,
dma_addr_t data, size_t sz_data,
@@ -127,6 +478,8 @@ static int mrq_debugfs_read(struct tegra_bpmp *bpmp,
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
*nbytes = (size_t)resp.fop.nbytes;
@@ -184,6 +537,8 @@ static int mrq_debugfs_dumpdir(struct tegra_bpmp *bpmp, dma_addr_t addr,
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
*nbytes = (size_t)resp.dumpdir.nbytes;
@@ -202,7 +557,7 @@ static int debugfs_show(struct seq_file *m, void *p)
char buf[256];
const char *filename;
size_t len, nbytes;
- int ret;
+ int err;
filename = get_filename(bpmp, file, buf, sizeof(buf));
if (!filename)
@@ -216,24 +571,24 @@ static int debugfs_show(struct seq_file *m, void *p)
datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
GFP_KERNEL | GFP_DMA32);
if (!datavirt) {
- ret = -ENOMEM;
+ err = -ENOMEM;
goto free_namebuf;
}
len = strlen(filename);
strncpy(namevirt, filename, namesize);
- ret = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
+ err = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
&nbytes);
- if (!ret)
+ if (!err)
seq_write(m, datavirt, nbytes);
dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
free_namebuf:
dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
- return ret;
+ return err;
}
static int debugfs_open(struct inode *inode, struct file *file)
@@ -253,7 +608,7 @@ static ssize_t debugfs_store(struct file *file, const char __user *buf,
char fnamebuf[256];
const char *filename;
size_t len;
- int ret;
+ int err;
filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
if (!filename)
@@ -267,7 +622,7 @@ static ssize_t debugfs_store(struct file *file, const char __user *buf,
datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
GFP_KERNEL | GFP_DMA32);
if (!datavirt) {
- ret = -ENOMEM;
+ err = -ENOMEM;
goto free_namebuf;
}
@@ -275,11 +630,11 @@ static ssize_t debugfs_store(struct file *file, const char __user *buf,
strncpy(namevirt, filename, namesize);
if (copy_from_user(datavirt, buf, count)) {
- ret = -EFAULT;
+ err = -EFAULT;
goto free_databuf;
}
- ret = mrq_debugfs_write(bpmp, namephys, len, dataphys,
+ err = mrq_debugfs_write(bpmp, namephys, len, dataphys,
count);
free_databuf:
@@ -287,7 +642,7 @@ free_databuf:
free_namebuf:
dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
- return ret ?: count;
+ return err ?: count;
}
static const struct file_operations debugfs_fops = {
@@ -350,59 +705,66 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
return 0;
}
-static int create_debugfs_mirror(struct tegra_bpmp *bpmp, void *buf,
- size_t bufsize, struct dentry *root)
+static int bpmp_populate_debugfs_shmem(struct tegra_bpmp *bpmp)
{
struct seqbuf seqbuf;
+ const size_t sz = SZ_512K;
+ dma_addr_t phys;
+ size_t nbytes;
+ void *virt;
int err;
- bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
- if (!bpmp->debugfs_mirror)
+ virt = dma_alloc_coherent(bpmp->dev, sz, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt)
return -ENOMEM;
- seqbuf_init(&seqbuf, buf, bufsize);
- err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0);
+ err = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes);
if (err < 0) {
- debugfs_remove_recursive(bpmp->debugfs_mirror);
- bpmp->debugfs_mirror = NULL;
+ goto free;
+ } else if (nbytes > sz) {
+ err = -EINVAL;
+ goto free;
}
+ seqbuf_init(&seqbuf, virt, nbytes);
+ err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0);
+free:
+ dma_free_coherent(bpmp->dev, sz, virt, phys);
+
return err;
}
int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
{
- dma_addr_t phys;
- void *virt;
- const size_t sz = SZ_256K;
- size_t nbytes;
- int ret;
struct dentry *root;
+ bool inband;
+ int err;
- if (!tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS))
+ inband = tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUG);
+
+ if (!inband && !tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS))
return 0;
root = debugfs_create_dir("bpmp", NULL);
if (!root)
return -ENOMEM;
- virt = dma_alloc_coherent(bpmp->dev, sz, &phys,
- GFP_KERNEL | GFP_DMA32);
- if (!virt) {
- ret = -ENOMEM;
+ bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
+ if (!bpmp->debugfs_mirror) {
+ err = -ENOMEM;
goto out;
}
- ret = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes);
- if (ret < 0)
- goto free;
+ if (inband)
+ err = bpmp_populate_debugfs_inband(bpmp, bpmp->debugfs_mirror,
+ "/");
+ else
+ err = bpmp_populate_debugfs_shmem(bpmp);
- ret = create_debugfs_mirror(bpmp, virt, nbytes, root);
-free:
- dma_free_coherent(bpmp->dev, sz, virt, phys);
out:
- if (ret < 0)
- debugfs_remove(root);
+ if (err < 0)
+ debugfs_remove_recursive(root);
- return ret;
+ return err;
}
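[Editor's note] bpmp_debug_show() above relies on the seq_file idiom of borrowing the output buffer directly: single_open_size() preallocates it, seq_get_buf() exposes it for filling, and seq_commit() records how many bytes are valid. A minimal sketch of the same idiom; fill_data(), example_show() and example_open() are hypothetical stand-ins for the mrq_debug_read() path:

```c
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>

static int fill_data(char *buf, size_t len, u32 *nbytes);	/* hypothetical */

static int example_show(struct seq_file *m, void *p)
{
	u32 nbytes = 0;
	char *buf;
	size_t len;
	int err;

	/* borrow the preallocated seq_file buffer instead of copying twice */
	len = seq_get_buf(m, &buf);
	if (!buf)
		return -ENOMEM;

	err = fill_data(buf, len, &nbytes);
	if (!err)
		seq_commit(m, nbytes);	/* mark nbytes of the buffer valid */

	return err;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* size the seq buffer up front so one fill pass suffices */
	return single_open_size(file, example_show, inode->i_private, SZ_256K);
}
```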
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index fe6702df24bf..4d93d8925e14 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -515,10 +515,10 @@ bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
.size = sizeof(resp),
},
};
- int ret;
+ int err;
- ret = tegra_bpmp_transfer(bpmp, &msg);
- if (ret || msg.rx.ret)
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err || msg.rx.ret)
return false;
return resp.status == 0;
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 4126be9e3216..53cee17d0115 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index f0d068c03944..57cd04062994 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -6,7 +6,7 @@
* The system works in a message response protocol
* See: http://processors.wiki.ti.com/index.php/TISCI for details
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __TI_SCI_H
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index e27f68437b56..50bb2a6d6ccf 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -7,6 +7,7 @@
#include <linux/armada-37xx-rwtm-mailbox.h>
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/hw_random.h>
#include <linux/mailbox_client.h>
@@ -69,6 +70,18 @@ struct mox_rwtm {
/* public key burned in eFuse */
int has_pubkey;
u8 pubkey[135];
+
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * Signature process. This is currently done via debugfs, because it
+ * does not conform to the sysfs standard "one file per attribute".
+ * It should be rewritten with the crypto API once an akcipher API is
+ * available from userspace.
+ */
+ struct dentry *debugfs_root;
+ u32 last_sig[34];
+ int last_sig_done;
+#endif
};
struct mox_kobject {
@@ -279,6 +292,152 @@ unlock_mutex:
return ret;
}
+#ifdef CONFIG_DEBUG_FS
+static int rwtm_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct mox_rwtm *rwtm = file->private_data;
+ ssize_t ret;
+
+ /* only allow one read, of 136 bytes, from position 0 */
+ if (*ppos != 0)
+ return 0;
+
+ if (len < 136)
+ return -EINVAL;
+
+ if (!rwtm->last_sig_done)
+ return -ENODATA;
+
+ /* 2 arrays of 17 32-bit words are 136 bytes */
+ ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig, 136);
+ rwtm->last_sig_done = 0;
+
+ return ret;
+}
+
+static ssize_t do_sign_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct mox_rwtm *rwtm = file->private_data;
+ struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+ struct armada_37xx_rwtm_tx_msg msg;
+ loff_t dummy = 0;
+ ssize_t ret;
+
+ /* the input is a SHA-512 hash, so exactly 64 bytes have to be read */
+ if (len != 64)
+ return -EINVAL;
+
+ /* if the previous signature has not been read yet, refuse a new request */
+ if (rwtm->last_sig_done)
+ return -EBUSY;
+
+ if (!mutex_trylock(&rwtm->busy))
+ return -EBUSY;
+
+ /*
+ * Here we have to send:
+ * 1. Address of the input to sign.
+ * The input is an array of 17 32-bit words, the first (most
+ * significant) is 0; the remaining 16 words are copied from the SHA-512
+ * hash given by the user and converted from BE to LE.
+ * 2. Address of the buffer where ECDSA signature value R shall be
+ * stored by the rWTM firmware.
+ * 3. Address of the buffer where ECDSA signature value S shall be
+ * stored by the rWTM firmware.
+ */
+ memset(rwtm->buf, 0, 4);
+ ret = simple_write_to_buffer(rwtm->buf + 4, 64, &dummy, buf, len);
+ if (ret < 0)
+ goto unlock_mutex;
+ be32_to_cpu_array(rwtm->buf, rwtm->buf, 17);
+
+ msg.command = MBOX_CMD_SIGN;
+ msg.args[0] = 1;
+ msg.args[1] = rwtm->buf_phys;
+ msg.args[2] = rwtm->buf_phys + 68;
+ msg.args[3] = rwtm->buf_phys + 2 * 68;
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = wait_for_completion_interruptible(&rwtm->cmd_done);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = MBOX_STS_VALUE(reply->retval);
+ if (MBOX_STS_ERROR(reply->retval) != MBOX_STS_SUCCESS)
+ goto unlock_mutex;
+
+ /*
+ * Here we read the R and S values of the ECDSA signature
+ * computed by the rWTM firmware and convert their words from
+ * LE to BE.
+ */
+ memcpy(rwtm->last_sig, rwtm->buf + 68, 136);
+ cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig, 34);
+ rwtm->last_sig_done = 1;
+
+ mutex_unlock(&rwtm->busy);
+ return len;
+unlock_mutex:
+ mutex_unlock(&rwtm->busy);
+ return ret;
+}
+
+static const struct file_operations do_sign_fops = {
+ .owner = THIS_MODULE,
+ .open = rwtm_debug_open,
+ .read = do_sign_read,
+ .write = do_sign_write,
+ .llseek = no_llseek,
+};
+
+static int rwtm_register_debugfs(struct mox_rwtm *rwtm)
+{
+ struct dentry *root, *entry;
+
+ root = debugfs_create_dir("turris-mox-rwtm", NULL);
+
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ entry = debugfs_create_file_unsafe("do_sign", 0600, root, rwtm,
+ &do_sign_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ rwtm->debugfs_root = root;
+
+ return 0;
+err_remove:
+ debugfs_remove_recursive(root);
+ return PTR_ERR(entry);
+}
+
+static void rwtm_unregister_debugfs(struct mox_rwtm *rwtm)
+{
+ debugfs_remove_recursive(rwtm->debugfs_root);
+}
+#else
+static inline int rwtm_register_debugfs(struct mox_rwtm *rwtm)
+{
+ return 0;
+}
+
+static inline void rwtm_unregister_debugfs(struct mox_rwtm *rwtm)
+{
+}
+#endif
+
static int turris_mox_rwtm_probe(struct platform_device *pdev)
{
struct mox_rwtm *rwtm;
@@ -340,6 +499,12 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
goto free_channel;
}
+ ret = rwtm_register_debugfs(rwtm);
+ if (ret < 0) {
+ dev_err(dev, "Failed creating debugfs entries: %i\n", ret);
+ goto free_channel;
+ }
+
return 0;
free_channel:
@@ -355,6 +520,7 @@ static int turris_mox_rwtm_remove(struct platform_device *pdev)
{
struct mox_rwtm *rwtm = platform_get_drvdata(pdev);
+ rwtm_unregister_debugfs(rwtm);
sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
kobject_put(rwtm_to_kobj(rwtm));
mbox_free_channel(rwtm->mbox);
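[Editor's note] The debugfs interface above is transactional: a write of exactly 64 bytes (the SHA-512 hash) triggers signing, and the following read returns the 136-byte R/S pair exactly once. A hedged userspace-side sketch; it assumes debugfs is mounted at /sys/kernel/debug, and sign_hash() is a hypothetical name:

```c
#include <fcntl.h>
#include <unistd.h>

/* assumes debugfs is mounted at /sys/kernel/debug */
#define DO_SIGN "/sys/kernel/debug/turris-mox-rwtm/do_sign"

static int sign_hash(const unsigned char hash[64], unsigned char sig[136])
{
	int fd = open(DO_SIGN, O_RDWR);

	if (fd < 0)
		return -1;

	/* exactly 64 bytes must be written; any other length is -EINVAL */
	if (write(fd, hash, 64) != 64)
		goto err;

	/* one read of at least 136 bytes returns R || S and clears state */
	if (read(fd, sig, 136) != 136)
		goto err;

	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}
```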
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 02d8cbad1ae2..02b60fde0430 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -16,15 +16,6 @@
#include "dfl-afu.h"
-static void put_all_pages(struct page **pages, int npages)
-{
- int i;
-
- for (i = 0; i < npages; i++)
- if (pages[i])
- put_page(pages[i]);
-}
-
void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
{
struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
@@ -57,22 +48,22 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
goto unlock_vm;
}
- pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
+ pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
region->pages);
if (pinned < 0) {
ret = pinned;
goto free_pages;
} else if (pinned != npages) {
ret = -EFAULT;
- goto put_pages;
+ goto unpin_pages;
}
dev_dbg(dev, "%d pages pinned\n", pinned);
return 0;
-put_pages:
- put_all_pages(region->pages, pinned);
+unpin_pages:
+ unpin_user_pages(region->pages, pinned);
free_pages:
kfree(region->pages);
unlock_vm:
@@ -94,7 +85,7 @@ static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
long npages = region->length >> PAGE_SHIFT;
struct device *dev = &pdata->dev->dev;
- put_all_pages(region->pages, npages);
+ unpin_user_pages(region->pages, npages);
kfree(region->pages);
account_locked_vm(current->mm, npages, false);
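[Editor's note] The conversion above moves the driver to the FOLL_PIN accounting API: pages pinned with pin_user_pages_fast() must be released with unpin_user_pages() rather than put_page(). A minimal sketch of the paired calls, assuming an externally supplied user address and page count; example_pin()/example_unpin() are hypothetical:

```c
#include <linux/mm.h>
#include <linux/slab.h>

static int example_pin(unsigned long uaddr, int npages,
		       struct page ***pagesp)
{
	struct page **pages;
	int pinned;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
	if (pinned < 0) {		/* nothing pinned */
		kfree(pages);
		return pinned;
	}
	if (pinned != npages) {		/* partial pin: undo it */
		unpin_user_pages(pages, pinned);
		kfree(pages);
		return -EFAULT;
	}

	*pagesp = pages;
	return 0;
}

static void example_unpin(struct page **pages, int npages)
{
	/* FOLL_PIN pages must be released with unpin_user_pages() */
	unpin_user_pages(pages, npages);
	kfree(pages);
}
```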
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
index c1467ae1a6b6..c4691187cca9 100644
--- a/drivers/fpga/dfl-afu-error.c
+++ b/drivers/fpga/dfl-afu-error.c
@@ -14,6 +14,7 @@
* Mitchel Henry <henry.mitchel@intel.com>
*/
+#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>
#include "dfl-afu.h"
@@ -219,6 +220,21 @@ static void port_err_uinit(struct platform_device *pdev,
afu_port_err_mask(&pdev->dev, true);
}
+static long
+port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
+ return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+ case DFL_FPGA_PORT_ERR_SET_IRQ:
+ return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+ default:
+ dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+ return -ENODEV;
+ }
+}
+
const struct dfl_feature_id port_err_id_table[] = {
{.id = PORT_FEATURE_ID_ERROR,},
{0,}
@@ -227,4 +243,5 @@ const struct dfl_feature_id port_err_id_table[] = {
const struct dfl_feature_ops port_err_ops = {
.init = port_err_init,
.uinit = port_err_uinit,
+ .ioctl = port_err_ioctl,
};
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 3fa2c5992173..753cda4b2568 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -530,6 +530,30 @@ static const struct dfl_feature_ops port_stp_ops = {
.init = port_stp_init,
};
+static long
+port_uint_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DFL_FPGA_PORT_UINT_GET_IRQ_NUM:
+ return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+ case DFL_FPGA_PORT_UINT_SET_IRQ:
+ return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+ default:
+ dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+ return -ENODEV;
+ }
+}
+
+static const struct dfl_feature_id port_uint_id_table[] = {
+ {.id = PORT_FEATURE_ID_UINT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops port_uint_ops = {
+ .ioctl = port_uint_ioctl,
+};
+
static struct dfl_feature_driver port_feature_drvs[] = {
{
.id_table = port_hdr_id_table,
@@ -548,6 +572,10 @@ static struct dfl_feature_driver port_feature_drvs[] = {
.ops = &port_stp_ops,
},
{
+ .id_table = port_uint_id_table,
+ .ops = &port_uint_ops,
+ },
+ {
.ops = NULL,
}
};
@@ -578,6 +606,7 @@ static int afu_release(struct inode *inode, struct file *filp)
{
struct platform_device *pdev = filp->private_data;
struct dfl_feature_platform_data *pdata;
+ struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
@@ -587,6 +616,9 @@ static int afu_release(struct inode *inode, struct file *filp)
dfl_feature_dev_use_end(pdata);
if (!dfl_feature_dev_use_count(pdata)) {
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_fpga_set_irq_triggers(feature, 0,
+ feature->nr_irqs, NULL);
__port_reset(pdev);
afu_dma_region_destroy(pdata);
}
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
index f897d414b923..51c2892ec06d 100644
--- a/drivers/fpga/dfl-fme-error.c
+++ b/drivers/fpga/dfl-fme-error.c
@@ -15,6 +15,7 @@
* Mitchel, Henry <henry.mitchel@intel.com>
*/
+#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>
#include "dfl.h"
@@ -348,6 +349,22 @@ static void fme_global_err_uinit(struct platform_device *pdev,
fme_err_mask(&pdev->dev, true);
}
+static long
+fme_global_error_ioctl(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DFL_FPGA_FME_ERR_GET_IRQ_NUM:
+ return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+ case DFL_FPGA_FME_ERR_SET_IRQ:
+ return dfl_feature_ioctl_set_irq(pdev, feature, arg);
+ default:
+ dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+ return -ENODEV;
+ }
+}
+
const struct dfl_feature_id fme_global_err_id_table[] = {
{.id = FME_FEATURE_ID_GLOBAL_ERR,},
{0,}
@@ -356,4 +373,5 @@ const struct dfl_feature_id fme_global_err_id_table[] = {
const struct dfl_feature_ops fme_global_err_ops = {
.init = fme_global_err_init,
.uinit = fme_global_err_uinit,
+ .ioctl = fme_global_error_ioctl,
};
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index fc210d4e1863..77ea04d4edbe 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -620,11 +620,17 @@ static int fme_release(struct inode *inode, struct file *filp)
{
struct dfl_feature_platform_data *pdata = filp->private_data;
struct platform_device *pdev = pdata->dev;
+ struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
mutex_lock(&pdata->lock);
dfl_feature_dev_use_end(pdata);
+
+ if (!dfl_feature_dev_use_count(pdata))
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_fpga_set_irq_triggers(feature, 0,
+ feature->nr_irqs, NULL);
mutex_unlock(&pdata->lock);
return 0;
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index a78c409bf2c4..e220bec2927d 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -39,10 +39,32 @@ static void __iomem *cci_pci_ioremap_bar(struct pci_dev *pcidev, int bar)
return pcim_iomap_table(pcidev)[bar];
}
+static int cci_pci_alloc_irq(struct pci_dev *pcidev)
+{
+ int ret, nvec = pci_msix_vec_count(pcidev);
+
+ if (nvec <= 0) {
+ dev_dbg(&pcidev->dev, "fpga interrupt not supported\n");
+ return 0;
+ }
+
+ ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX);
+ if (ret < 0)
+ return ret;
+
+ return nvec;
+}
+
+static void cci_pci_free_irq(struct pci_dev *pcidev)
+{
+ pci_free_irq_vectors(pcidev);
+}
+
/* PCI Device ID */
#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
+#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
/* VF Device */
#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
@@ -55,6 +77,7 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
{0,}
};
MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
@@ -78,17 +101,34 @@ static void cci_remove_feature_devs(struct pci_dev *pcidev)
/* remove all children feature devices */
dfl_fpga_feature_devs_remove(drvdata->cdev);
+ cci_pci_free_irq(pcidev);
+}
+
+static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
+{
+ unsigned int i;
+ int *table;
+
+ table = kcalloc(nvec, sizeof(int), GFP_KERNEL);
+ if (!table)
+ return table;
+
+ for (i = 0; i < nvec; i++)
+ table[i] = pci_irq_vector(pcidev, i);
+
+ return table;
}
/* enumerate feature devices under pci device */
static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
{
struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+ int port_num, bar, i, nvec, ret = 0;
struct dfl_fpga_enum_info *info;
struct dfl_fpga_cdev *cdev;
resource_size_t start, len;
- int port_num, bar, i, ret = 0;
void __iomem *base;
+ int *irq_table;
u32 offset;
u64 v;
@@ -97,11 +137,30 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
if (!info)
return -ENOMEM;
+ /* add irq info for enumeration if the device support irq */
+ nvec = cci_pci_alloc_irq(pcidev);
+ if (nvec < 0) {
+ dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
+ ret = nvec;
+ goto enum_info_free_exit;
+ } else if (nvec) {
+ irq_table = cci_pci_create_irq_table(pcidev, nvec);
+ if (!irq_table) {
+ ret = -ENOMEM;
+ goto irq_free_exit;
+ }
+
+ ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
+ kfree(irq_table);
+ if (ret)
+ goto irq_free_exit;
+ }
+
/* start to find Device Feature List from Bar 0 */
base = cci_pci_ioremap_bar(pcidev, 0);
if (!base) {
ret = -ENOMEM;
- goto enum_info_free_exit;
+ goto irq_free_exit;
}
/*
@@ -154,7 +213,7 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
dfl_fpga_enum_info_add_dfl(info, start, len, base);
} else {
ret = -ENODEV;
- goto enum_info_free_exit;
+ goto irq_free_exit;
}
/* start enumeration with prepared enumeration information */
@@ -162,11 +221,14 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
if (IS_ERR(cdev)) {
dev_err(&pcidev->dev, "Enumeration failure\n");
ret = PTR_ERR(cdev);
- goto enum_info_free_exit;
+ goto irq_free_exit;
}
drvdata->cdev = cdev;
+irq_free_exit:
+ if (ret)
+ cci_pci_free_irq(pcidev);
enum_info_free_exit:
dfl_fpga_enum_info_free(info);
@@ -211,12 +273,10 @@ int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
}
ret = cci_enumerate_feature_devs(pcidev);
- if (ret) {
- dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
- goto disable_error_report_exit;
- }
+ if (!ret)
+ return ret;
- return ret;
+ dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
disable_error_report_exit:
pci_disable_pcie_error_reporting(pcidev);
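[Editor's note] Taken together, the dfl-pci changes follow one sequence: allocate MSI-X vectors, translate each vector to a Linux IRQ number, then hand the table to the enumeration info (which keeps its own copy). A condensed, illustrative sketch using this patch's helpers; example_add_irqs() is hypothetical and assumes it lives in dfl-pci.c next to the functions it calls:

```c
static int example_add_irqs(struct pci_dev *pcidev,
			    struct dfl_fpga_enum_info *info)
{
	int *irq_table;
	int nvec, ret;

	nvec = cci_pci_alloc_irq(pcidev);	/* MSI-X vectors, 0 if none */
	if (nvec <= 0)
		return nvec;

	irq_table = cci_pci_create_irq_table(pcidev, nvec);
	if (!irq_table) {
		pci_free_irq_vectors(pcidev);
		return -ENOMEM;
	}

	/* enum info keeps its own copy, so the local table can go */
	ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
	kfree(irq_table);
	if (ret)
		pci_free_irq_vectors(pcidev);

	return ret;
}
```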
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 990994874bf1..649958a36e62 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -10,7 +10,9 @@
* Wu Hao <hao.wu@intel.com>
* Xiao Guangrong <guangrong.xiao@linux.intel.com>
*/
+#include <linux/fpga-dfl.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
#include "dfl.h"
@@ -421,6 +423,9 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
*
* @dev: device to enumerate.
* @cdev: the container device for all feature devices.
+ * @nr_irqs: number of irqs for all feature devices.
+ * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
+ * this device.
* @feature_dev: current feature device.
* @ioaddr: header register region address of feature device in enumeration.
* @sub_features: a sub features linked list for feature device in enumeration.
@@ -429,6 +434,9 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
struct build_feature_devs_info {
struct device *dev;
struct dfl_fpga_cdev *cdev;
+ unsigned int nr_irqs;
+ int *irq_table;
+
struct platform_device *feature_dev;
void __iomem *ioaddr;
struct list_head sub_features;
@@ -442,12 +450,16 @@ struct build_feature_devs_info {
* @mmio_res: mmio resource of this sub feature.
* @ioaddr: mapped base address of mmio resource.
* @node: node in sub_features linked list.
+ * @irq_base: start of irq index in this sub feature.
+ * @nr_irqs: number of irqs of this sub feature.
*/
struct dfl_feature_info {
u64 fid;
struct resource mmio_res;
void __iomem *ioaddr;
struct list_head node;
+ unsigned int irq_base;
+ unsigned int nr_irqs;
};
static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
@@ -487,8 +499,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
* it will be automatically freed by device's release() callback,
* platform_device_release().
*/
- pdata = kzalloc(dfl_feature_platform_data_size(binfo->feature_num),
- GFP_KERNEL);
+ pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -520,13 +531,30 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
/* fill features and resource information for feature dev */
list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
struct dfl_feature *feature = &pdata->features[index];
+ struct dfl_feature_irq_ctx *ctx;
+ unsigned int i;
/* save resource information for each feature */
+ feature->dev = fdev;
feature->id = finfo->fid;
feature->resource_index = index;
feature->ioaddr = finfo->ioaddr;
fdev->resource[index++] = finfo->mmio_res;
+ if (finfo->nr_irqs) {
+ ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
+ sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < finfo->nr_irqs; i++)
+ ctx[i].irq =
+ binfo->irq_table[finfo->irq_base + i];
+
+ feature->irq_ctx = ctx;
+ feature->nr_irqs = finfo->nr_irqs;
+ }
+
list_del(&finfo->node);
kfree(finfo);
}
@@ -638,6 +666,78 @@ static u64 feature_id(void __iomem *start)
return 0;
}
+static int parse_feature_irqs(struct build_feature_devs_info *binfo,
+ resource_size_t ofst, u64 fid,
+ unsigned int *irq_base, unsigned int *nr_irqs)
+{
+ void __iomem *base = binfo->ioaddr + ofst;
+ unsigned int i, ibase, inr = 0;
+ int virq;
+ u64 v;
+
+ /*
+ * Ideally the DFL framework should only read info from the DFL header,
+ * but the current DFL version only provides mmio resource information
+ * for each feature in the DFL header; there is no field for interrupt
+ * resources. Interrupt resource information is provided by specific
+ * mmio registers of each private feature that supports interrupts. So
+ * in order to parse and assign irq resources, the DFL framework has to
+ * look into specific capability registers of these private features.
+ *
+ * Once a future DFL version supports generic interrupt resource
+ * information in common DFL headers, generic interrupt parsing code
+ * will be added. But in order to stay compatible with older DFL
+ * versions, the driver may still fall back to these quirks.
+ */
+ switch (fid) {
+ case PORT_FEATURE_ID_UINT:
+ v = readq(base + PORT_UINT_CAP);
+ ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
+ inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
+ break;
+ case PORT_FEATURE_ID_ERROR:
+ v = readq(base + PORT_ERROR_CAP);
+ ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+ break;
+ case FME_FEATURE_ID_GLOBAL_ERR:
+ v = readq(base + FME_ERROR_CAP);
+ ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
+ break;
+ }
+
+ if (!inr) {
+ *irq_base = 0;
+ *nr_irqs = 0;
+ return 0;
+ }
+
+ dev_dbg(binfo->dev, "feature: 0x%llx, irq_base: %u, nr_irqs: %u\n",
+ fid, ibase, inr);
+
+ if (ibase + inr > binfo->nr_irqs) {
+ dev_err(binfo->dev,
+ "Invalid interrupt number in feature 0x%llx\n", fid);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < inr; i++) {
+ virq = binfo->irq_table[ibase + i];
+ if (virq < 0 || virq > NR_IRQS) {
+ dev_err(binfo->dev,
+ "Invalid irq table entry for feature 0x%llx\n",
+ fid);
+ return -EINVAL;
+ }
+ }
+
+ *irq_base = ibase;
+ *nr_irqs = inr;
+
+ return 0;
+}
+
/*
* when create sub feature instances, for private features, it doesn't need
* to provide resource size and feature id as they could be read from DFH
@@ -650,7 +750,9 @@ create_feature_instance(struct build_feature_devs_info *binfo,
struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
resource_size_t size, u64 fid)
{
+ unsigned int irq_base, nr_irqs;
struct dfl_feature_info *finfo;
+ int ret;
/* read feature size and id if inputs are invalid */
size = size ? size : feature_size(dfl->ioaddr + ofst);
@@ -659,6 +761,10 @@ create_feature_instance(struct build_feature_devs_info *binfo,
if (dfl->len - ofst < size)
return -EINVAL;
+ ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
+ if (ret)
+ return ret;
+
finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
if (!finfo)
return -ENOMEM;
@@ -667,6 +773,8 @@ create_feature_instance(struct build_feature_devs_info *binfo,
finfo->mmio_res.start = dfl->start + ofst;
finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
finfo->mmio_res.flags = IORESOURCE_MEM;
+ finfo->irq_base = irq_base;
+ finfo->nr_irqs = nr_irqs;
finfo->ioaddr = dfl->ioaddr + ofst;
list_add_tail(&finfo->node, &binfo->sub_features);
@@ -853,6 +961,10 @@ void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
devm_kfree(dev, dfl);
}
+ /* remove irq table */
+ if (info->irq_table)
+ devm_kfree(dev, info->irq_table);
+
devm_kfree(dev, info);
put_device(dev);
}
@@ -892,6 +1004,45 @@ int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
+/**
+ * dfl_fpga_enum_info_add_irq - add irq table to enum info
+ *
+ * @info: ptr to dfl_fpga_enum_info
+ * @nr_irqs: number of irqs of the DFL fpga device to be enumerated.
+ * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
+ * this device.
+ *
+ * One FPGA device may have several interrupts. This function adds irq
+ * information of the DFL fpga device to the enum info for the subsequent
+ * enumeration step. It should be called before
+ * dfl_fpga_feature_devs_enumerate(). As we only support one irq domain for
+ * all DFLs in the same enum info, adding an irq table a second time for the
+ * same enum info returns an error.
+ *
+ * If we need to enumerate DFLs that belong to different irq domains, we
+ * should fill a separate enum info for each domain and enumerate them one
+ * by one.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
+ unsigned int nr_irqs, int *irq_table)
+{
+ if (!nr_irqs || !irq_table)
+ return -EINVAL;
+
+ if (info->irq_table)
+ return -EEXIST;
+
+ info->irq_table = devm_kmemdup(info->dev, irq_table,
+ sizeof(int) * nr_irqs, GFP_KERNEL);
+ if (!info->irq_table)
+ return -ENOMEM;
+
+ info->nr_irqs = nr_irqs;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
+
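For context, the intended caller of dfl_fpga_enum_info_add_irq() is the bus
driver that discovers the DFL device. Below is a minimal sketch of a
PCIe-based caller; the function name and the simplified error handling are
illustrative only, while pci_msix_vec_count(), pci_alloc_irq_vectors(),
pci_irq_vector(), kcalloc() and kfree() are the standard kernel APIs:

	static int dfl_pci_add_irq_info(struct pci_dev *pcidev,
					struct dfl_fpga_enum_info *info)
	{
		int nvec, i, ret, *irq_table;

		nvec = pci_msix_vec_count(pcidev);
		if (nvec <= 0)
			return 0;	/* no MSI-X: enumerate without irqs */

		ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX);
		if (ret < 0)
			return ret;

		irq_table = kcalloc(nvec, sizeof(*irq_table), GFP_KERNEL);
		if (!irq_table)
			return -ENOMEM;

		/*
		 * index i is the local hw irq number that the feature
		 * capability registers refer to; the value is the Linux virq
		 */
		for (i = 0; i < nvec; i++)
			irq_table[i] = pci_irq_vector(pcidev, i);

		ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
		/* safe to free: the core keeps its own devm_kmemdup() copy */
		kfree(irq_table);
		return ret;
	}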
static int remove_feature_dev(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -959,6 +1110,10 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
binfo->dev = info->dev;
binfo->cdev = cdev;
+ binfo->nr_irqs = info->nr_irqs;
+ if (info->nr_irqs)
+ binfo->irq_table = info->irq_table;
+
/*
* start enumeration for all feature devices based on Device Feature
* Lists.
@@ -1241,6 +1396,160 @@ done:
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
+static irqreturn_t dfl_irq_handler(int irq, void *arg)
+{
+ struct eventfd_ctx *trigger = arg;
+
+ eventfd_signal(trigger, 1);
+ return IRQ_HANDLED;
+}
+
+static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
+ int fd)
+{
+ struct platform_device *pdev = feature->dev;
+ struct eventfd_ctx *trigger;
+ int irq, ret;
+
+ irq = feature->irq_ctx[idx].irq;
+
+ if (feature->irq_ctx[idx].trigger) {
+ free_irq(irq, feature->irq_ctx[idx].trigger);
+ kfree(feature->irq_ctx[idx].name);
+ eventfd_ctx_put(feature->irq_ctx[idx].trigger);
+ feature->irq_ctx[idx].trigger = NULL;
+ }
+
+ if (fd < 0)
+ return 0;
+
+ feature->irq_ctx[idx].name =
+ kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%llx)", idx,
+ dev_name(&pdev->dev), feature->id);
+ if (!feature->irq_ctx[idx].name)
+ return -ENOMEM;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ ret = PTR_ERR(trigger);
+ goto free_name;
+ }
+
+ ret = request_irq(irq, dfl_irq_handler, 0,
+ feature->irq_ctx[idx].name, trigger);
+ if (!ret) {
+ feature->irq_ctx[idx].trigger = trigger;
+ return ret;
+ }
+
+ eventfd_ctx_put(trigger);
+free_name:
+ kfree(feature->irq_ctx[idx].name);
+
+ return ret;
+}
+
+/**
+ * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
+ *
+ * @feature: dfl sub feature.
+ * @start: start of irq index in this dfl sub feature.
+ * @count: number of irqs.
+ * @fds: eventfds to bind with irqs. The related irq is unbound if fds[n] is
+ *	 negative; all "count" irqs are unbound if the fds ptr is NULL.
+ *
+ * Bind the given eventfds to the irqs in this dfl sub feature. Unbind the
+ * related irq if fds[n] is negative, or unbind all "count" irqs if the fds
+ * ptr is NULL.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
+ unsigned int count, int32_t *fds)
+{
+ unsigned int i;
+ int ret = 0;
+
+ /* overflow */
+ if (unlikely(start + count < start))
+ return -EINVAL;
+
+ /* exceeds nr_irqs */
+ if (start + count > feature->nr_irqs)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ int fd = fds ? fds[i] : -1;
+
+ ret = do_set_irq_trigger(feature, start + i, fd);
+ if (ret) {
+ while (i--)
+ do_set_irq_trigger(feature, start + i, -1);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers);
+
+/**
+ * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface.
+ * @pdev: the feature device which has the sub feature
+ * @feature: the dfl sub feature
+ * @arg: ioctl argument
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned long arg)
+{
+ return put_user(feature->nr_irqs, (__u32 __user *)arg);
+}
+EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs);
+
+/**
+ * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface.
+ * @pdev: the feature device which has the sub feature
+ * @feature: the dfl sub feature
+ * @arg: ioctl argument
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned long arg)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fpga_irq_set hdr;
+ s32 *fds;
+ long ret;
+
+ if (!feature->nr_irqs)
+ return -ENOENT;
+
+ if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
+ return -EFAULT;
+
+ if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
+ (hdr.start + hdr.count < hdr.start))
+ return -EINVAL;
+
+ fds = memdup_user((void __user *)(arg + sizeof(hdr)),
+ hdr.count * sizeof(s32));
+ if (IS_ERR(fds))
+ return PTR_ERR(fds);
+
+ mutex_lock(&pdata->lock);
+ ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
+ mutex_unlock(&pdata->lock);
+
+ kfree(fds);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq);
+
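For context, a private-feature sub-driver can route its ioctl commands
straight to the two helpers above. A minimal sketch follows; the
DFL_FPGA_PORT_ERR_* command names are assumptions borrowed from the
companion uapi changes, not definitions in this file:

	static long port_err_ioctl(struct platform_device *pdev,
				   struct dfl_feature *feature,
				   unsigned int cmd, unsigned long arg)
	{
		switch (cmd) {
		case DFL_FPGA_PORT_ERR_GET_IRQ_NUM: /* assumed uapi name */
			return dfl_feature_ioctl_get_num_irqs(pdev, feature,
							      arg);
		case DFL_FPGA_PORT_ERR_SET_IRQ:     /* assumed uapi name */
			return dfl_feature_ioctl_set_irq(pdev, feature, arg);
		default:
			return -ENODEV;
		}
	}

On the user side the expected flow is then: open the feature device, create
an eventfd, hand it in through the SET_IRQ ioctl (a struct dfl_fpga_irq_set
header carrying start and count, followed by "count" s32 eventfds, matching
the copy_from_user()/memdup_user() layout above), and read() the eventfd,
which blocks until dfl_irq_handler() signals it.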
static void __exit dfl_fpga_exit(void)
{
dfl_chardev_uinit();
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 2f5d3052e36e..a32dfba2a88b 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -17,7 +17,9 @@
#include <linux/bitfield.h>
#include <linux/cdev.h>
#include <linux/delay.h>
+#include <linux/eventfd.h>
#include <linux/fs.h>
+#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/platform_device.h>
@@ -112,6 +114,13 @@
#define FME_PORT_OFST_ACC_VF 1
#define FME_PORT_OFST_IMP BIT_ULL(60)
+/* FME Error Capability Register */
+#define FME_ERROR_CAP 0x70
+
+/* FME Error Capability Register Bitfield */
+#define FME_ERROR_CAP_SUPP_INT BIT_ULL(0) /* Interrupt Support */
+#define FME_ERROR_CAP_INT_VECT GENMASK_ULL(12, 1) /* Interrupt vector */
+
/* PORT Header Register Set */
#define PORT_HDR_DFH DFH
#define PORT_HDR_GUID_L GUID_L
@@ -145,6 +154,20 @@
#define PORT_STS_PWR_STATE_AP2 2 /* 90% throttling */
#define PORT_STS_PWR_STATE_AP6 6 /* 100% throttling */
+/* Port Error Capability Register */
+#define PORT_ERROR_CAP 0x38
+
+/* Port Error Capability Register Bitfield */
+#define PORT_ERROR_CAP_SUPP_INT BIT_ULL(0) /* Interrupt Support */
+#define PORT_ERROR_CAP_INT_VECT GENMASK_ULL(12, 1) /* Interrupt vector */
+
+/* Port Uint Capability Register */
+#define PORT_UINT_CAP 0x8
+
+/* Port Uint Capability Register Bitfield */
+#define PORT_UINT_CAP_INT_NUM GENMASK_ULL(11, 0) /* Interrupts num */
+#define PORT_UINT_CAP_FST_VECT GENMASK_ULL(23, 12) /* First Vector */
+
/**
* struct dfl_fpga_port_ops - port ops
*
@@ -189,20 +212,39 @@ struct dfl_feature_driver {
};
/**
+ * struct dfl_feature_irq_ctx - dfl private feature interrupt context
+ *
+ * @irq: Linux IRQ number of this interrupt.
+ * @trigger: eventfd context to signal when interrupt happens.
+ * @name: irq name needed when requesting irq.
+ */
+struct dfl_feature_irq_ctx {
+ int irq;
+ struct eventfd_ctx *trigger;
+ char *name;
+};
+
+/**
* struct dfl_feature - sub feature of the feature devices
*
+ * @dev: ptr to pdev of the feature device which has the sub feature.
* @id: sub feature id.
* @resource_index: each sub feature has one mmio resource for its registers.
* this index is used to find its mmio resource from the
 * feature dev (platform device)'s resources.
* @ioaddr: mapped mmio resource address.
+ * @irq_ctx: interrupt context list.
+ * @nr_irqs: number of interrupt contexts.
* @ops: ops of this sub feature.
* @priv: priv data of this feature.
*/
struct dfl_feature {
+ struct platform_device *dev;
u64 id;
int resource_index;
void __iomem *ioaddr;
+ struct dfl_feature_irq_ctx *irq_ctx;
+ unsigned int nr_irqs;
const struct dfl_feature_ops *ops;
void *priv;
};
@@ -299,12 +341,6 @@ struct dfl_feature_ops {
#define DFL_FPGA_FEATURE_DEV_FME "dfl-fme"
#define DFL_FPGA_FEATURE_DEV_PORT "dfl-port"
-static inline int dfl_feature_platform_data_size(const int num)
-{
- return sizeof(struct dfl_feature_platform_data) +
- num * sizeof(struct dfl_feature);
-}
-
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev);
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
struct dfl_feature_driver *feature_drvs);
@@ -390,10 +426,14 @@ static inline u8 dfl_feature_revision(void __iomem *base)
*
* @dev: parent device.
* @dfls: list of device feature lists.
+ * @nr_irqs: number of irqs for all feature devices.
+ * @irq_table: Linux IRQ numbers for all irqs, indexed by hw irq numbers.
*/
struct dfl_fpga_enum_info {
struct device *dev;
struct list_head dfls;
+ unsigned int nr_irqs;
+ int *irq_table;
};
/**
@@ -417,6 +457,8 @@ struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev);
int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
resource_size_t start, resource_size_t len,
void __iomem *ioaddr);
+int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
+ unsigned int nr_irqs, int *irq_table);
void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);
/**
@@ -468,4 +510,13 @@ int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id);
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id);
void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev);
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vf);
+int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
+ unsigned int count, int32_t *fds);
+long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned long arg);
+long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned long arg);
+
#endif /* __FPGA_DFL_H */
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 4bab9028940a..2deccacc3aa7 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -328,7 +328,7 @@ struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
void *priv)
{
struct fpga_bridge *bridge;
- int id, ret = 0;
+ int id, ret;
if (!name || !strlen(name)) {
dev_err(dev, "Attempt to register with no name!\n");
@@ -340,10 +340,8 @@ struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
return NULL;
id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
+ if (id < 0)
goto error_kfree;
- }
mutex_init(&bridge->mutex);
INIT_LIST_HEAD(&bridge->node);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index e05104f5e40c..f38bab01432e 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -581,10 +581,8 @@ struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
return NULL;
id = ida_simple_get(&fpga_mgr_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
+ if (id < 0)
goto error_kfree;
- }
mutex_init(&mgr->ref_mutex);
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index 272ee0c22822..2967aa2a74e2 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Xilinx Spartan6 Slave Serial SPI Driver
+ * Xilinx Spartan6 and 7 Series Slave Serial SPI Driver
*
* Copyright (C) 2017 DENX Software Engineering
*
@@ -23,6 +23,7 @@
struct xilinx_spi_conf {
struct spi_device *spi;
struct gpio_desc *prog_b;
+ struct gpio_desc *init_b;
struct gpio_desc *done;
};
@@ -36,13 +37,45 @@ static enum fpga_mgr_states xilinx_spi_state(struct fpga_manager *mgr)
return FPGA_MGR_STATE_UNKNOWN;
}
+/**
+ * wait_for_init_b - wait for the INIT_B pin to have a given state, or wait
+ * a given delay if the pin is unavailable
+ *
+ * @mgr: The FPGA manager object
+ * @value: Value of INIT_B to wait for (1 = asserted = low)
+ * @alt_udelay: Delay to wait if the INIT_B GPIO is not available
+ *
+ * Returns 0 when the INIT_B GPIO reaches the given state, or -ETIMEDOUT if
+ * it does not do so within the 1 s timeout. If no INIT_B GPIO is available,
+ * always returns 0.
+ */
+static int wait_for_init_b(struct fpga_manager *mgr, int value,
+ unsigned long alt_udelay)
+{
+ struct xilinx_spi_conf *conf = mgr->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ if (conf->init_b) {
+ while (time_before(jiffies, timeout)) {
+ if (gpiod_get_value(conf->init_b) == value)
+ return 0;
+ usleep_range(100, 400);
+ }
+ return -ETIMEDOUT;
+ }
+
+ udelay(alt_udelay);
+
+ return 0;
+}
+
static int xilinx_spi_write_init(struct fpga_manager *mgr,
struct fpga_image_info *info,
const char *buf, size_t count)
{
struct xilinx_spi_conf *conf = mgr->priv;
- const size_t prog_latency_7500us = 7500;
- const size_t prog_pulse_1us = 1;
+ int err;
if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
@@ -51,17 +84,28 @@ static int xilinx_spi_write_init(struct fpga_manager *mgr,
gpiod_set_value(conf->prog_b, 1);
- udelay(prog_pulse_1us); /* min is 500 ns */
+ err = wait_for_init_b(mgr, 1, 1); /* min is 500 ns */
+ if (err) {
+ dev_err(&mgr->dev, "INIT_B pin did not go low\n");
+ gpiod_set_value(conf->prog_b, 0);
+ return err;
+ }
gpiod_set_value(conf->prog_b, 0);
+ err = wait_for_init_b(mgr, 0, 0);
+ if (err) {
+ dev_err(&mgr->dev, "INIT_B pin did not go high\n");
+ return err;
+ }
+
if (gpiod_get_value(conf->done)) {
dev_err(&mgr->dev, "Unexpected DONE pin state...\n");
return -EIO;
}
/* program latency */
- usleep_range(prog_latency_7500us, prog_latency_7500us + 100);
+ usleep_range(7500, 7600);
return 0;
}
@@ -156,6 +200,13 @@ static int xilinx_spi_probe(struct spi_device *spi)
return PTR_ERR(conf->prog_b);
}
+ conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
+ if (IS_ERR(conf->init_b)) {
+ dev_err(&spi->dev, "Failed to get INIT_B gpio: %ld\n",
+ PTR_ERR(conf->init_b));
+ return PTR_ERR(conf->init_b);
+ }
+
conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
if (IS_ERR(conf->done)) {
dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 219eb0054233..e3e88510aec7 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -26,7 +26,7 @@
/**
* of_gpio_spi_cs_get_count() - special GPIO counting for SPI
* Some elder GPIO controllers need special quirks. Currently we handle
- * the Freescale GPIO controller with bindings that doesn't use the
+ * the Freescale and PPC GPIO controllers with bindings that don't use the
* established "cs-gpios" for chip selects but instead rely on
* "gpios" for the chip select lines. If we detect this, we redirect
* the counting of "cs-gpios" to count "gpios" transparent to the
@@ -41,7 +41,8 @@ static int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id)
if (!con_id || strcmp(con_id, "cs"))
return 0;
if (!of_device_is_compatible(np, "fsl,spi") &&
- !of_device_is_compatible(np, "aeroflexgaisler,spictrl"))
+ !of_device_is_compatible(np, "aeroflexgaisler,spictrl") &&
+ !of_device_is_compatible(np, "ibm,ppc4xx-spi"))
return 0;
return of_gpio_named_count(np, "gpios");
}
@@ -405,9 +406,10 @@ static struct gpio_desc *of_find_spi_cs_gpio(struct device *dev,
if (!IS_ENABLED(CONFIG_SPI_MASTER))
return ERR_PTR(-ENOENT);
- /* Allow this specifically for Freescale devices */
+ /* Allow this specifically for Freescale and PPC devices */
if (!of_device_is_compatible(np, "fsl,spi") &&
- !of_device_is_compatible(np, "aeroflexgaisler,spictrl"))
+ !of_device_is_compatible(np, "aeroflexgaisler,spictrl") &&
+ !of_device_is_compatible(np, "ibm,ppc4xx-spi"))
return ERR_PTR(-ENOENT);
/* Allow only if asking for "cs-gpios" */
if (!con_id || strcmp(con_id, "cs"))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d7e17e34fee1..21292098bc02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -692,9 +692,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
- struct drm_amdgpu_info_device dev_info = {};
+ struct drm_amdgpu_info_device dev_info;
uint64_t vm_size;
+ memset(&dev_info, 0, sizeof(dev_info));
dev_info.device_id = dev->pdev->device;
dev_info.chip_rev = adev->rev_id;
dev_info.external_rev = adev->external_rev_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ebb8a28ff002..02e6f8c4dde0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -778,7 +778,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
while (isspace(*++tmp_str));
- while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+ while (tmp_str[0]) {
+ sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -1038,7 +1039,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
- while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
+ while (tmp[0]) {
+ sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
@@ -1635,7 +1637,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
- while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+ while (tmp_str[0]) {
+ sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 86ffa0c2880f..710edc70e37e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8717,20 +8717,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* the same resource. If we have a new DC context as part of
* the DM atomic state from validation we need to free it and
* retain the existing one instead.
+ *
+ * Furthermore, since the DM atomic state only contains the DC
+ * context and can safely be annulled, we can free the state
+ * and clear the associated private object now to free
+ * some memory and avoid a possible use-after-free later.
*/
- struct dm_atomic_state *new_dm_state, *old_dm_state;
- new_dm_state = dm_atomic_get_new_state(state);
- old_dm_state = dm_atomic_get_old_state(state);
+ for (i = 0; i < state->num_private_objs; i++) {
+ struct drm_private_obj *obj = state->private_objs[i].ptr;
- if (new_dm_state && old_dm_state) {
- if (new_dm_state->context)
- dc_release_state(new_dm_state->context);
+ if (obj->funcs == adev->dm.atomic_obj.funcs) {
+ int j = state->num_private_objs-1;
- new_dm_state->context = old_dm_state->context;
+ dm_atomic_destroy_state(obj,
+ state->private_objs[i].state);
+
+ /* If i is not at the end of the array then the
+ * last element needs to be moved to where i was
+ * before the array can safely be truncated.
+ */
+ if (i != j)
+ state->private_objs[i] =
+ state->private_objs[j];
- if (old_dm_state->context)
- dc_retain_state(old_dm_state->context);
+ state->private_objs[j].ptr = NULL;
+ state->private_objs[j].state = NULL;
+ state->private_objs[j].old_state = NULL;
+ state->private_objs[j].new_state = NULL;
+
+ state->num_private_objs = j;
+ break;
+ }
}
}
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 05d8373888e8..079f46f5cdb6 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -146,6 +146,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
bochs->dev->mode_config.prefer_shadow_fbdev = 1;
+ bochs->dev->mode_config.fbdev_use_iomem = true;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
bochs->dev->mode_config.funcs = &bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 87b58c1acff4..648eb23d0784 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1224,6 +1224,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511->bridge.funcs = &adv7511_bridge_funcs;
adv7511->bridge.of_node = dev->of_node;
+ adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&adv7511->bridge);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index b14d725bf609..c7bc194bbce3 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -917,11 +917,6 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
struct drm_panel *panel;
int ret;
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
- DRM_ERROR("Fix bridge driver to make connector optional!");
- return -EINVAL;
- }
-
ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
&panel_bridge);
if (ret)
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 389c1f029774..a282dc962199 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -986,7 +986,7 @@ static void sii8620_set_auto_zone(struct sii8620 *ctx)
static void sii8620_stop_video(struct sii8620 *ctx)
{
- u8 uninitialized_var(val);
+ u8 val;
sii8620_write_seq_static(ctx,
REG_TPI_INTR_EN, 0,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fed653f13c26..b98fa573e706 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3051,7 +3051,7 @@ static int drm_cvt_modes(struct drm_connector *connector,
const u8 empty[3] = { 0, 0, 0 };
for (i = 0; i < 4; i++) {
- int uninitialized_var(width), height;
+ int width, height;
cvt = &(timing->data.other_data.data.cvt[i]);
if (!memcmp(cvt->code, empty, 3))
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 5609e164805f..89cfd68ef400 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -399,7 +399,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
unsigned int y;
for (y = clip->y1; y < clip->y2; y++) {
- memcpy(dst, src, len);
+ if (!fb_helper->dev->mode_config.fbdev_use_iomem)
+ memcpy(dst, src, len);
+ else
+ memcpy_toio((void __iomem *)dst, src, len);
+
src += fb->pitches[0];
dst += fb->pitches[0];
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 7bf628e13023..ee2058ad482c 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -871,9 +871,6 @@ err:
* @file_priv: drm file-private structure
*
* Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
@@ -898,14 +895,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
- drm_gem_object_put_unlocked(obj);
if (ret)
- return ret;
+ goto err;
args->handle = handle;
args->size = obj->size;
- return 0;
+err:
+ drm_gem_object_put_unlocked(obj);
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index bb27c82757f1..bf7888ad9ad4 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -923,7 +923,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
}
}
- tr.len = chunk;
+ tr.len = chunk * 2;
len -= chunk;
ret = spi_sync(spi, &m);
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index b50b44e76279..8fc3f67e3e76 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -322,10 +322,8 @@ static int drm_of_lvds_get_remote_pixels_type(
* configurations by passing the endpoints explicitly to
* drm_of_lvds_get_dual_link_pixel_order().
*/
- if (!current_pt || pixels_type != current_pt) {
- of_node_put(remote_port);
+ if (!current_pt || pixels_type != current_pt)
return -EINVAL;
- }
}
return pixels_type;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index ee96a95fb6be..7a6f6df5e954 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -547,9 +547,9 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
unsigned long best_freq = 0;
u32 min_delta = 0xffffffff;
u8 p_min, p_max;
- u8 _p, uninitialized_var(best_p);
- u16 _m, uninitialized_var(best_m);
- u8 _s, uninitialized_var(best_s);
+ u8 _p, best_p;
+ u16 _m, best_m;
+ u8 _s, best_s;
p_min = DIV_ROUND_UP(fin, (12 * MHZ));
p_max = fin / (6 * MHZ);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 412572f88b67..61807e4b1aca 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -461,7 +461,7 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
unsigned int size, unsigned int fb_cpp)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- struct drm_mm_node *uninitialized_var(compressed_llb);
+ struct drm_mm_node *compressed_llb;
int ret;
drm_WARN_ON(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index cb07e1d2a353..f6bc6852892d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1103,7 +1103,7 @@ static struct i915_request *
__unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct i915_request *rq, *rn, *active = NULL;
- struct list_head *uninitialized_var(pl);
+ struct list_head *pl;
int prio = I915_PRIORITY_INVALID;
lockdep_assert_held(&engine->active.lock);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a61cb8ca4d50..c8fd2bcb17ee 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1991,7 +1991,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
unsigned int slow_timeout_ms,
u32 *out_value)
{
- u32 uninitialized_var(reg_value);
+ u32 reg_value;
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
int ret;
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 08802e5177f6..4d2290f88edb 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -1060,9 +1060,14 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
*/
if (fb) {
mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
- if (!mcde->video_mode)
- /* Send a single frame using software sync */
- mcde_display_send_one_frame(mcde);
+ if (!mcde->video_mode) {
+ /*
+ * Send a single frame using software sync if the flow
+ * is not active yet.
+ */
+ if (mcde->flow_active == 0)
+ mcde_display_send_one_frame(mcde);
+ }
dev_info_once(mcde->dev, "sent first display update\n");
} else {
/*
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 7cd8f415fd02..e6e134ae9c32 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -487,6 +487,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
mtk_crtc_ddp_config(crtc, cmdq_handle);
+ cmdq_pkt_finalize(cmdq_handle);
cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
}
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 519f99868e35..800b7757252e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2073,7 +2073,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
*/
if (core->assign_windows) {
core->func->wndw.owner(core);
- core->func->update(core, interlock, false);
+ nv50_disp_atomic_commit_core(state, interlock);
core->assign_windows = false;
interlock[NV50_DISP_INTERLOCK_CORE] = 0;
}
@@ -2506,7 +2506,7 @@ nv50_display_create(struct drm_device *dev)
if (disp->disp->object.oclass >= TU102_DISP)
nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
else
- if (disp->disp->object.oclass >= GF110_DISP)
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 496c4621cc78..07373bbc2acf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -191,6 +191,7 @@ nouveau_decode_mod(struct nouveau_drm *drm,
uint32_t *tile_mode,
uint8_t *kind)
{
+ struct nouveau_display *disp = nouveau_display(drm->dev);
BUG_ON(!tile_mode || !kind);
if (modifier == DRM_FORMAT_MOD_LINEAR) {
@@ -202,6 +203,12 @@ nouveau_decode_mod(struct nouveau_drm *drm,
* Extract the block height and kind from the corresponding
* modifier fields. See drm_fourcc.h for details.
*/
+
+ if ((modifier & (0xffull << 12)) == 0ull) {
+ /* Legacy modifier. Translate to this dev's 'kind.' */
+ modifier |= disp->format_modifiers[0] & (0xffull << 12);
+ }
+
*tile_mode = (uint32_t)(modifier & 0xF);
*kind = (uint8_t)((modifier >> 12) & 0xFF);
@@ -227,6 +234,16 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
}
}
+static const u64 legacy_modifiers[] = {
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+ DRM_FORMAT_MOD_INVALID
+};
+
static int
nouveau_validate_decode_mod(struct nouveau_drm *drm,
uint64_t modifier,
@@ -247,8 +264,14 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm,
(disp->format_modifiers[mod] != modifier);
mod++);
- if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
- return -EINVAL;
+ if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
+ for (mod = 0;
+ (legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
+ (legacy_modifiers[mod] != modifier);
+ mod++);
+ if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
+ return -EINVAL;
+ }
nouveau_decode_mod(drm, modifier, tile_mode, kind);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 3d11b84d4cf9..d5c23d1c20d8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -315,7 +315,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd = {};
int ret;
mode_cmd.width = sizes->surface_width;
@@ -590,6 +590,7 @@ fini:
drm_fb_helper_fini(&fbcon->helper);
free:
kfree(fbcon);
+ drm->fbcon = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index dcf08249374a..dffcac249211 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -117,15 +117,6 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
{
struct nvkm_ior *ior;
- /* First preference is to reuse the OR that is currently armed
- * on HW, if any, in order to prevent unnecessary switching.
- */
- list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->identity && !!ior->func->hda.hpd == hda &&
- !ior->asy.outp && ior->arm.outp == outp)
- return nvkm_outp_acquire_ior(outp, user, ior);
- }
-
/* Failing that, a completely unused OR is the next best thing. */
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !!ior->func->hda.hpd == hda &&
@@ -173,6 +164,27 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
return nvkm_outp_acquire_ior(outp, user, ior);
}
+ /* First preference is to reuse the OR that is currently armed
+ * on HW, if any, in order to prevent unnecessary switching.
+ */
+ list_for_each_entry(ior, &outp->disp->ior, head) {
+ if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) {
+ /*XXX: For various complicated reasons, we can't outright switch
+ * the boot-time OR on the first modeset without some fairly
+ * invasive changes.
+ *
+ * The systems that were fixed by modifying the OR selection
+ * code to account for HDA support shouldn't regress here as
+ * the HDA-enabled ORs match the relevant output's pad macro
+ * index, and the firmware seems to select an OR this way.
+ *
+ * This warning is to make it obvious if that proves wrong.
+ */
+ WARN_ON(hda && !ior->func->hda.hpd);
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+ }
+
/* If we don't need HDA, first try to acquire an OR that doesn't
* support it to leave free the ones that do.
*/
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 46fe1805c588..2649469070aa 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -615,9 +615,9 @@ static const struct panel_desc boe_tv101wum_nl6_desc = {
static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
.clock = 157000,
.hdisplay = 1200,
- .hsync_start = 1200 + 80,
- .hsync_end = 1200 + 80 + 24,
- .htotal = 1200 + 80 + 24 + 36,
+ .hsync_start = 1200 + 60,
+ .hsync_end = 1200 + 60 + 24,
+ .htotal = 1200 + 60 + 24 + 56,
.vdisplay = 1920,
.vsync_start = 1920 + 16,
.vsync_end = 1920 + 16 + 4,
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 5178f87d6574..4aeb960ccf15 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1250,7 +1250,21 @@ static const struct panel_desc boe_nv133fhm_n61 = {
.height = 165,
},
.delay = {
- .hpd_absent_delay = 200,
+ /*
+		 * When power is first given to the panel there's a short
+		 * spike on the HPD line. This spike reportedly lasts until
+		 * the TCON data download is complete. On one system it was
+		 * measured at 8 ms. We'll put 15 ms
+ * in the prepare delay just to be safe and take it away
+ * from the hpd_absent_delay (which would otherwise be 200 ms)
+ * to handle this. That means:
+ * - If HPD isn't hooked up you still have 200 ms delay.
+ * - If HPD is hooked up we won't try to look at it for the
+ * first 15 ms.
+ */
+ .prepare = 15,
+ .hpd_absent_delay = 185,
+
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 3feff0c45b3f..542dcf7eddd6 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -517,8 +517,8 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
unsigned long best_freq = 0;
unsigned long fvco_min, fvco_max, fin, fout;
unsigned int min_prediv, max_prediv;
- unsigned int _prediv, uninitialized_var(best_prediv);
- unsigned long _fbdiv, uninitialized_var(best_fbdiv);
+ unsigned int _prediv, best_prediv;
+ unsigned long _fbdiv, best_fbdiv;
unsigned long min_delta = ULONG_MAX;
dsi->format = format;
diff --git a/drivers/greybus/es2.c b/drivers/greybus/es2.c
index 366716f11b1a..1df6ab5d339d 100644
--- a/drivers/greybus/es2.c
+++ b/drivers/greybus/es2.c
@@ -759,7 +759,7 @@ static int check_urb_status(struct urb *urb)
case -EOVERFLOW:
dev_err(dev, "%s: overflow actual length is %d\n",
__func__, urb->actual_length);
- /* fall through */
+ fallthrough;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c
index 67dbe6fda9a1..58ea374d8aaa 100644
--- a/drivers/greybus/interface.c
+++ b/drivers/greybus/interface.c
@@ -1233,7 +1233,7 @@ int gb_interface_add(struct gb_interface *intf)
case GB_INTERFACE_TYPE_GREYBUS:
dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
intf->vendor_id, intf->product_id);
- /* fall-through */
+ fallthrough;
case GB_INTERFACE_TYPE_UNIPRO:
dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
intf->ddbl1_manufacturer_id,
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 267eac00a3fb..29f5fed28c2a 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -41,10 +41,6 @@ MODULE_LICENSE("GPL");
/* set maximum interval as 1 second */
#define MAX_INTERVAL 1000
-#define MSR_F15H_CU_PWR_ACCUMULATOR 0xc001007a
-#define MSR_F15H_CU_MAX_PWR_ACCUMULATOR 0xc001007b
-#define MSR_F15H_PTSC 0xc0010280
-
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F4 0x15b4
struct fam15h_power_data {
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 16ebf38a9f66..1801804a7762 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -568,10 +568,7 @@ out:
}
static struct amba_id catu_ids[] = {
- {
- .id = 0x000bb9ee,
- .mask = 0x000fffff,
- },
+ CS_AMBA_ID(0x000bb9ee),
{},
};
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 84f1dcb69827..1a3169e69bb1 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -226,9 +226,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
sink = coresight_get_enabled_sink(true);
}
- if (!sink)
- goto err;
-
mask = &event_data->mask;
/*
@@ -254,6 +251,16 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
}
/*
+ * No sink provided - look for a default sink for one of the
+ * devices. At present we only support topology where all CPUs
+	 * use the same sink [N:1], so we only need to find one sink. The
+	 * later call to coresight_build_path will remove any CPU that
+	 * cannot attach to the sink, or every CPU if no sink was found.
+ */
+ if (!sink)
+ sink = coresight_find_default_sink(csdev);
+
+ /*
* Building a path doesn't enable it, it simply builds a
* list of devices from source to sink that can be
* referenced later when the path is actually needed.
@@ -267,6 +274,10 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
*etm_event_cpu_path_ptr(event_data, cpu) = path;
}
+ /* no sink found for any CPU - cannot trace */
+ if (!sink)
+ goto err;
+
/* If we don't have any CPUs ready for tracing, abort */
cpu = cpumask_first(mask);
if (cpu >= nr_cpu_ids)
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 0c35cd5e0d1d..6d7d2169bfb2 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -196,12 +196,14 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
- /*
- * Request to keep the trace unit powered and also
- * emulation of powerdown
- */
- writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) | TRCPDCR_PU,
- drvdata->base + TRCPDCR);
+ if (!drvdata->skip_power_up) {
+ /*
+ * Request to keep the trace unit powered and also
+ * emulation of powerdown
+ */
+ writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) |
+ TRCPDCR_PU, drvdata->base + TRCPDCR);
+ }
/* Enable the trace unit */
writel_relaxed(1, drvdata->base + TRCPRGCTLR);
@@ -476,10 +478,12 @@ static void etm4_disable_hw(void *info)
CS_UNLOCK(drvdata->base);
- /* power can be removed from the trace unit now */
- control = readl_relaxed(drvdata->base + TRCPDCR);
- control &= ~TRCPDCR_PU;
- writel_relaxed(control, drvdata->base + TRCPDCR);
+ if (!drvdata->skip_power_up) {
+ /* power can be removed from the trace unit now */
+ control = readl_relaxed(drvdata->base + TRCPDCR);
+ control &= ~TRCPDCR_PU;
+ writel_relaxed(control, drvdata->base + TRCPDCR);
+ }
control = readl_relaxed(drvdata->base + TRCPRGCTLR);
@@ -507,6 +511,12 @@ static void etm4_disable_hw(void *info)
readl_relaxed(drvdata->base + TRCSSCSRn(i));
}
+ /* read back the current counter values */
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ config->cntr_val[i] =
+ readl_relaxed(drvdata->base + TRCCNTVRn(i));
+ }
+
coresight_disclaim_device_unlocked(drvdata->base);
CS_LOCK(drvdata->base);
@@ -1196,8 +1206,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
}
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
- state->trcacvr[i] = readl(drvdata->base + TRCACVRn(i));
- state->trcacatr[i] = readl(drvdata->base + TRCACATRn(i));
+ state->trcacvr[i] = readq(drvdata->base + TRCACVRn(i));
+ state->trcacatr[i] = readq(drvdata->base + TRCACATRn(i));
}
/*
@@ -1208,10 +1218,10 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
*/
for (i = 0; i < drvdata->numcidc; i++)
- state->trccidcvr[i] = readl(drvdata->base + TRCCIDCVRn(i));
+ state->trccidcvr[i] = readq(drvdata->base + TRCCIDCVRn(i));
for (i = 0; i < drvdata->numvmidc; i++)
- state->trcvmidcvr[i] = readl(drvdata->base + TRCVMIDCVRn(i));
+ state->trcvmidcvr[i] = readq(drvdata->base + TRCVMIDCVRn(i));
state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0);
state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
@@ -1309,18 +1319,18 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
}
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
- writel_relaxed(state->trcacvr[i],
+ writeq_relaxed(state->trcacvr[i],
drvdata->base + TRCACVRn(i));
- writel_relaxed(state->trcacatr[i],
+ writeq_relaxed(state->trcacatr[i],
drvdata->base + TRCACATRn(i));
}
for (i = 0; i < drvdata->numcidc; i++)
- writel_relaxed(state->trccidcvr[i],
+ writeq_relaxed(state->trccidcvr[i],
drvdata->base + TRCCIDCVRn(i));
for (i = 0; i < drvdata->numvmidc; i++)
- writel_relaxed(state->trcvmidcvr[i],
+ writeq_relaxed(state->trcvmidcvr[i],
drvdata->base + TRCVMIDCVRn(i));
writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0);
@@ -1468,6 +1478,9 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
return -ENOMEM;
}
+ if (fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
+ drvdata->skip_power_up = true;
+
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 4a695bf90582..b8283e1d6d88 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -133,7 +133,7 @@
#define ETMv4_MAX_CTXID_CMP 8
#define ETM_MAX_VMID_CMP 8
#define ETM_MAX_PE_CMP 8
-#define ETM_MAX_RES_SEL 16
+#define ETM_MAX_RES_SEL 32
#define ETM_MAX_SS_CMP 8
#define ETM_ARCH_V4 0x40
@@ -325,7 +325,7 @@ struct etmv4_save_state {
u32 trccntctlr[ETMv4_MAX_CNTR];
u32 trccntvr[ETMv4_MAX_CNTR];
- u32 trcrsctlr[ETM_MAX_RES_SEL * 2];
+ u32 trcrsctlr[ETM_MAX_RES_SEL];
u32 trcssccr[ETM_MAX_SS_CMP];
u32 trcsscsr[ETM_MAX_SS_CMP];
@@ -334,7 +334,7 @@ struct etmv4_save_state {
u64 trcacvr[ETM_MAX_SINGLE_ADDR_CMP];
u64 trcacatr[ETM_MAX_SINGLE_ADDR_CMP];
u64 trccidcvr[ETMv4_MAX_CTXID_CMP];
- u32 trcvmidcvr[ETM_MAX_VMID_CMP];
+ u64 trcvmidcvr[ETM_MAX_VMID_CMP];
u32 trccidcctlr0;
u32 trccidcctlr1;
u32 trcvmidcctlr0;
@@ -407,6 +407,8 @@ struct etmv4_save_state {
* @config: structure holding configuration parameters.
* @save_state: State to be preserved across power loss
* @state_needs_restore: True when there is context to restore after PM exit
+ * @skip_power_up: Indicates if an implementation can skip powering up
+ * the trace unit.
*/
struct etmv4_drvdata {
void __iomem *base;
@@ -454,6 +456,7 @@ struct etmv4_drvdata {
struct etmv4_config config;
struct etmv4_save_state *save_state;
bool state_needs_restore;
+ bool skip_power_up;
};
/* Address comparator access types */
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index e4912abda3aa..bfd44231d7ad 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -27,9 +27,8 @@ static int coresight_alloc_conns(struct device *dev,
struct coresight_platform_data *pdata)
{
if (pdata->nr_outport) {
- pdata->conns = devm_kzalloc(dev, pdata->nr_outport *
- sizeof(*pdata->conns),
- GFP_KERNEL);
+ pdata->conns = devm_kcalloc(dev, pdata->nr_outport,
+ sizeof(*pdata->conns), GFP_KERNEL);
if (!pdata->conns)
return -ENOMEM;
}
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 36c943ae94d5..f2dc625ea585 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -150,6 +150,8 @@ int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
struct coresight_device *coresight_get_enabled_sink(bool reset);
struct coresight_device *coresight_get_sink_by_id(u32 id);
+struct coresight_device *
+coresight_find_default_sink(struct coresight_device *csdev);
struct list_head *coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink);
void coresight_release_path(struct list_head *path);
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index e7dc1c31d20d..78acf29c49ca 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/coresight.h>
@@ -32,12 +33,14 @@ DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator");
* @atclk: optional clock for the core parts of the replicator.
* @csdev: component vitals needed by the framework
* @spinlock: serialize enable/disable operations.
+ * @check_idfilter_val: check if the context is lost upon clock removal.
*/
struct replicator_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
spinlock_t spinlock;
+ bool check_idfilter_val;
};
static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
@@ -66,29 +69,43 @@ static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
int inport, int outport)
{
int rc = 0;
- u32 reg;
-
- switch (outport) {
- case 0:
- reg = REPLICATOR_IDFILTER0;
- break;
- case 1:
- reg = REPLICATOR_IDFILTER1;
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
+ u32 id0val, id1val;
CS_UNLOCK(drvdata->base);
- if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
- (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
+ id0val = readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0);
+ id1val = readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1);
+
+ /*
+ * Some replicator designs lose context when AMBA clocks are removed,
+ * so have a check for this.
+ */
+ if (drvdata->check_idfilter_val && id0val == 0x0 && id1val == 0x0)
+ id0val = id1val = 0xff;
+
+ if (id0val == 0xff && id1val == 0xff)
rc = coresight_claim_device_unlocked(drvdata->base);
+ if (!rc) {
+ switch (outport) {
+ case 0:
+ id0val = 0x0;
+ break;
+ case 1:
+ id1val = 0x0;
+ break;
+ default:
+ WARN_ON(1);
+ rc = -EINVAL;
+ }
+ }
+
/* Ensure that the outport is enabled. */
- if (!rc)
- writel_relaxed(0x00, drvdata->base + reg);
+ if (!rc) {
+ writel_relaxed(id0val, drvdata->base + REPLICATOR_IDFILTER0);
+ writel_relaxed(id1val, drvdata->base + REPLICATOR_IDFILTER1);
+ }
+
CS_LOCK(drvdata->base);
return rc;
@@ -239,6 +256,10 @@ static int replicator_probe(struct device *dev, struct resource *res)
desc.groups = replicator_groups;
}
+ if (fwnode_property_present(dev_fwnode(dev),
+ "qcom,replicator-loses-context"))
+ drvdata->check_idfilter_val = true;
+
dev_set_drvdata(dev, drvdata);
pdata = coresight_get_platform_data(dev);
@@ -348,16 +369,9 @@ static int dynamic_replicator_probe(struct amba_device *adev,
}
static const struct amba_id dynamic_replicator_ids[] = {
- {
- .id = 0x000bb909,
- .mask = 0x000fffff,
- },
- {
- /* Coresight SoC-600 */
- .id = 0x000bb9ec,
- .mask = 0x000fffff,
- },
- { 0, 0 },
+ CS_AMBA_ID(0x000bb909),
+ CS_AMBA_ID(0x000bb9ec), /* Coresight SoC-600 */
+ {},
};
static struct amba_driver dynamic_replicator_driver = {
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index b908ca104645..673d2f56ed1e 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -727,8 +727,6 @@ static int acpi_stm_get_stimulus_area(struct device *dev, struct resource *res)
struct acpi_device *adev = ACPI_COMPANION(dev);
- if (!adev)
- return -ENODEV;
rc = acpi_dev_get_resources(adev, &res_list, NULL, NULL);
if (rc < 0)
return rc;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 36cce2bfb744..6375504ba8b0 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -639,15 +639,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
spin_lock_irqsave(&drvdata->spinlock, flags);
- /* There is no point in reading a TMC in HW FIFO mode */
- mode = readl_relaxed(drvdata->base + TMC_MODE);
- if (mode != TMC_MODE_CIRCULAR_BUFFER) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- return -EINVAL;
- }
-
/* Re-enable the TMC if need be */
if (drvdata->mode == CS_MODE_SYSFS) {
+ /* There is no point in reading a TMC in HW FIFO mode */
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EINVAL;
+ }
/*
* The trace run will continue with the same allocated trace
* buffer. As such zero-out the buffer so that we don't end
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 625882bc8b08..b29c2db94d96 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1110,7 +1110,7 @@ static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
}
-static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
__tmc_etr_disable_hw(drvdata);
/* Disable CATU device if this ETR is connected to one */
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 39fba1d16e6e..7040d583bed9 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -484,7 +484,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
break;
case TMC_CONFIG_TYPE_ETR:
desc.type = CORESIGHT_DEV_TYPE_SINK;
- desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
desc.ops = &tmc_etr_cs_ops;
ret = tmc_etr_setup_caps(dev, devid,
coresight_get_uci_data(id));
@@ -496,6 +496,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
break;
case TMC_CONFIG_TYPE_ETF:
desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
desc.ops = &tmc_etf_cs_ops;
dev_list = &etf_devs;
@@ -538,6 +539,28 @@ out:
return ret;
}
+static void tmc_shutdown(struct amba_device *adev)
+{
+ unsigned long flags;
+ struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ if (drvdata->mode == CS_MODE_DISABLED)
+ goto out;
+
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
+ tmc_etr_disable_hw(drvdata);
+
+	/*
+	 * Unlike the remove callback, which is required for making
+	 * coresight modular, we do not care about coresight unregister
+	 * here, since the system is going down after this.
+	 */
+out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+}
+
static const struct amba_id tmc_ids[] = {
CS_AMBA_ID(0x000bb961),
/* Coresight SoC 600 TMC-ETR/ETS */
@@ -556,6 +579,7 @@ static struct amba_driver tmc_driver = {
.suppress_bind_attrs = true,
},
.probe = tmc_probe,
+ .shutdown = tmc_shutdown,
.id_table = tmc_ids,
};
builtin_amba_driver(tmc_driver);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 71de978575f3..6e8d2dc33d17 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -268,6 +268,7 @@ ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
/* ETR functions */
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etr_cs_ops;
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
loff_t pos, size_t len, char **bufpp);
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index f3efbb3b2b4d..e9c90f2de34a 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -769,6 +769,171 @@ void coresight_release_path(struct list_head *path)
path = NULL;
}
+/* return true if the device is a suitable type for a default sink */
+static inline bool coresight_is_def_sink_type(struct coresight_device *csdev)
+{
+ /* sink & correct subtype */
+ if (((csdev->type == CORESIGHT_DEV_TYPE_SINK) ||
+ (csdev->type == CORESIGHT_DEV_TYPE_LINKSINK)) &&
+ (csdev->subtype.sink_subtype >= CORESIGHT_DEV_SUBTYPE_SINK_BUFFER))
+ return true;
+ return false;
+}
+
+/**
+ * coresight_select_best_sink - return the best sink for use as default from
+ * the two provided.
+ *
+ * @sink: current best sink.
+ * @depth: search depth where current sink was found.
+ * @new_sink: new sink for comparison with current sink.
+ * @new_depth: search depth where new sink was found.
+ *
+ * Sinks are prioritised according to coresight_dev_subtype_sink, with only
+ * subtypes CORESIGHT_DEV_SUBTYPE_SINK_BUFFER or higher being used.
+ *
+ * Where two sinks of equal priority are found, the sink closest to the
+ * source is used (smallest search depth).
+ *
+ * return @new_sink & update @depth if better than @sink, else return @sink.
+ */
+static struct coresight_device *
+coresight_select_best_sink(struct coresight_device *sink, int *depth,
+ struct coresight_device *new_sink, int new_depth)
+{
+ bool update = false;
+
+ if (!sink) {
+ /* first found at this level */
+ update = true;
+ } else if (new_sink->subtype.sink_subtype >
+ sink->subtype.sink_subtype) {
+ /* found better sink */
+ update = true;
+ } else if ((new_sink->subtype.sink_subtype ==
+ sink->subtype.sink_subtype) &&
+ (*depth > new_depth)) {
+ /* found same but closer sink */
+ update = true;
+ }
+
+ if (update)
+ *depth = new_depth;
+ return update ? new_sink : sink;
+}
+
+/**
+ * coresight_find_sink - recursive function to walk trace connections from
+ * source to find a suitable default sink.
+ *
+ * @csdev: source / current device to check.
+ * @depth: [in] search depth of calling dev, [out] depth of found sink.
+ *
+ * This walks the connection path from a source (ETM) until a suitable
+ * sink is encountered and returns that sink to the original caller.
+ *
+ * If the current device is a plain sink, return it and its depth; otherwise
+ * recurse into the child connections looking for a sink, selecting the best
+ * candidate with coresight_select_best_sink.
+ *
+ * Return: best sink found, or NULL if none was found at this node or its children.
+ */
+static struct coresight_device *
+coresight_find_sink(struct coresight_device *csdev, int *depth)
+{
+ int i, curr_depth = *depth + 1, found_depth = 0;
+ struct coresight_device *found_sink = NULL;
+
+ if (coresight_is_def_sink_type(csdev)) {
+ found_depth = curr_depth;
+ found_sink = csdev;
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK)
+ goto return_def_sink;
+ /* look past LINKSINK for something better */
+ }
+
+ /*
+ * Not a sink we want, or a child sink may be better:
+ * recursively explore each output port of this element.
+ */
+ for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ struct coresight_device *child_dev, *sink = NULL;
+ int child_depth = curr_depth;
+
+ child_dev = csdev->pdata->conns[i].child_dev;
+ if (child_dev)
+ sink = coresight_find_sink(child_dev, &child_depth);
+
+ if (sink)
+ found_sink = coresight_select_best_sink(found_sink,
+ &found_depth,
+ sink,
+ child_depth);
+ }
+
+return_def_sink:
+ /* return found sink and depth */
+ if (found_sink)
+ *depth = found_depth;
+ return found_sink;
+}
+
+/**
+ * coresight_find_default_sink: Find a sink suitable for use as a
+ * default sink.
+ *
+ * @csdev: starting source to find a connected sink.
+ *
+ * Walks the connection graph looking for a suitable sink to enable for the
+ * supplied source, using CoreSight device subtypes and distance from the
+ * source to select the best sink.
+ *
+ * If a sink is found, it is recorded as the default sink for this device
+ * and will be used automatically from then on.
+ *
+ * Used in cases where the CoreSight user (perf / sysfs) has not selected a
+ * sink.
+ */
+struct coresight_device *
+coresight_find_default_sink(struct coresight_device *csdev)
+{
+ int depth = 0;
+
+ /* look for a default sink if we have not yet found one for this device */
+ if (!csdev->def_sink)
+ csdev->def_sink = coresight_find_sink(csdev, &depth);
+ return csdev->def_sink;
+}
+
+static int coresight_remove_sink_ref(struct device *dev, void *data)
+{
+ struct coresight_device *sink = data;
+ struct coresight_device *source = to_coresight_device(dev);
+
+ if (source->def_sink == sink)
+ source->def_sink = NULL;
+ return 0;
+}
+
+/**
+ * coresight_clear_default_sink: Remove all default sink references to the
+ * supplied sink.
+ *
+ * If the supplied device is a sink, check all devices on the bus and clear
+ * any reference to this sink from each coresight_device's def_sink
+ * field.
+ *
+ * @csdev: coresight sink - remove references to this from all sources.
+ */
+static void coresight_clear_default_sink(struct coresight_device *csdev)
+{
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK) ||
+ (csdev->type == CORESIGHT_DEV_TYPE_LINKSINK)) {
+ bus_for_each_dev(&coresight_bustype, NULL, csdev,
+ coresight_remove_sink_ref);
+ }
+}
+
/** coresight_validate_source - make sure a source has the right credentials
* @csdev: the device structure for a source.
* @function: the function this was called from.
@@ -1358,6 +1523,7 @@ void coresight_unregister(struct coresight_device *csdev)
etm_perf_del_symlink_sink(csdev);
/* Remove references of that device in the topology */
coresight_remove_conns(csdev);
+ coresight_clear_default_sink(csdev);
coresight_release_platform_data(csdev, csdev->pdata);
device_unregister(&csdev->dev);
}
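Note: with the default-sink walk above in place, a source driver can fall back
to it when neither perf nor sysfs selected a sink. A hedged usage sketch
(etm_csdev stands in for the source's coresight_device):

    /* Sketch: fall back to the graph-walked default sink. */
    struct coresight_device *sink;

    sink = coresight_find_default_sink(etm_csdev);
    if (!sink)
            return -ENOENT; /* no buffer-class sink reachable from this source */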
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 735bf31a3fdf..88639e52c73a 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -866,17 +866,6 @@ config I2C_PNX
This driver can also be built as a module. If so, the module
will be called i2c-pnx.
-config I2C_PUV3
- tristate "PKUnity v3 I2C bus support"
- depends on UNICORE32 && ARCH_PUV3
- select I2C_ALGOBIT
- help
- This driver supports the I2C IP inside the PKUnity-v3 SoC.
- This I2C bus controller is under AMBA/AXI bus.
-
- This driver can also be built as a module. If so, the module
- will be called i2c-puv3.
-
config I2C_PXA
tristate "Intel PXA2XX I2C adapter"
depends on ARCH_PXA || ARCH_MMP || ARCH_MVEBU || (X86_32 && PCI && OF) || COMPILE_TEST
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 306d5dc3f417..19aff0e45cb5 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -88,7 +88,6 @@ obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
-obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
obj-$(CONFIG_I2C_QCOM_CCI) += i2c-qcom-cci.o
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
deleted file mode 100644
index 5cec5a36807d..000000000000
--- a/drivers/i2c/busses/i2c-puv3.c
+++ /dev/null
@@ -1,275 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * I2C driver for PKUnity-v3 SoC
- * Code specific to PKUnity SoC and UniCore ISA
- *
- * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
- * Copyright (C) 2001-2010 Guan Xuetao
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <mach/hardware.h>
-
-/*
- * Poll the i2c status register until the specified bit is set.
- * Returns 0 if timed out (100 msec).
- */
-static short poll_status(unsigned long bit)
-{
- int loop_cntr = 1000;
-
- if (bit & I2C_STATUS_TFNF) {
- do {
- udelay(10);
- } while (!(readl(I2C_STATUS) & bit) && (--loop_cntr > 0));
- } else {
- /* RXRDY handler */
- do {
- if (readl(I2C_TAR) == I2C_TAR_EEPROM)
- msleep(20);
- else
- udelay(10);
- } while (!(readl(I2C_RXFLR) & 0xf) && (--loop_cntr > 0));
- }
-
- return (loop_cntr > 0);
-}
-
-static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length)
-{
- int i2c_reg = *buf;
-
- /* Read data */
- while (length--) {
- if (!poll_status(I2C_STATUS_TFNF)) {
- dev_dbg(&adap->dev, "Tx FIFO Not Full timeout\n");
- return -ETIMEDOUT;
- }
-
- /* send addr */
- writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD);
-
- /* get ready to next write */
- i2c_reg++;
-
- /* send read CMD */
- writel(I2C_DATACMD_READ, I2C_DATACMD);
-
- /* wait until the Rx FIFO have available */
- if (!poll_status(I2C_STATUS_RFNE)) {
- dev_dbg(&adap->dev, "RXRDY timeout\n");
- return -ETIMEDOUT;
- }
-
- /* read the data to buf */
- *buf = (readl(I2C_DATACMD) & I2C_DATACMD_DAT_MASK);
- buf++;
- }
-
- return 0;
-}
-
-static int xfer_write(struct i2c_adapter *adap, unsigned char *buf, int length)
-{
- int i2c_reg = *buf;
-
- /* Do nothing but storing the reg_num to a static variable */
- if (i2c_reg == -1) {
- printk(KERN_WARNING "Error i2c reg\n");
- return -ETIMEDOUT;
- }
-
- if (length == 1)
- return 0;
-
- buf++;
- length--;
- while (length--) {
- /* send addr */
- writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD);
-
- /* send write CMD */
- writel(*buf | I2C_DATACMD_WRITE, I2C_DATACMD);
-
- /* wait until the Rx FIFO have available */
- msleep(20);
-
- /* read the data to buf */
- i2c_reg++;
- buf++;
- }
-
- return 0;
-}
-
-/*
- * Generic i2c master transfer entrypoint.
- *
- */
-static int puv3_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *pmsg,
- int num)
-{
- int i, ret;
- unsigned char swap;
-
- /* Disable i2c */
- writel(I2C_ENABLE_DISABLE, I2C_ENABLE);
-
- /* Set the work mode and speed*/
- writel(I2C_CON_MASTER | I2C_CON_SPEED_STD | I2C_CON_SLAVEDISABLE, I2C_CON);
-
- writel(pmsg->addr, I2C_TAR);
-
- /* Enable i2c */
- writel(I2C_ENABLE_ENABLE, I2C_ENABLE);
-
- dev_dbg(&adap->dev, "puv3_i2c_xfer: processing %d messages:\n", num);
-
- for (i = 0; i < num; i++) {
- dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i,
- pmsg->flags & I2C_M_RD ? "read" : "writ",
- pmsg->len, pmsg->len > 1 ? "s" : "",
- pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr);
-
- if (pmsg->len && pmsg->buf) { /* sanity check */
- if (pmsg->flags & I2C_M_RD)
- ret = xfer_read(adap, pmsg->buf, pmsg->len);
- else
- ret = xfer_write(adap, pmsg->buf, pmsg->len);
-
- if (ret)
- return ret;
-
- }
- dev_dbg(&adap->dev, "transfer complete\n");
- pmsg++; /* next message */
- }
-
- /* XXX: fixup be16_to_cpu in bq27x00_battery.c */
- if (pmsg->addr == I2C_TAR_PWIC) {
- swap = pmsg->buf[0];
- pmsg->buf[0] = pmsg->buf[1];
- pmsg->buf[1] = swap;
- }
-
- return i;
-}
-
-/*
- * Return list of supported functionality.
- */
-static u32 puv3_i2c_func(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static const struct i2c_algorithm puv3_i2c_algorithm = {
- .master_xfer = puv3_i2c_xfer,
- .functionality = puv3_i2c_func,
-};
-
-/*
- * Main initialization routine.
- */
-static int puv3_i2c_probe(struct platform_device *pdev)
-{
- struct i2c_adapter *adapter;
- struct resource *mem;
- int rc;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENODEV;
-
- if (!request_mem_region(mem->start, resource_size(mem), "puv3_i2c"))
- return -EBUSY;
-
- adapter = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
- if (adapter == NULL) {
- dev_err(&pdev->dev, "can't allocate interface!\n");
- rc = -ENOMEM;
- goto fail_nomem;
- }
- snprintf(adapter->name, sizeof(adapter->name), "PUV3-I2C at 0x%08x",
- mem->start);
- adapter->algo = &puv3_i2c_algorithm;
- adapter->class = I2C_CLASS_HWMON;
- adapter->dev.parent = &pdev->dev;
-
- platform_set_drvdata(pdev, adapter);
-
- adapter->nr = pdev->id;
- rc = i2c_add_numbered_adapter(adapter);
- if (rc)
- goto fail_add_adapter;
-
- dev_info(&pdev->dev, "PKUnity v3 i2c bus adapter.\n");
- return 0;
-
-fail_add_adapter:
- kfree(adapter);
-fail_nomem:
- release_mem_region(mem->start, resource_size(mem));
-
- return rc;
-}
-
-static int puv3_i2c_remove(struct platform_device *pdev)
-{
- struct i2c_adapter *adapter = platform_get_drvdata(pdev);
- struct resource *mem;
-
- i2c_del_adapter(adapter);
-
- put_device(&pdev->dev);
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, resource_size(mem));
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int puv3_i2c_suspend(struct device *dev)
-{
- int poll_count;
- /* Disable the IIC */
- writel(I2C_ENABLE_DISABLE, I2C_ENABLE);
- for (poll_count = 0; poll_count < 50; poll_count++) {
- if (readl(I2C_ENSTATUS) & I2C_ENSTATUS_ENABLE)
- udelay(25);
- }
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(puv3_i2c_pm, puv3_i2c_suspend, NULL);
-#define PUV3_I2C_PM (&puv3_i2c_pm)
-
-#else
-#define PUV3_I2C_PM NULL
-#endif
-
-static struct platform_driver puv3_i2c_driver = {
- .probe = puv3_i2c_probe,
- .remove = puv3_i2c_remove,
- .driver = {
- .name = "PKUnity-v3-I2C",
- .pm = PUV3_I2C_PM,
- }
-};
-
-module_platform_driver(puv3_i2c_driver);
-
-MODULE_DESCRIPTION("PKUnity v3 I2C driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:puv3_i2c");
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 7f130829bf01..dead5db3315a 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -559,6 +559,22 @@ static int geni_i2c_probe(struct platform_device *pdev)
gi2c->adap.dev.of_node = dev->of_node;
strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
+ ret = geni_icc_get(&gi2c->se, "qup-memory");
+ if (ret)
+ return ret;
+ /*
+ * Set the bus quota for the core and CPU paths to a reasonable
+ * value for register access, and set the DDR quota based on the
+ * bus speed.
+ */
+ gi2c->se.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
+ gi2c->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
+ gi2c->se.icc_paths[GENI_TO_DDR].avg_bw = Bps_to_icc(gi2c->clk_freq_out);
+
+ ret = geni_icc_set_bw(&gi2c->se);
+ if (ret)
+ return ret;
+
ret = geni_se_resources_on(&gi2c->se);
if (ret) {
dev_err(dev, "Error turning on resources %d\n", ret);
@@ -581,6 +597,10 @@ static int geni_i2c_probe(struct platform_device *pdev)
return ret;
}
+ ret = geni_icc_disable(&gi2c->se);
+ if (ret)
+ return ret;
+
dev_dbg(dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
gi2c->suspended = 1;
@@ -625,7 +645,7 @@ static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
gi2c->suspended = 1;
}
- return 0;
+ return geni_icc_disable(&gi2c->se);
}
static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
@@ -633,6 +653,10 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
int ret;
struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+ ret = geni_icc_enable(&gi2c->se);
+ if (ret)
+ return ret;
+
ret = geni_se_resources_on(&gi2c->se);
if (ret)
return ret;
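Note: the geni-i2c changes tie interconnect bandwidth votes to the
controller's runtime-PM state. A condensed sketch of the runtime-PM half,
using only the helpers visible in this hunk (the foo_ prefixes are
hypothetical):

    static int __maybe_unused foo_i2c_runtime_suspend(struct device *dev)
    {
            struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);

            /* drop the bandwidth vote once the controller is idle */
            return geni_icc_disable(&gi2c->se);
    }

    static int __maybe_unused foo_i2c_runtime_resume(struct device *dev)
    {
            struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);

            /* restore the vote before the engine touches DDR again */
            return geni_icc_enable(&gi2c->se);
    }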
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index bc698240c4aa..15324bfbc6cb 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -415,7 +415,7 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
{
unsigned int i;
unsigned int len = i2c->msg->len - i2c->processed;
- u32 uninitialized_var(val);
+ u32 val;
u8 byte;
/* we only care for MBRF here. */
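Note: this hunk, and the many like it below, are part of the tree-wide removal
of the uninitialized_var() macro, which expanded to a self-assignment purely to
silence "maybe-uninitialized" warnings. The pattern being reverted:

    /* Before: the self-assignment hid a potentially real warning. */
    u32 uninitialized_var(val);     /* expands to: u32 val = val; */

    /* After: a plain declaration; any remaining warning is either fixed
     * for real or shown to be a false positive. */
    u32 val;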
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index 5427f047faf0..1589179d5eb9 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
int ret;
- if (!client || !slave_cb) {
- WARN(1, "insufficient data\n");
+ if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n"))
return -EINVAL;
- }
if (!(client->flags & I2C_CLIENT_SLAVE))
dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n",
@@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client)
{
int ret;
+ if (IS_ERR_OR_NULL(client))
+ return -EINVAL;
+
if (!client->adapter->algo->unreg_slave) {
dev_err(&client->dev, "%s: not supported by adapter\n", __func__);
return -EOPNOTSUPP;
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 7d4e5c08f133..05e18d658141 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -180,7 +180,7 @@ err:
static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif)
{
struct device *dev = hwif->gendev.parent;
- acpi_handle uninitialized_var(dev_handle);
+ acpi_handle dev_handle;
u64 pcidevfn;
acpi_handle chan_handle;
int err;
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 80bc3bf82f4d..2162bc80f09e 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -609,7 +609,7 @@ static int ide_delayed_transfer_pc(ide_drive_t *drive)
static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
{
- struct ide_atapi_pc *uninitialized_var(pc);
+ struct ide_atapi_pc *pc;
ide_hwif_t *hwif = drive->hwif;
struct request *rq = hwif->rq;
ide_expiry_t *expiry;
diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
index 18c20a7aa0ce..94bdcf1ea186 100644
--- a/drivers/ide/ide-io-std.c
+++ b/drivers/ide/ide-io-std.c
@@ -173,7 +173,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
if (io_32bit) {
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
if ((io_32bit & 2) && !mmio) {
local_irq_save(flags);
@@ -217,7 +217,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
if (io_32bit) {
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
if ((io_32bit & 2) && !mmio) {
local_irq_save(flags);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index c31f1d2b3b07..1a53c7a75224 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -622,12 +622,12 @@ static int drive_is_ready(ide_drive_t *drive)
void ide_timer_expiry (struct timer_list *t)
{
ide_hwif_t *hwif = from_timer(hwif, t, timer);
- ide_drive_t *uninitialized_var(drive);
+ ide_drive_t *drive;
ide_handler_t *handler;
unsigned long flags;
int wait = -1;
int plug_device = 0;
- struct request *uninitialized_var(rq_in_flight);
+ struct request *rq_in_flight;
spin_lock_irqsave(&hwif->lock, flags);
@@ -780,13 +780,13 @@ irqreturn_t ide_intr (int irq, void *dev_id)
{
ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
struct ide_host *host = hwif->host;
- ide_drive_t *uninitialized_var(drive);
+ ide_drive_t *drive;
ide_handler_t *handler;
unsigned long flags;
ide_startstop_t startstop;
irqreturn_t irq_ret = IRQ_NONE;
int plug_device = 0;
- struct request *uninitialized_var(rq_in_flight);
+ struct request *rq_in_flight;
if (host->host_flags & IDE_HFLAG_SERIALIZE) {
if (hwif != host->cur_port)
diff --git a/drivers/ide/ide-sysfs.c b/drivers/ide/ide-sysfs.c
index b9dfeb2e8bd6..c08a8a0916e2 100644
--- a/drivers/ide/ide-sysfs.c
+++ b/drivers/ide/ide-sysfs.c
@@ -131,7 +131,7 @@ static struct device_attribute *ide_port_attrs[] = {
int ide_sysfs_register_port(ide_hwif_t *hwif)
{
- int i, uninitialized_var(rc);
+ int i, rc;
for (i = 0; ide_port_attrs[i]; i++) {
rc = device_create_file(hwif->portdev, ide_port_attrs[i]);
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index aab6a10435b6..a26f85ab58a9 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -229,7 +229,6 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
ide_hwif_t *hwif = drive->hwif;
struct scatterlist *sg = hwif->sg_table;
struct scatterlist *cursg = cmd->cursg;
- unsigned long uninitialized_var(flags);
struct page *page;
unsigned int offset;
u8 *buf;
diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c
index 870e235e30af..cf996f788292 100644
--- a/drivers/ide/umc8672.c
+++ b/drivers/ide/umc8672.c
@@ -108,7 +108,7 @@ static void umc_set_speeds(u8 speeds[])
static void umc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
ide_hwif_t *mate = hwif->mate;
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
const u8 pio = drive->pio_mode - XFER_PIO_0;
printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index f4495841bf68..8e0fb1a5bdbd 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -66,8 +66,6 @@ static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static unsigned long auto_demotion_disable_flags;
static bool disable_promotion_to_c1e;
-static bool lapic_timer_always_reliable;
-
struct idle_cpu {
struct cpuidle_state *state_table;
@@ -132,7 +130,7 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
struct cpuidle_state *state = &drv->states[index];
unsigned long eax = flg2MWAIT(state->flags);
unsigned long ecx = 1; /* break on interrupt flag */
- bool uninitialized_var(tick);
+ bool tick;
int cpu = smp_processor_id();
/*
@@ -142,7 +140,7 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
leave_mm(cpu);
- if (!static_cpu_has(X86_FEATURE_ARAT) && !lapic_timer_always_reliable) {
+ if (!static_cpu_has(X86_FEATURE_ARAT)) {
/*
* Switch over to one-shot tick broadcast if the target C-state
* is deeper than C1.
@@ -175,13 +173,15 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
* Invoked as a suspend-to-idle callback routine with frozen user space, frozen
* scheduler tick and suspended scheduler clock on the target CPU.
*/
-static __cpuidle void intel_idle_s2idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
unsigned long eax = flg2MWAIT(drv->states[index].flags);
unsigned long ecx = 1; /* break on interrupt flag */
mwait_idle_with_hints(eax, ecx);
+
+ return 0;
}
/*
@@ -752,6 +752,35 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.enter = NULL }
};
+static struct cpuidle_state icx_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 4,
+ .target_residency = 4,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 128,
+ .target_residency = 384,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state atom_cstates[] __initdata = {
{
.name = "C1E",
@@ -1056,6 +1085,12 @@ static const struct idle_cpu idle_cpu_skx __initconst = {
.use_acpi = true,
};
+static const struct idle_cpu idle_cpu_icx __initconst = {
+ .state_table = icx_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_avn __initconst = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
@@ -1110,6 +1145,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &idle_cpu_skl),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &idle_cpu_skl),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &idle_cpu_icx),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
@@ -1562,7 +1598,7 @@ static int intel_idle_cpu_online(unsigned int cpu)
{
struct cpuidle_device *dev;
- if (!lapic_timer_always_reliable)
+ if (!boot_cpu_has(X86_FEATURE_ARAT))
tick_broadcast_enable();
/*
@@ -1655,16 +1691,13 @@ static int __init intel_idle_init(void)
goto init_driver_fail;
}
- if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
- lapic_timer_always_reliable = true;
-
retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
intel_idle_cpu_online, NULL);
if (retval < 0)
goto hp_setup_fail;
pr_debug("Local APIC timer is reliable in %s\n",
- lapic_timer_always_reliable ? "all C-states" : "C1");
+ boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");
return 0;
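Note: intel_idle now queries the CPU feature bit directly instead of caching it
in lapic_timer_always_reliable. The check it performs (ARAT = Always Running
APIC Timer, i.e. the timer keeps ticking in deep C-states):

    /* Sketch: enable broadcast only when the APIC timer can stop. */
    if (!boot_cpu_has(X86_FEATURE_ARAT))
            tick_broadcast_enable();        /* fall back to the broadcast timer */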
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 655795bfa0ee..513825e424bf 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq)
INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
}
+static void rdma_dim_destroy(struct ib_cq *cq)
+{
+ if (!cq->dim)
+ return;
+
+ cancel_work_sync(&cq->dim->work);
+ kfree(cq->dim);
+}
+
static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
int rc;
@@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
return cq;
out_destroy_cq:
+ rdma_dim_destroy(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
out_free_wc:
@@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
WARN_ON_ONCE(1);
}
+ rdma_dim_destroy(cq);
trace_cq_free(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
- if (cq->dim)
- cancel_work_sync(&cq->dim->work);
- kfree(cq->dim);
kfree(cq->wc);
kfree(cq);
}
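Note: rdma_dim_destroy() now cancels the DIM work before the CQ is torn down,
instead of freeing the CQ from under a possibly in-flight worker. The general
rule it encodes, sketched for any object embedding deferred work (struct foo
is hypothetical):

    static void foo_destroy(struct foo *f)
    {
            cancel_work_sync(&f->work);     /* wait out any in-flight callback */
            kfree(f);                       /* now nothing can touch f->work */
    }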
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 905a2beaf885..2927a9d16eaa 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1183,6 +1183,8 @@ static void setup_dma_device(struct ib_device *device)
struct device *parent = device->dev.parent;
WARN_ON_ONCE(device->dma_device);
+
+#ifdef CONFIG_DMA_OPS
if (device->dev.dma_ops) {
/*
* The caller provided custom DMA operations. Copy the
@@ -1203,7 +1205,9 @@ static void setup_dma_device(struct ib_device *device)
else
WARN_ON_ONCE(true);
}
- } else {
+ } else
+#endif /* CONFIG_DMA_OPS */
+ {
/*
* The caller did not provide custom DMA operations. Use the
* DMA mapping operations of the parent device.
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5b87eee8ccc8..d03dacaef788 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
size_t in_size;
int ret;
+ if (in_len < offsetofend(typeof(cmd), reserved))
+ return -EINVAL;
in_size = min_t(size_t, in_len, sizeof(cmd));
if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
@@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
size_t in_size;
int ret;
+ if (in_len < offsetofend(typeof(cmd), reserved))
+ return -EINVAL;
in_size = min_t(size_t, in_len, sizeof(cmd));
if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index b48b3f6e632d..76e7ec0f0775 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1584,7 +1584,7 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
struct ib_uverbs_create_qp_resp resp;
struct ib_uqp_object *obj;
struct ib_xrcd *xrcd;
- struct ib_uobject *uninitialized_var(xrcd_uobj);
+ struct ib_uobject *xrcd_uobj;
struct ib_qp *qp;
struct ib_qp_open_attr attr = {};
int ret;
@@ -3406,7 +3406,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
struct ib_usrq_object *obj;
struct ib_pd *pd;
struct ib_srq *srq;
- struct ib_uobject *uninitialized_var(xrcd_uobj);
+ struct ib_uobject *xrcd_uobj;
struct ib_srq_init_attr attr;
int ret;
struct ib_device *ib_dev;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 30e08bcc9afb..77bc02a9228e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3282,7 +3282,7 @@ static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
- struct in6_addr uninitialized_var(addr);
+ struct in6_addr addr;
struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b1bb61c65f4f..352b8af1998a 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -754,7 +754,7 @@ skip_cqe:
static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
struct ib_wc *wc, struct c4iw_srq *srq)
{
- struct t4_cqe uninitialized_var(cqe);
+ struct t4_cqe cqe;
struct t4_wq *wq = qhp ? &qhp->wq : NULL;
u32 credit = 0;
u8 cqe_flushed;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cf51e3cbd969..f9ca6e000a81 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -3541,11 +3541,11 @@ static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
int nreq;
int err = 0;
unsigned ind;
- int uninitialized_var(size);
- unsigned uninitialized_var(seglen);
+ int size;
+ unsigned seglen;
__be32 dummy;
__be32 *lso_wqe;
- __be32 uninitialized_var(lso_hdr_sz);
+ __be32 lso_hdr_sz;
__be32 blh;
int i;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 0c18cb6a2f14..0133ebb8d740 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -925,8 +925,8 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_cq *cq = to_mcq(ibcq);
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
- int uninitialized_var(index);
- int uninitialized_var(inlen);
+ int index;
+ int inlen;
u32 *cqb = NULL;
void *cqc;
int cqe_size;
@@ -1246,7 +1246,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
__be64 *pas;
int page_shift;
int inlen;
- int uninitialized_var(cqe_size);
+ int cqe_size;
unsigned long flags;
if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 9454a66c12cc..655ea9c984e1 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2536,7 +2536,7 @@ static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
{
struct devx_async_event_file *ev_file = filp->private_data;
struct devx_event_subscription *event_sub;
- struct devx_async_event_data *uninitialized_var(event);
+ struct devx_async_event_data *event;
int ret = 0;
size_t eventsz;
bool omit_data;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 1ab676b66894..77dca1e05bba 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1797,9 +1797,7 @@ static bool init_prefetch_work(struct ib_pd *pd,
work->frags[i].mr =
get_prefetchable_mr(pd, advice, sg_list[i].lkey);
if (!work->frags[i].mr) {
- work->num_sge = i - 1;
- if (i)
- destroy_prefetch_work(work);
+ work->num_sge = i;
return false;
}
@@ -1865,6 +1863,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
srcu_key = srcu_read_lock(&dev->odp_srcu);
if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ destroy_prefetch_work(work);
return -EINVAL;
}
queue_work(system_unbound_wq, &work->work);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index e050eade97a1..1225b8d77510 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1766,15 +1766,14 @@ err:
}
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
struct ib_qp_init_attr *init_attr,
- struct mlx5_ib_create_qp *ucmd,
void *qpc)
{
int scqe_sz;
bool allow_scat_cqe = false;
- if (ucmd)
- allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+ allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
return;
@@ -1853,8 +1852,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 *in;
int err;
- mutex_init(&qp->mutex);
-
if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
@@ -1938,7 +1935,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;
- mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@@ -2012,7 +2008,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
(qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
- configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
+ configure_requester_scat_cqe(dev, qp, init_attr, qpc);
if (qp->rq.wqe_cnt) {
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@@ -2129,7 +2125,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;
- mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@@ -2543,13 +2538,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
return;
}
- if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+ switch (flag) {
+ case MLX5_QP_FLAG_SCATTER_CQE:
+ case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
/*
- * We don't return error if this flag was provided,
- * and mlx5 doesn't have right capability.
- */
- *flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+ * We don't return an error if these flags were provided
+ * but mlx5 doesn't have the right capability.
+ */
+ *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
return;
+ default:
+ break;
}
mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
}
@@ -2589,6 +2589,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
+ MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
if (qp->type == IB_QPT_RAW_PACKET) {
cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
@@ -2963,6 +2965,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
goto free_ucmd;
}
+ mutex_init(&qp->mutex);
qp->type = type;
if (udata) {
err = process_vendor_flags(dev, qp, params.ucmd, attr);
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index 2c6df1c43b55..bc35dbe4855b 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -1249,7 +1249,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct mlx5_wqe_xrc_seg *xrc;
struct mlx5_bf *bf;
void *cur_edge;
- int uninitialized_var(size);
+ int size;
unsigned long flags;
unsigned int idx;
int err = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index d04c245359eb..c6e95d0d760a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1639,8 +1639,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
* without initializing f0 and size0, and they are in fact
* never used uninitialized.
*/
- int uninitialized_var(size0);
- u32 uninitialized_var(f0);
+ int size0;
+ u32 f0;
int ind;
u8 op0 = 0;
@@ -1835,7 +1835,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
* without initializing size0, and it is in fact never used
* uninitialized.
*/
- int uninitialized_var(size0);
+ int size0;
int ind;
void *wqe;
void *prev_wqe;
@@ -1943,8 +1943,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
* without initializing f0 and size0, and they are in fact
* never used uninitialized.
*/
- int uninitialized_var(size0);
- u32 uninitialized_var(f0);
+ int size0;
+ u32 f0;
int ind;
u8 op0 = 0;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 7db35dd6ad74..332a8ba94b81 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -901,8 +901,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
qp->s_tail_ack_queue = 0;
qp->s_acked_ack_queue = 0;
qp->s_num_rd_atomic = 0;
- if (qp->r_rq.kwq)
- qp->r_rq.kwq->count = qp->r_rq.size;
qp->r_sge.num_sge = 0;
atomic_set(&qp->s_reserved_used, 0);
}
@@ -2367,31 +2365,6 @@ bad_lkey:
}
/**
- * get_count - count numbers of request work queue entries
- * in circular buffer
- * @rq: data structure for request queue entry
- * @tail: tail indices of the circular buffer
- * @head: head indices of the circular buffer
- *
- * Return - total number of entries in the circular buffer
- */
-static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
-{
- u32 count;
-
- count = head;
-
- if (count >= rq->size)
- count = 0;
- if (count < tail)
- count += rq->size - tail;
- else
- count -= tail;
-
- return count;
-}
-
-/**
* get_rvt_head - get head indices of the circular buffer
* @rq: data structure for request queue entry
* @ip: the QP
@@ -2465,7 +2438,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
head = get_rvt_head(rq, ip);
- kwq->count = get_count(rq, tail, head);
+ kwq->count = rvt_get_rq_count(rq, head, tail);
}
if (unlikely(kwq->count == 0)) {
ret = 0;
@@ -2500,7 +2473,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
* the number of remaining WQEs.
*/
if (kwq->count < srq->limit) {
- kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
+ kwq->count =
+ rvt_get_rq_count(rq,
+ get_rvt_head(rq, ip), tail);
if (kwq->count < srq->limit) {
struct ib_event ev;
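Note: the deleted get_count() is replaced by rvt_get_rq_count(), whose call
sites pass head before tail. The circular-buffer occupancy the old helper
computed, shown as the underlying arithmetic:

    /* entries used in a ring of rq->size slots (logic of the deleted helper) */
    count = head;
    if (count >= rq->size)
            count = 0;
    if (count < tail)
            count += rq->size - tail;
    else
            count -= tail;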
diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
index 977906cc0d11..c58735f4c94a 100644
--- a/drivers/infiniband/sw/rdmavt/rc.c
+++ b/drivers/infiniband/sw/rdmavt/rc.c
@@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
* not atomic, which is OK, since the fuzziness is
* resolved as further ACKs go out.
*/
- credits = head - tail;
- if ((int)credits < 0)
- credits += qp->r_rq.size;
+ credits = rvt_get_rq_count(&qp->r_rq, head, tail);
}
/*
* Binary search the credit table to find the code to
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 7271d705f4b0..857be5a7d0bd 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -333,7 +333,7 @@ static struct siw_wqe *siw_rqe_get(struct siw_qp *qp)
struct siw_srq *srq;
struct siw_wqe *wqe = NULL;
bool srq_event = false;
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
srq = qp->srq;
if (srq) {
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 5fe7a5633e33..dbe836c7ff47 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -46,7 +46,7 @@ struct omap_kp {
unsigned short keymap[];
};
-static DECLARE_TASKLET_DISABLED(kp_tasklet, omap_kp_tasklet, 0);
+static DECLARE_TASKLET_DISABLED_OLD(kp_tasklet, omap_kp_tasklet);
static unsigned int *row_gpios;
static unsigned int *col_gpios;
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index e1423f7648d6..65f4e9d62a67 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -77,7 +77,7 @@ static struct timer_list hil_mlcs_kicker;
static int hil_mlcs_probe;
static void hil_mlcs_process(unsigned long unused);
-static DECLARE_TASKLET_DISABLED(hil_mlcs_tasklet, hil_mlcs_process, 0);
+static DECLARE_TASKLET_DISABLED_OLD(hil_mlcs_tasklet, hil_mlcs_process);
/* #define HIL_MLC_DEBUG */
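Note: DECLARE_TASKLET_DISABLED_OLD() is the transition macro that keeps the
old unsigned-long-data callback while the tree moves to the new tasklet API.
A hedged sketch of the new-style form these call sites will eventually adopt
(struct foo and its tasklet member are hypothetical):

    /* New API: the callback receives the tasklet itself. */
    static void foo_tasklet_fn(struct tasklet_struct *t)
    {
            struct foo *f = from_tasklet(f, t, tasklet);    /* container_of lookup */
            /* handle the deferred work for f here */
    }
    DECLARE_TASKLET(foo_tasklet, foo_tasklet_fn);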
diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h
deleted file mode 100644
index 50bb3ed94b56..000000000000
--- a/drivers/input/serio/i8042-unicore32io.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Code specific to PKUnity SoC and UniCore ISA
- *
- * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
- * Copyright (C) 2001-2011 Guan Xuetao
- */
-#ifndef _I8042_UNICORE32_H
-#define _I8042_UNICORE32_H
-
-#include <mach/hardware.h>
-
-/*
- * Names.
- */
-#define I8042_KBD_PHYS_DESC "isa0060/serio0"
-#define I8042_AUX_PHYS_DESC "isa0060/serio1"
-#define I8042_MUX_PHYS_DESC "isa0060/serio%d"
-
-/*
- * IRQs.
- */
-#define I8042_KBD_IRQ IRQ_PS2_KBD
-#define I8042_AUX_IRQ IRQ_PS2_AUX
-
-/*
- * Register numbers.
- */
-#define I8042_COMMAND_REG PS2_COMMAND
-#define I8042_STATUS_REG PS2_STATUS
-#define I8042_DATA_REG PS2_DATA
-
-#define I8042_REGION_START (resource_size_t)(PS2_DATA)
-#define I8042_REGION_SIZE (resource_size_t)(16)
-
-static inline int i8042_read_data(void)
-{
- return readb(I8042_DATA_REG);
-}
-
-static inline int i8042_read_status(void)
-{
- return readb(I8042_STATUS_REG);
-}
-
-static inline void i8042_write_data(int val)
-{
- writeb(val, I8042_DATA_REG);
-}
-
-static inline void i8042_write_command(int val)
-{
- writeb(val, I8042_COMMAND_REG);
-}
-
-static inline int i8042_platform_init(void)
-{
- if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042"))
- return -EBUSY;
-
- i8042_reset = I8042_RESET_ALWAYS;
- return 0;
-}
-
-static inline void i8042_platform_exit(void)
-{
- release_mem_region(I8042_REGION_START, I8042_REGION_SIZE);
-}
-
-#endif /* _I8042_UNICORE32_H */
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index eb376700dfff..55381783dc82 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -21,8 +21,6 @@
#include "i8042-sparcio.h"
#elif defined(CONFIG_X86) || defined(CONFIG_IA64)
#include "i8042-x86ia64io.h"
-#elif defined(CONFIG_UNICORE32)
-#include "i8042-unicore32io.h"
#else
#include "i8042-io.h"
#endif
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index e9647ebff187..1e4770094415 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -159,7 +159,7 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer,
{
struct serio_raw_client *client = file->private_data;
struct serio_raw *serio_raw = client->serio_raw;
- char uninitialized_var(c);
+ char c;
ssize_t read = 0;
int error;
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 34d31c7ec8ba..620cdd7d214a 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -456,8 +456,6 @@ static void sur40_poll(struct input_dev *input)
{
struct sur40_state *sur40 = input_get_drvdata(input);
int result, bulk_read, need_blobs, packet_blobs, i;
- u32 uninitialized_var(packet_id);
-
struct sur40_header *header = &sur40->bulk_in_buffer->header;
struct sur40_blob *inblob = &sur40->bulk_in_buffer->blobs[0];
@@ -491,7 +489,7 @@ static void sur40_poll(struct input_dev *input)
if (need_blobs == -1) {
need_blobs = le16_to_cpu(header->count);
dev_dbg(sur40->dev, "need %d blobs\n", need_blobs);
- packet_id = le32_to_cpu(header->packet_id);
+ /* packet_id = le32_to_cpu(header->packet_id); */
}
/*
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 9e1ab701785c..befd111049c0 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -269,23 +269,22 @@ static int aggregate_requests(struct icc_node *node)
static int apply_constraints(struct icc_path *path)
{
struct icc_node *next, *prev = NULL;
+ struct icc_provider *p;
int ret = -EINVAL;
int i;
for (i = 0; i < path->num_nodes; i++) {
next = path->reqs[i].node;
+ p = next->provider;
- /*
- * Both endpoints should be valid master-slave pairs of the
- * same interconnect provider that will be configured.
- */
- if (!prev || next->provider != prev->provider) {
+ /* both endpoints should be valid master-slave pairs */
+ if (!prev || (p != prev->provider && !p->inter_set)) {
prev = next;
continue;
}
/* set the constraints */
- ret = next->provider->set(prev, next);
+ ret = p->set(prev, next);
if (ret)
goto out;
@@ -340,12 +339,12 @@ EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
* Returns a valid pointer to struct icc_node on success or ERR_PTR()
* on failure.
*/
-static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
+struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
{
struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
struct icc_provider *provider;
- if (!spec || spec->args_count != 1)
+ if (!spec)
return ERR_PTR(-EINVAL);
mutex_lock(&icc_lock);
@@ -359,6 +358,7 @@ static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
return node;
}
+EXPORT_SYMBOL_GPL(of_icc_get_from_provider);
static void devm_icc_release(struct device *dev, void *res)
{
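Note: apply_constraints() above now also invokes a provider's ->set() callback
when the previous node belongs to a different provider, provided that provider
opts in. A sketch of the opt-in (foo_* callbacks are hypothetical, assuming
the inter_set field referenced by this hunk):

    static struct icc_provider foo_provider = {
            .set       = foo_set,
            .aggregate = foo_aggregate,
            .inter_set = true,      /* allow prev/next from different providers */
    };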
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
index 2a11a63e7217..a3d2ef1d9903 100644
--- a/drivers/interconnect/qcom/bcm-voter.c
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -266,11 +266,7 @@ int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
if (!commit_idx[0])
goto out;
- ret = rpmh_invalidate(voter->dev);
- if (ret) {
- pr_err("Error invalidating RPMH client (%d)\n", ret);
- goto out;
- }
+ rpmh_invalidate(voter->dev);
ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE,
cmds, commit_idx);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index b0f308cb7f7c..b622af72448f 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -97,6 +97,7 @@ config OF_IOMMU
# IOMMU-agnostic DMA-mapping layer
config IOMMU_DMA
bool
+ select DMA_OPS
select IOMMU_API
select IOMMU_IOVA
select IRQ_MSI_IOMMU
@@ -183,6 +184,7 @@ config DMAR_TABLE
config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && (X86 || IA64)
+ select DMA_OPS
select IOMMU_API
select IOMMU_IOVA
select NEED_DMA_MAP_STATE
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index d759e7234e98..c29fd0991857 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2236,7 +2236,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long nr_pages, int prot)
{
struct dma_pte *first_pte = NULL, *pte = NULL;
- phys_addr_t uninitialized_var(pteval);
+ phys_addr_t pteval;
unsigned long sg_res = 0;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 9564d23d094f..aa096b333a99 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -628,13 +628,21 @@ out_free_table:
static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
+ struct fwnode_handle *fn;
+
if (iommu && iommu->ir_table) {
if (iommu->ir_msi_domain) {
+ fn = iommu->ir_msi_domain->fwnode;
+
irq_domain_remove(iommu->ir_msi_domain);
+ irq_domain_free_fwnode(fn);
iommu->ir_msi_domain = NULL;
}
if (iommu->ir_domain) {
+ fn = iommu->ir_domain->fwnode;
+
irq_domain_remove(iommu->ir_domain);
+ irq_domain_free_fwnode(fn);
iommu->ir_domain = NULL;
}
free_pages((unsigned long)iommu->ir_table->base,
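Note: irq_domain_remove() does not release the fwnode handle the domain was
created with, so the teardown above saves the handle first and frees it after
the domain is gone. The paired pattern in isolation:

    struct fwnode_handle *fn = domain->fwnode;

    irq_domain_remove(domain);      /* domain no longer references fn */
    irq_domain_free_fwnode(fn);     /* release the dynamically allocated handle */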
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 20738aacac89..e505b9130a1c 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -118,46 +118,66 @@ static int of_iommu_xlate(struct device *dev,
return ret;
}
-struct of_pci_iommu_alias_info {
- struct device *dev;
- struct device_node *np;
-};
-
-static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+static int of_iommu_configure_dev_id(struct device_node *master_np,
+ struct device *dev,
+ const u32 *id)
{
- struct of_pci_iommu_alias_info *info = data;
struct of_phandle_args iommu_spec = { .args_count = 1 };
int err;
- err = of_map_rid(info->np, alias, "iommu-map", "iommu-map-mask",
- &iommu_spec.np, iommu_spec.args);
+ err = of_map_id(master_np, *id, "iommu-map",
+ "iommu-map-mask", &iommu_spec.np,
+ iommu_spec.args);
if (err)
return err == -ENODEV ? NO_IOMMU : err;
- err = of_iommu_xlate(info->dev, &iommu_spec);
+ err = of_iommu_xlate(dev, &iommu_spec);
of_node_put(iommu_spec.np);
return err;
}
-static int of_fsl_mc_iommu_init(struct fsl_mc_device *mc_dev,
- struct device_node *master_np)
+static int of_iommu_configure_dev(struct device_node *master_np,
+ struct device *dev)
{
- struct of_phandle_args iommu_spec = { .args_count = 1 };
- int err;
-
- err = of_map_rid(master_np, mc_dev->icid, "iommu-map",
- "iommu-map-mask", &iommu_spec.np,
- iommu_spec.args);
- if (err)
- return err == -ENODEV ? NO_IOMMU : err;
+ struct of_phandle_args iommu_spec;
+ int err = NO_IOMMU, idx = 0;
+
+ while (!of_parse_phandle_with_args(master_np, "iommus",
+ "#iommu-cells",
+ idx, &iommu_spec)) {
+ err = of_iommu_xlate(dev, &iommu_spec);
+ of_node_put(iommu_spec.np);
+ idx++;
+ if (err)
+ break;
+ }
- err = of_iommu_xlate(&mc_dev->dev, &iommu_spec);
- of_node_put(iommu_spec.np);
return err;
}
+struct of_pci_iommu_alias_info {
+ struct device *dev;
+ struct device_node *np;
+};
+
+static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct of_pci_iommu_alias_info *info = data;
+ u32 input_id = alias;
+
+ return of_iommu_configure_dev_id(info->np, info->dev, &input_id);
+}
+
+static int of_iommu_configure_device(struct device_node *master_np,
+ struct device *dev, const u32 *id)
+{
+ return (id) ? of_iommu_configure_dev_id(master_np, dev, id) :
+ of_iommu_configure_dev(master_np, dev);
+}
+
const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np)
+ struct device_node *master_np,
+ const u32 *id)
{
const struct iommu_ops *ops = NULL;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
@@ -188,21 +208,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
pci_request_acs();
err = pci_for_each_dma_alias(to_pci_dev(dev),
of_pci_iommu_init, &info);
- } else if (dev_is_fsl_mc(dev)) {
- err = of_fsl_mc_iommu_init(to_fsl_mc_device(dev), master_np);
} else {
- struct of_phandle_args iommu_spec;
- int idx = 0;
-
- while (!of_parse_phandle_with_args(master_np, "iommus",
- "#iommu-cells",
- idx, &iommu_spec)) {
- err = of_iommu_xlate(dev, &iommu_spec);
- of_node_put(iommu_spec.np);
- idx++;
- if (err)
- break;
- }
+ err = of_iommu_configure_device(master_np, dev, id);
fwspec = dev_iommu_fwspec_get(dev);
if (!err && fwspec)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 216b3b8392b5..bb70b7177f94 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -425,7 +425,7 @@ config GOLDFISH_PIC
for Goldfish based virtual platforms.
config QCOM_PDC
- bool "QCOM PDC"
+ tristate "QCOM PDC"
depends on ARCH_QCOM
select IRQ_DOMAIN_HIERARCHY
help
@@ -541,7 +541,6 @@ config LOONGSON_HTPIC
default y
select IRQ_DOMAIN
select GENERIC_IRQ_CHIP
- select I8259
help
Support for the Loongson-3 HyperTransport PIC Controller.
diff --git a/drivers/irqchip/irq-ativic32.c b/drivers/irqchip/irq-ativic32.c
index 85cf6e0e0e52..476d6024aaf2 100644
--- a/drivers/irqchip/irq-ativic32.c
+++ b/drivers/irqchip/irq-ativic32.c
@@ -92,7 +92,7 @@ static int ativic32_irq_domain_map(struct irq_domain *id, unsigned int virq,
return 0;
}
-static struct irq_domain_ops ativic32_ops = {
+static const struct irq_domain_ops ativic32_ops = {
.map = ativic32_irq_domain_map,
.xlate = irq_domain_xlate_onecell
};
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index fc1b3a9cdafc..fb4ad2aaa727 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -310,10 +310,16 @@ static void __init sama5d3_aic_irq_fixup(void)
aic_common_rtc_irq_fixup();
}
+static void __init sam9x60_aic_irq_fixup(void)
+{
+ aic_common_rtc_irq_fixup();
+ aic_common_rtt_irq_fixup();
+}
+
static const struct of_device_id aic5_irq_fixups[] __initconst = {
{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
- { .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup },
+ { .compatible = "microchip,sam9x60", .data = sam9x60_aic_irq_fixup },
{ /* sentinel */ },
};
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index fd7c537fb42a..9dc9bf8cdcc4 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -28,6 +28,9 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/syscore_ops.h>
+#ifdef CONFIG_ARM
+#include <asm/smp_plat.h>
+#endif
#define IRQS_PER_WORD 32
#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4)
@@ -327,7 +330,11 @@ static int bcm7038_l1_suspend(void)
u32 val;
/* Wakeup interrupt should only come from the boot cpu */
+#ifdef CONFIG_SMP
boot_cpu = cpu_logical_map(0);
+#else
+ boot_cpu = 0;
+#endif
list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
for (word = 0; word < intc->n_words; word++) {
@@ -347,7 +354,11 @@ static void bcm7038_l1_resume(void)
struct bcm7038_l1_chip *intc;
int boot_cpu, word;
+#ifdef CONFIG_SMP
boot_cpu = cpu_logical_map(0);
+#else
+ boot_cpu = 0;
+#endif
list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
for (word = 0; word < intc->n_words; word++) {
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 586df3587be0..c7c9e976acbb 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -143,6 +143,9 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
irq_set_chained_handler_and_data(parent_irq,
bcm7120_l2_intc_irq_handle, l1_data);
+ if (data->can_wake)
+ enable_irq_wake(parent_irq);
+
return 0;
}
@@ -247,6 +250,8 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
if (ret < 0)
goto out_free_l1_data;
+ data->can_wake = of_property_read_bool(dn, "brcm,irq-can-wake");
+
for (irq = 0; irq < data->num_parent_irqs; irq++) {
ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask);
if (ret)
@@ -274,9 +279,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
goto out_free_domain;
}
- if (of_property_read_bool(dn, "brcm,irq-can-wake"))
- data->can_wake = true;
-
for (idx = 0; idx < data->n_words; idx++) {
irq = idx * IRQS_PER_WORD;
gc = irq_get_domain_generic_chip(data->domain, irq);
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 0298ede67e51..cdd6a42d4efa 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -254,6 +254,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
*/
data->gc->wake_enabled = 0xffffffff;
ct->chip.irq_set_wake = irq_gc_set_wake;
+ enable_irq_wake(parent_irq);
}
pr_info("registered L2 intc (%pOF, parent irq: %d)\n", np, parent_irq);
@@ -275,6 +276,10 @@ static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
}
IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
+IRQCHIP_DECLARE(brcmstb_hif_spi_l2_intc, "brcm,hif-spi-l2-intc",
+ brcmstb_l2_edge_intc_of_init);
+IRQCHIP_DECLARE(brcmstb_upg_aux_aon_l2_intc, "brcm,upg-aux-aon-l2-intc",
+ brcmstb_l2_edge_intc_of_init);
static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
struct device_node *parent)
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 606efa64adff..634263dfd7b5 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -7,6 +7,8 @@
*
*/
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/irq.h>
@@ -23,6 +25,19 @@ static struct irq_chip its_msi_irq_chip = {
.irq_set_affinity = msi_domain_set_affinity
};
+static u32 fsl_mc_msi_domain_get_msi_id(struct irq_domain *domain,
+ struct fsl_mc_device *mc_dev)
+{
+ struct device_node *of_node;
+ u32 out_id;
+
+ of_node = irq_domain_get_of_node(domain);
+ out_id = of_node ? of_msi_map_id(&mc_dev->dev, of_node, mc_dev->icid) :
+ iort_msi_map_id(&mc_dev->dev, mc_dev->icid);
+
+ return out_id;
+}
+
static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
struct device *dev,
int nvec, msi_alloc_info_t *info)
@@ -43,7 +58,8 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
* NOTE: This device id corresponds to the IOMMU stream ID
* associated with the DPRC object (ICID).
*/
- info->scratchpad[0].ul = mc_bus_dev->icid;
+ info->scratchpad[0].ul = fsl_mc_msi_domain_get_msi_id(msi_domain,
+ mc_bus_dev);
msi_info = msi_get_domain_info(msi_domain->parent);
/* Allocate at least 32 MSIs, and always as a power of 2 */
@@ -66,12 +82,71 @@ static const struct of_device_id its_device_id[] = {
{},
};
-static int __init its_fsl_mc_msi_init(void)
+static void __init its_fsl_mc_msi_init_one(struct fwnode_handle *handle,
+ const char *name)
{
- struct device_node *np;
struct irq_domain *parent;
struct irq_domain *mc_msi_domain;
+ parent = irq_find_matching_fwnode(handle, DOMAIN_BUS_NEXUS);
+ if (!parent || !msi_get_domain_info(parent)) {
+ pr_err("%s: unable to locate ITS domain\n", name);
+ return;
+ }
+
+ mc_msi_domain = fsl_mc_msi_create_irq_domain(handle,
+ &its_fsl_mc_msi_domain_info,
+ parent);
+ if (!mc_msi_domain) {
+ pr_err("%s: unable to create fsl-mc domain\n", name);
+ return;
+ }
+
+ pr_info("fsl-mc MSI: %s domain created\n", name);
+}
+
+#ifdef CONFIG_ACPI
+static int __init
+its_fsl_mc_msi_parse_madt(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_translator *its_entry;
+ struct fwnode_handle *dom_handle;
+ const char *node_name;
+ int err = 0;
+
+ its_entry = (struct acpi_madt_generic_translator *)header;
+ node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
+ (long)its_entry->base_address);
+
+ dom_handle = iort_find_domain_token(its_entry->translation_id);
+ if (!dom_handle) {
+ pr_err("%s: Unable to locate ITS domain handle\n", node_name);
+ err = -ENXIO;
+ goto out;
+ }
+
+ its_fsl_mc_msi_init_one(dom_handle, node_name);
+
+out:
+ kfree(node_name);
+ return err;
+}
+
+
+static void __init its_fsl_mc_acpi_msi_init(void)
+{
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ its_fsl_mc_msi_parse_madt, 0);
+}
+#else
+static inline void its_fsl_mc_acpi_msi_init(void) { }
+#endif
+
+static void __init its_fsl_mc_of_msi_init(void)
+{
+ struct device_node *np;
+
for (np = of_find_matching_node(NULL, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
if (!of_device_is_available(np))
@@ -79,23 +154,15 @@ static int __init its_fsl_mc_msi_init(void)
if (!of_property_read_bool(np, "msi-controller"))
continue;
- parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
- if (!parent || !msi_get_domain_info(parent)) {
- pr_err("%pOF: unable to locate ITS domain\n", np);
- continue;
- }
-
- mc_msi_domain = fsl_mc_msi_create_irq_domain(
- of_node_to_fwnode(np),
- &its_fsl_mc_msi_domain_info,
- parent);
- if (!mc_msi_domain) {
- pr_err("%pOF: unable to create fsl-mc domain\n", np);
- continue;
- }
-
- pr_info("fsl-mc MSI: %pOF domain created\n", np);
+ its_fsl_mc_msi_init_one(of_node_to_fwnode(np),
+ np->full_name);
}
+}
+
+static int __init its_fsl_mc_msi_init(void)
+{
+ its_fsl_mc_of_msi_init();
+ its_fsl_mc_acpi_msi_init();
return 0;
}
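
The prepare hook above keeps the "at least 32 MSIs, and always as a power of 2" sizing. A plain-C illustration of that rounding (the kernel rounds with roundup_pow_of_two() from log2.h; this standalone version only mirrors the arithmetic):

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int nvec = 33;
	unsigned int alloc = roundup_pow_of_two(nvec < 32 ? 32 : nvec);

	printf("nvec=%u -> allocate %u MSIs\n", nvec, alloc); /* 33 -> 64 */
	return 0;
}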
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index beac4caefad9..95f097448f97 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2814,7 +2814,7 @@ static int allocate_vpe_l1_table(void)
if (val & GICR_VPROPBASER_4_1_VALID)
goto out;
- gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL);
+ gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
if (!gic_data_rdist()->vpe_table_mask)
return -ENOMEM;
@@ -2881,7 +2881,7 @@ static int allocate_vpe_l1_table(void)
pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
np, npg, psz, epp, esz);
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE));
+ page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
if (!page)
return -ENOMEM;
@@ -3523,6 +3523,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
msi_alloc_info_t *info = args;
struct its_device *its_dev = info->scratchpad[0].ptr;
struct its_node *its = its_dev->its;
+ struct irq_data *irqd;
irq_hw_number_t hwirq;
int err;
int i;
@@ -3542,7 +3543,9 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
irq_domain_set_hwirq_and_chip(domain, virq + i,
hwirq + i, &its_irq_chip, its_dev);
- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
+ irqd = irq_get_irq_data(virq + i);
+ irqd_set_single_target(irqd);
+ irqd_set_affinity_on_activate(irqd);
pr_debug("ID:%d pID:%d vID:%d\n",
(int)(hwirq + i - its_dev->event_map.lpi_base),
(int)(hwirq + i), virq + i);
@@ -4087,18 +4090,22 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
static void its_vpe_4_1_invall(struct its_vpe *vpe)
{
void __iomem *rdbase;
+ unsigned long flags;
u64 val;
+ int cpu;
val = GICR_INVALLR_V;
val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
/* Target the redistributor this vPE is currently known on */
- raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
- rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+ cpu = vpe_to_cpuid_lock(vpe, &flags);
+ raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
gic_write_lpir(val, rdbase + GICR_INVALLR);
wait_for_syncr(rdbase);
- raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
+ raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+ vpe_to_cpuid_unlock(vpe, flags);
}
static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
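
The GFP_KERNEL to GFP_ATOMIC switches above reflect that allocate_vpe_l1_table() runs on the secondary-CPU bring-up path with interrupts disabled, where a sleeping allocation is not allowed; the invall change likewise takes vpe_to_cpuid_lock() so the target redistributor cannot change under it. A minimal sketch of the atomic-context allocation rule (names hypothetical, not driver code):

static int example_alloc_in_atomic(raw_spinlock_t *lock, void **out)
{
	void *p;

	raw_spin_lock(lock);
	/* GFP_KERNEL may sleep; under a raw spinlock only GFP_ATOMIC is safe */
	p = kzalloc(64, GFP_ATOMIC);
	raw_spin_unlock(lock);
	if (!p)
		return -ENOMEM;
	*out = p;
	return 0;
}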
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index cc46bc2d634b..324f280ff606 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -2116,7 +2116,7 @@ static void __init gic_acpi_setup_kvm_info(void)
}
static int __init
-gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
+gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_madt_generic_distributor *dist;
struct fwnode_handle *domain_handle;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index c17fabd6741e..a27ba2cc1dce 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1584,7 +1584,7 @@ static void __init gic_acpi_setup_kvm_info(void)
gic_set_kvm_info(&gic_v2_kvm_info);
}
-static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
+static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_distributor *dist;
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
index c27577c81126..e35b7b09c3ab 100644
--- a/drivers/irqchip/irq-imx-intmux.c
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -53,6 +53,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
#define CHANIER(n) (0x10 + (0x40 * n))
#define CHANIPR(n) (0x20 + (0x40 * n))
@@ -60,6 +61,8 @@
#define CHAN_MAX_NUM 0x8
struct intmux_irqchip_data {
+ struct irq_chip chip;
+ u32 saved_reg;
int chanidx;
int irq;
struct irq_domain *domain;
@@ -120,8 +123,10 @@ static struct irq_chip imx_intmux_irq_chip = {
static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq,
irq_hw_number_t hwirq)
{
- irq_set_chip_data(irq, h->host_data);
- irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq);
+ struct intmux_irqchip_data *data = h->host_data;
+
+ irq_set_chip_data(irq, data);
+ irq_set_chip_and_handler(irq, &data->chip, handle_level_irq);
return 0;
}
@@ -210,8 +215,7 @@ static int imx_intmux_probe(struct platform_device *pdev)
return -EINVAL;
}
- data = devm_kzalloc(&pdev->dev, sizeof(*data) +
- channum * sizeof(data->irqchip_data[0]), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, struct_size(data, irqchip_data, channum), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -232,6 +236,10 @@ static int imx_intmux_probe(struct platform_device *pdev)
data->channum = channum;
raw_spin_lock_init(&data->lock);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = clk_prepare_enable(data->ipg_clk);
if (ret) {
dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
@@ -239,6 +247,8 @@ static int imx_intmux_probe(struct platform_device *pdev)
}
for (i = 0; i < channum; i++) {
+ data->irqchip_data[i].chip = imx_intmux_irq_chip;
+ data->irqchip_data[i].chip.parent_device = &pdev->dev;
data->irqchip_data[i].chanidx = i;
data->irqchip_data[i].irq = irq_of_parse_and_map(np, i);
@@ -267,6 +277,12 @@ static int imx_intmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
+ /*
+ * Let pm_runtime_put() disable the clock.
+ * If CONFIG_PM is not enabled, the clock will stay powered.
+ */
+ pm_runtime_put(&pdev->dev);
+
return 0;
out:
clk_disable_unprepare(data->ipg_clk);
@@ -288,11 +304,56 @@ static int imx_intmux_remove(struct platform_device *pdev)
irq_domain_remove(data->irqchip_data[i].domain);
}
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int imx_intmux_runtime_suspend(struct device *dev)
+{
+ struct intmux_data *data = dev_get_drvdata(dev);
+ struct intmux_irqchip_data *irqchip_data;
+ int i;
+
+ for (i = 0; i < data->channum; i++) {
+ irqchip_data = &data->irqchip_data[i];
+ irqchip_data->saved_reg = readl_relaxed(data->regs + CHANIER(i));
+ }
+
clk_disable_unprepare(data->ipg_clk);
return 0;
}
+static int imx_intmux_runtime_resume(struct device *dev)
+{
+ struct intmux_data *data = dev_get_drvdata(dev);
+ struct intmux_irqchip_data *irqchip_data;
+ int ret, i;
+
+ ret = clk_prepare_enable(data->ipg_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < data->channum; i++) {
+ irqchip_data = &data->irqchip_data[i];
+ writel_relaxed(irqchip_data->saved_reg, data->regs + CHANIER(i));
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx_intmux_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(imx_intmux_runtime_suspend,
+ imx_intmux_runtime_resume, NULL)
+};
+
static const struct of_device_id imx_intmux_id[] = {
{ .compatible = "fsl,imx-intmux", },
{ /* sentinel */ },
@@ -302,6 +363,7 @@ static struct platform_driver imx_intmux_driver = {
.driver = {
.name = "imx-intmux",
.of_match_table = imx_intmux_id,
+ .pm = &imx_intmux_pm_ops,
},
.probe = imx_intmux_probe,
.remove = imx_intmux_remove,
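
Giving every channel its own irq_chip copy with parent_device set lets the genirq core manage the intmux's runtime PM state: when one of the muxed IRQs is requested, the core resumes the parent device before the chip is used. A hedged sketch of that core-side behaviour (modelled on irq_chip_pm_get() in kernel/irq/chip.c, simplified):

static int sketch_irq_chip_pm_get(struct irq_data *d)
{
	struct device *dev = d->chip->parent_device;
	int ret;

	if (!IS_ENABLED(CONFIG_PM) || !dev)
		return 0;

	ret = pm_runtime_get_sync(dev);	/* triggers runtime_resume above */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}
	return 0;
}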
diff --git a/drivers/irqchip/irq-loongson-htpic.c b/drivers/irqchip/irq-loongson-htpic.c
index dd018c22ea83..63f72803c8c4 100644
--- a/drivers/irqchip/irq-loongson-htpic.c
+++ b/drivers/irqchip/irq-loongson-htpic.c
@@ -93,10 +93,8 @@ int __init htpic_of_init(struct device_node *node, struct device_node *parent)
}
htpic = kzalloc(sizeof(*htpic), GFP_KERNEL);
- if (!htpic) {
- err = -ENOMEM;
- goto out_free;
- }
+ if (!htpic)
+ return -ENOMEM;
htpic->base = of_iomap(node, 0);
if (!htpic->base) {
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
index 1ece9337c78d..13e6016fe464 100644
--- a/drivers/irqchip/irq-loongson-htvec.c
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -19,15 +19,14 @@
/* Registers */
#define HTVEC_EN_OFF 0x20
-#define HTVEC_MAX_PARENT_IRQ 4
+#define HTVEC_MAX_PARENT_IRQ 8
#define VEC_COUNT_PER_REG 32
-#define VEC_REG_COUNT 4
-#define VEC_COUNT (VEC_COUNT_PER_REG * VEC_REG_COUNT)
#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG)
struct htvec {
+ int num_parents;
void __iomem *base;
struct irq_domain *htvec_domain;
raw_spinlock_t htvec_lock;
@@ -43,7 +42,7 @@ static void htvec_irq_dispatch(struct irq_desc *desc)
chained_irq_enter(chip, desc);
- for (i = 0; i < VEC_REG_COUNT; i++) {
+ for (i = 0; i < priv->num_parents; i++) {
pending = readl(priv->base + 4 * i);
while (pending) {
int bit = __ffs(pending);
@@ -109,11 +108,14 @@ static struct irq_chip htvec_irq_chip = {
static int htvec_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
+ int ret;
unsigned long hwirq;
unsigned int type, i;
struct htvec *priv = domain->host_data;
- irq_domain_translate_onecell(domain, arg, &hwirq, &type);
+ ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
+ if (ret)
+ return ret;
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, hwirq + i, &htvec_irq_chip,
@@ -147,7 +149,7 @@ static void htvec_reset(struct htvec *priv)
u32 idx;
/* Clear IRQ cause registers, mask all interrupts */
- for (idx = 0; idx < VEC_REG_COUNT; idx++) {
+ for (idx = 0; idx < priv->num_parents; idx++) {
writel_relaxed(0x0, priv->base + HTVEC_EN_OFF + 4 * idx);
writel_relaxed(0xFFFFFFFF, priv->base);
}
@@ -157,7 +159,7 @@ static int htvec_of_init(struct device_node *node,
struct device_node *parent)
{
struct htvec *priv;
- int err, parent_irq[4], num_parents = 0, i;
+ int err, parent_irq[8], i;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -176,33 +178,35 @@ static int htvec_of_init(struct device_node *node,
if (parent_irq[i] <= 0)
break;
- num_parents++;
+ priv->num_parents++;
}
- if (!num_parents) {
+ if (!priv->num_parents) {
pr_err("Failed to get parent irqs\n");
err = -ENODEV;
goto iounmap_base;
}
priv->htvec_domain = irq_domain_create_linear(of_node_to_fwnode(node),
- VEC_COUNT,
- &htvec_domain_ops,
- priv);
+ (VEC_COUNT_PER_REG * priv->num_parents),
+ &htvec_domain_ops, priv);
if (!priv->htvec_domain) {
pr_err("Failed to create IRQ domain\n");
err = -ENOMEM;
- goto iounmap_base;
+ goto irq_dispose;
}
htvec_reset(priv);
- for (i = 0; i < num_parents; i++)
+ for (i = 0; i < priv->num_parents; i++)
irq_set_chained_handler_and_data(parent_irq[i],
htvec_irq_dispatch, priv);
return 0;
+irq_dispose:
+ for (; i > 0; i--)
+ irq_dispose_mapping(parent_irq[i - 1]);
iounmap_base:
iounmap(priv->base);
free_priv:
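
With the register count no longer fixed at 4, the domain size becomes 32 vectors per enabled parent. A standalone check of the dispatch arithmetic (plain C, example values):

#include <stdio.h>

#define VEC_COUNT_PER_REG	32
#define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)	((irq_id) % VEC_COUNT_PER_REG)

int main(void)
{
	int num_parents = 8, hwirq = 200;

	printf("domain size: %d vectors\n", VEC_COUNT_PER_REG * num_parents);
	printf("hwirq %d -> reg %d, bit %d\n",
	       hwirq, VEC_REG_IDX(hwirq), VEC_REG_BIT(hwirq)); /* reg 6, bit 8 */
	return 0;
}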
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 63b61474a0cc..9ed1bc473663 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -60,7 +60,7 @@ static void liointc_chained_handle_irq(struct irq_desc *desc)
if (!pending) {
/* Always blame LPC IRQ if we have that bug */
if (handler->priv->has_lpc_irq_errata &&
- (handler->parent_int_map & ~gc->mask_cache &
+ (handler->parent_int_map & gc->mask_cache &
BIT(LIOINTC_ERRATA_IRQ)))
pending = BIT(LIOINTC_ERRATA_IRQ);
else
@@ -114,6 +114,7 @@ static int liointc_set_type(struct irq_data *data, unsigned int type)
liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
break;
default:
+ irq_gc_unlock_irqrestore(gc, flags);
return -EINVAL;
}
irq_gc_unlock_irqrestore(gc, flags);
@@ -131,11 +132,11 @@ static void liointc_resume(struct irq_chip_generic *gc)
irq_gc_lock_irqsave(gc, flags);
/* Disable all at first */
writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE);
- /* Revert map cache */
+ /* Restore map cache */
for (i = 0; i < LIOINTC_CHIP_IRQ; i++)
writeb(priv->map_cache[i], gc->reg_base + i);
- /* Revert mask cache */
- writel(~gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
+ /* Restore mask cache */
+ writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
irq_gc_unlock_irqrestore(gc, flags);
}
@@ -243,7 +244,7 @@ int __init liointc_of_init(struct device_node *node,
ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
ct->chip.irq_set_type = liointc_set_type;
- gc->mask_cache = 0xffffffff;
+ gc->mask_cache = 0;
priv->gc = gc;
for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
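
With irq_gc_unmask_enable_reg()/irq_gc_mask_disable_reg(), mask_cache holds a 1 for every enabled line, so the all-masked boot state is 0, resume writes the cache to the ENABLE register unmodified, and the LPC-errata test checks the cache directly instead of its complement. A plain-C illustration of the invariant:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mask_cache = 0;	/* boot: all lines masked */

	mask_cache |= 1u << 10;		/* unmask line 10 -> ENABLE reg */
	mask_cache |= 1u << 3;		/* unmask line 3 */
	mask_cache &= ~(1u << 10);	/* mask line 10 -> DISABLE reg */

	/* on resume, this value goes straight to LIOINTC_REG_INTC_ENABLE */
	printf("enable mask: 0x%08x\n", mask_cache);	/* 0x00000008 */
	return 0;
}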
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index 50becd21008c..12aeeab43289 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -100,17 +100,12 @@ static int pch_msi_parent_domain_alloc(struct irq_domain *domain,
unsigned int virq, int hwirq)
{
struct irq_fwspec fwspec;
- int ret;
fwspec.fwnode = domain->parent->fwnode;
fwspec.param_count = 1;
fwspec.param[0] = hwirq;
- ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
- if (ret)
- return ret;
-
- return 0;
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
}
static int pch_msi_middle_domain_alloc(struct irq_domain *domain,
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index 2a05b9305012..9bf6b9a5f734 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -64,15 +64,6 @@ static void pch_pic_bitclr(struct pch_pic *priv, int offset, int bit)
raw_spin_unlock(&priv->pic_lock);
}
-static void pch_pic_eoi_irq(struct irq_data *d)
-{
- u32 idx = PIC_REG_IDX(d->hwirq);
- struct pch_pic *priv = irq_data_get_irq_chip_data(d);
-
- writel(BIT(PIC_REG_BIT(d->hwirq)),
- priv->base + PCH_PIC_CLR + idx * 4);
-}
-
static void pch_pic_mask_irq(struct irq_data *d)
{
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
@@ -85,6 +76,9 @@ static void pch_pic_unmask_irq(struct irq_data *d)
{
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ writel(BIT(PIC_REG_BIT(d->hwirq)),
+ priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+
irq_chip_unmask_parent(d);
pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq);
}
@@ -124,7 +118,6 @@ static struct irq_chip pch_pic_irq_chip = {
.irq_mask = pch_pic_mask_irq,
.irq_unmask = pch_pic_unmask_irq,
.irq_ack = irq_chip_ack_parent,
- .irq_eoi = pch_pic_eoi_irq,
.irq_set_affinity = irq_chip_set_affinity_parent,
.irq_set_type = pch_pic_set_type,
};
@@ -135,22 +128,25 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
int err;
unsigned int type;
unsigned long hwirq;
- struct irq_fwspec fwspec;
+ struct irq_fwspec *fwspec = arg;
+ struct irq_fwspec parent_fwspec;
struct pch_pic *priv = domain->host_data;
- irq_domain_translate_twocell(domain, arg, &hwirq, &type);
+ err = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
+ if (err)
+ return err;
- fwspec.fwnode = domain->parent->fwnode;
- fwspec.param_count = 1;
- fwspec.param[0] = hwirq + priv->ht_vec_base;
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 1;
+ parent_fwspec.param[0] = hwirq + priv->ht_vec_base;
- err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
if (err)
return err;
irq_domain_set_info(domain, virq, hwirq,
&pch_pic_irq_chip, priv,
- handle_fasteoi_ack_irq, NULL, NULL);
+ handle_level_irq, NULL, NULL);
irq_set_probe(virq);
return 0;
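
Dropping the irq_eoi callback and switching the flow handler from handle_fasteoi_ack_irq to handle_level_irq means the latch is now cleared on unmask: anything latched while the line was masked is discarded before the mask opens, and the level is re-sampled rather than replayed. Roughly what the level flow does (modelled on handle_level_irq() in kernel/irq/chip.c, locking and re-entry checks omitted):

static void level_flow_sketch(struct irq_desc *desc)
{
	mask_ack_irq(desc);	/* chip->irq_mask + chip->irq_ack */
	handle_irq_event(desc);	/* run the registered handler */
	cond_unmask_irq(desc);	/* chip->irq_unmask: clears the stale latch */
}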
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index d70507133c1d..aacfa012c082 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -46,7 +46,7 @@
void __iomem *mips_gic_base;
-DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
@@ -617,8 +617,8 @@ error:
return ret;
}
-void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs)
+static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
{
irq_hw_number_t base_hwirq;
struct irq_data *data;
@@ -631,8 +631,8 @@ void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
bitmap_set(ipi_available, base_hwirq, nr_irqs);
}
-int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
- enum irq_domain_bus_token bus_token)
+static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
bool is_ipi;
diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c
index 69ba8ce3c178..62a61275aaa3 100644
--- a/drivers/irqchip/irq-mtk-cirq.c
+++ b/drivers/irqchip/irq-mtk-cirq.c
@@ -295,4 +295,6 @@ out_free:
return ret;
}
-IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(mtk_cirq)
+IRQCHIP_MATCH("mediatek,mtk-cirq", mtk_cirq_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(mtk_cirq)
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index 73eae5966a40..7299c5ab4d10 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -15,7 +15,7 @@
#include <linux/spinlock.h>
struct mtk_sysirq_chip_data {
- spinlock_t lock;
+ raw_spinlock_t lock;
u32 nr_intpol_bases;
void __iomem **intpol_bases;
u32 *intpol_words;
@@ -37,7 +37,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
reg_index = chip_data->which_word[hwirq];
offset = hwirq & 0x1f;
- spin_lock_irqsave(&chip_data->lock, flags);
+ raw_spin_lock_irqsave(&chip_data->lock, flags);
value = readl_relaxed(base + reg_index * 4);
if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
if (type == IRQ_TYPE_LEVEL_LOW)
@@ -53,7 +53,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
data = data->parent_data;
ret = data->chip->irq_set_type(data, type);
- spin_unlock_irqrestore(&chip_data->lock, flags);
+ raw_spin_unlock_irqrestore(&chip_data->lock, flags);
return ret;
}
@@ -212,7 +212,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
ret = -ENOMEM;
goto out_free_which_word;
}
- spin_lock_init(&chip_data->lock);
+ raw_spin_lock_init(&chip_data->lock);
return 0;
@@ -231,4 +231,6 @@ out_free_chip:
kfree(chip_data);
return ret;
}
-IRQCHIP_DECLARE(mtk_sysirq, "mediatek,mt6577-sysirq", mtk_sysirq_of_init);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(mtk_sysirq)
+IRQCHIP_MATCH("mediatek,mt6577-sysirq", mtk_sysirq_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(mtk_sysirq)
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index faa8482c8246..03a36be757d8 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -25,7 +25,6 @@
#define IRQS_PER_BANK 32
#define HWSPNLCK_TIMEOUT 1000 /* usec */
-#define HWSPNLCK_RETRY_DELAY 100 /* usec */
struct stm32_exti_bank {
u32 imr_ofst;
@@ -42,6 +41,7 @@ struct stm32_exti_bank {
struct stm32_desc_irq {
u32 exti;
u32 irq_parent;
+ struct irq_chip *chip;
};
struct stm32_exti_drv_data {
@@ -166,27 +166,41 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
&stm32mp1_exti_b3,
};
+static struct irq_chip stm32_exti_h_chip;
+static struct irq_chip stm32_exti_h_chip_direct;
+
static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
- { .exti = 0, .irq_parent = 6 },
- { .exti = 1, .irq_parent = 7 },
- { .exti = 2, .irq_parent = 8 },
- { .exti = 3, .irq_parent = 9 },
- { .exti = 4, .irq_parent = 10 },
- { .exti = 5, .irq_parent = 23 },
- { .exti = 6, .irq_parent = 64 },
- { .exti = 7, .irq_parent = 65 },
- { .exti = 8, .irq_parent = 66 },
- { .exti = 9, .irq_parent = 67 },
- { .exti = 10, .irq_parent = 40 },
- { .exti = 11, .irq_parent = 42 },
- { .exti = 12, .irq_parent = 76 },
- { .exti = 13, .irq_parent = 77 },
- { .exti = 14, .irq_parent = 121 },
- { .exti = 15, .irq_parent = 127 },
- { .exti = 16, .irq_parent = 1 },
- { .exti = 65, .irq_parent = 144 },
- { .exti = 68, .irq_parent = 143 },
- { .exti = 73, .irq_parent = 129 },
+ { .exti = 0, .irq_parent = 6, .chip = &stm32_exti_h_chip },
+ { .exti = 1, .irq_parent = 7, .chip = &stm32_exti_h_chip },
+ { .exti = 2, .irq_parent = 8, .chip = &stm32_exti_h_chip },
+ { .exti = 3, .irq_parent = 9, .chip = &stm32_exti_h_chip },
+ { .exti = 4, .irq_parent = 10, .chip = &stm32_exti_h_chip },
+ { .exti = 5, .irq_parent = 23, .chip = &stm32_exti_h_chip },
+ { .exti = 6, .irq_parent = 64, .chip = &stm32_exti_h_chip },
+ { .exti = 7, .irq_parent = 65, .chip = &stm32_exti_h_chip },
+ { .exti = 8, .irq_parent = 66, .chip = &stm32_exti_h_chip },
+ { .exti = 9, .irq_parent = 67, .chip = &stm32_exti_h_chip },
+ { .exti = 10, .irq_parent = 40, .chip = &stm32_exti_h_chip },
+ { .exti = 11, .irq_parent = 42, .chip = &stm32_exti_h_chip },
+ { .exti = 12, .irq_parent = 76, .chip = &stm32_exti_h_chip },
+ { .exti = 13, .irq_parent = 77, .chip = &stm32_exti_h_chip },
+ { .exti = 14, .irq_parent = 121, .chip = &stm32_exti_h_chip },
+ { .exti = 15, .irq_parent = 127, .chip = &stm32_exti_h_chip },
+ { .exti = 16, .irq_parent = 1, .chip = &stm32_exti_h_chip },
+ { .exti = 19, .irq_parent = 3, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 21, .irq_parent = 31, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 22, .irq_parent = 33, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 23, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 24, .irq_parent = 95, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 25, .irq_parent = 107, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 30, .irq_parent = 52, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 47, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 54, .irq_parent = 135, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 61, .irq_parent = 100, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 65, .irq_parent = 144, .chip = &stm32_exti_h_chip },
+ { .exti = 68, .irq_parent = 143, .chip = &stm32_exti_h_chip },
+ { .exti = 70, .irq_parent = 62, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 73, .irq_parent = 129, .chip = &stm32_exti_h_chip },
};
static const struct stm32_exti_drv_data stm32mp1_drv_data = {
@@ -196,22 +210,23 @@ static const struct stm32_exti_drv_data stm32mp1_drv_data = {
.irq_nr = ARRAY_SIZE(stm32mp1_desc_irq),
};
-static int stm32_exti_to_irq(const struct stm32_exti_drv_data *drv_data,
- irq_hw_number_t hwirq)
+static const struct
+stm32_desc_irq *stm32_exti_get_desc(const struct stm32_exti_drv_data *drv_data,
+ irq_hw_number_t hwirq)
{
- const struct stm32_desc_irq *desc_irq;
+ const struct stm32_desc_irq *desc = NULL;
int i;
if (!drv_data->desc_irqs)
- return -EINVAL;
+ return NULL;
for (i = 0; i < drv_data->irq_nr; i++) {
- desc_irq = &drv_data->desc_irqs[i];
- if (desc_irq->exti == hwirq)
- return desc_irq->irq_parent;
+ desc = &drv_data->desc_irqs[i];
+ if (desc->exti == hwirq)
+ break;
}
- return -EINVAL;
+ return desc;
}
static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
@@ -277,55 +292,24 @@ static int stm32_exti_set_type(struct irq_data *d,
return 0;
}
-static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data)
-{
- int ret, timeout = 0;
-
- if (!chip_data->host_data->hwlock)
- return 0;
-
- /*
- * Use the x_raw API since we are under spin_lock protection.
- * Do not use the x_timeout API because we are under irq_disable
- * mode (see __setup_irq())
- */
- do {
- ret = hwspin_trylock_raw(chip_data->host_data->hwlock);
- if (!ret)
- return 0;
-
- udelay(HWSPNLCK_RETRY_DELAY);
- timeout += HWSPNLCK_RETRY_DELAY;
- } while (timeout < HWSPNLCK_TIMEOUT);
-
- if (ret == -EBUSY)
- ret = -ETIMEDOUT;
-
- if (ret)
- pr_err("%s can't get hwspinlock (%d)\n", __func__, ret);
-
- return ret;
-}
-
-static void stm32_exti_hwspin_unlock(struct stm32_exti_chip_data *chip_data)
-{
- if (chip_data->host_data->hwlock)
- hwspin_unlock_raw(chip_data->host_data->hwlock);
-}
-
static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct stm32_exti_chip_data *chip_data = gc->private;
const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+ struct hwspinlock *hwlock = chip_data->host_data->hwlock;
u32 rtsr, ftsr;
int err;
irq_gc_lock(gc);
- err = stm32_exti_hwspin_lock(chip_data);
- if (err)
- goto unlock;
+ if (hwlock) {
+ err = hwspin_lock_timeout_in_atomic(hwlock, HWSPNLCK_TIMEOUT);
+ if (err) {
+ pr_err("%s can't get hwspinlock (%d)\n", __func__, err);
+ goto unlock;
+ }
+ }
rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
@@ -338,7 +322,8 @@ static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
unspinlock:
- stm32_exti_hwspin_unlock(chip_data);
+ if (hwlock)
+ hwspin_unlock_in_atomic(hwlock);
unlock:
irq_gc_unlock(gc);
@@ -504,15 +489,20 @@ static int stm32_exti_h_set_type(struct irq_data *d, unsigned int type)
{
struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
+ struct hwspinlock *hwlock = chip_data->host_data->hwlock;
void __iomem *base = chip_data->host_data->base;
u32 rtsr, ftsr;
int err;
raw_spin_lock(&chip_data->rlock);
- err = stm32_exti_hwspin_lock(chip_data);
- if (err)
- goto unlock;
+ if (hwlock) {
+ err = hwspin_lock_timeout_in_atomic(hwlock, HWSPNLCK_TIMEOUT);
+ if (err) {
+ pr_err("%s can't get hwspinlock (%d)\n", __func__, err);
+ goto unlock;
+ }
+ }
rtsr = readl_relaxed(base + stm32_bank->rtsr_ofst);
ftsr = readl_relaxed(base + stm32_bank->ftsr_ofst);
@@ -525,7 +515,8 @@ static int stm32_exti_h_set_type(struct irq_data *d, unsigned int type)
writel_relaxed(ftsr, base + stm32_bank->ftsr_ofst);
unspinlock:
- stm32_exti_hwspin_unlock(chip_data);
+ if (hwlock)
+ hwspin_unlock_in_atomic(hwlock);
unlock:
raw_spin_unlock(&chip_data->rlock);
@@ -628,30 +619,47 @@ static struct irq_chip stm32_exti_h_chip = {
.irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? stm32_exti_h_set_affinity : NULL,
};
+static struct irq_chip stm32_exti_h_chip_direct = {
+ .name = "stm32-exti-h-direct",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_wake = stm32_exti_h_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? irq_chip_set_affinity_parent : NULL,
+};
+
static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
unsigned int virq,
unsigned int nr_irqs, void *data)
{
struct stm32_exti_host_data *host_data = dm->host_data;
struct stm32_exti_chip_data *chip_data;
+ const struct stm32_desc_irq *desc;
struct irq_fwspec *fwspec = data;
struct irq_fwspec p_fwspec;
irq_hw_number_t hwirq;
- int p_irq, bank;
+ int bank;
hwirq = fwspec->param[0];
bank = hwirq / IRQS_PER_BANK;
chip_data = &host_data->chips_data[bank];
- irq_domain_set_hwirq_and_chip(dm, virq, hwirq,
- &stm32_exti_h_chip, chip_data);
- p_irq = stm32_exti_to_irq(host_data->drv_data, hwirq);
- if (p_irq >= 0) {
+ desc = stm32_exti_get_desc(host_data->drv_data, hwirq);
+ if (!desc)
+ return -EINVAL;
+
+ irq_domain_set_hwirq_and_chip(dm, virq, hwirq, desc->chip,
+ chip_data);
+ if (desc->irq_parent) {
p_fwspec.fwnode = dm->parent->fwnode;
p_fwspec.param_count = 3;
p_fwspec.param[0] = GIC_SPI;
- p_fwspec.param[1] = p_irq;
+ p_fwspec.param[1] = desc->irq_parent;
p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
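
The open-coded retry loop is replaced by hwspin_lock_timeout_in_atomic()/hwspin_unlock_in_atomic(), which busy-wait without sleeping or touching local IRQ state and are therefore safe under irq_gc_lock() or a raw spinlock; for the _in_atomic variant the timeout is in microseconds, so HWSPNLCK_TIMEOUT keeps its value. The pattern in isolation (a sketch, not new driver code):

static int sketch_set_type(struct hwspinlock *hwlock)
{
	int err = 0;

	if (hwlock) {
		err = hwspin_lock_timeout_in_atomic(hwlock, HWSPNLCK_TIMEOUT);
		if (err)
			return err;	/* could not serialize with coprocessor */
	}
	/* ... read-modify-write the shared RTSR/FTSR registers ... */
	if (hwlock)
		hwspin_unlock_in_atomic(hwlock);
	return 0;
}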
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 7e3ebf6ed2cd..b7cc5d6580d8 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments' K3 Interrupt Aggregator irqchip driver
*
- * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
* Lokesh Vutla <lokeshvutla@ti.com>
*/
@@ -433,8 +433,6 @@ static int ti_sci_inta_set_type(struct irq_data *data, unsigned int type)
default:
return -EINVAL;
}
-
- return -EINVAL;
}
static struct irq_chip ti_sci_inta_irq_chip = {
@@ -572,7 +570,7 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
inta->base = devm_ioremap_resource(dev, res);
if (IS_ERR(inta->base))
- return -ENODEV;
+ return PTR_ERR(inta->base);
domain = irq_domain_add_linear(dev_of_node(dev),
ti_sci_get_num_resources(inta->vint),
@@ -612,6 +610,6 @@ static struct platform_driver ti_sci_inta_irq_domain_driver = {
};
module_platform_driver(ti_sci_inta_irq_domain_driver);
-MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ticom>");
+MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>");
MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index 59d51a20bbd8..5ea148faf2ab 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments' K3 Interrupt Router irqchip driver
*
- * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
* Lokesh Vutla <lokeshvutla@ti.com>
*/
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 3c87d925f74c..bc235db8a4c5 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -27,7 +27,10 @@
#define VIC_IRQ_STATUS 0x00
#define VIC_FIQ_STATUS 0x04
+#define VIC_RAW_STATUS 0x08
#define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */
+#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */
+#define VIC_INT_ENABLE_CLEAR 0x14
#define VIC_INT_SOFT 0x18
#define VIC_INT_SOFT_CLEAR 0x1c
#define VIC_PROTECT 0x20
@@ -428,7 +431,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
vic_register(base, 0, irq_start, vic_sources, 0, node);
}
-void __init __vic_init(void __iomem *base, int parent_irq, int irq_start,
+static void __init __vic_init(void __iomem *base, int parent_irq, int irq_start,
u32 vic_sources, u32 resume_sources,
struct device_node *node)
{
@@ -481,27 +484,6 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
__vic_init(base, 0, irq_start, vic_sources, resume_sources, NULL);
}
-/**
- * vic_init_cascaded() - initialise a cascaded vectored interrupt controller
- * @base: iomem base address
- * @parent_irq: the parent IRQ we're cascaded off
- * @vic_sources: bitmask of interrupt sources to allow
- * @resume_sources: bitmask of interrupt sources to allow for resume
- *
- * This returns the base for the new interrupts or negative on error.
- */
-int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
- u32 vic_sources, u32 resume_sources)
-{
- struct vic_device *v;
-
- v = &vic_devices[vic_id];
- __vic_init(base, parent_irq, 0, vic_sources, resume_sources, NULL);
- /* Return out acquired base */
- return v->irq;
-}
-EXPORT_SYMBOL_GPL(vic_init_cascaded);
-
#ifdef CONFIG_OF
static int __init vic_of_init(struct device_node *node,
struct device_node *parent)
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 2b35e68bea82..1bb0e36c2bf3 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -10,8 +10,10 @@
#include <linux/acpi.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/irqchip.h>
+#include <linux/platform_device.h>
/*
* This special of_device_id is the sentinel at the end of the
@@ -29,3 +31,30 @@ void __init irqchip_init(void)
of_irq_init(__irqchip_of_table);
acpi_probe_device_table(irqchip);
}
+
+int platform_irqchip_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *par_np = of_irq_find_parent(np);
+ of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
+
+ if (!irq_init_cb)
+ return -EINVAL;
+
+ if (par_np == np)
+ par_np = NULL;
+
+ /*
+ * If there's a parent interrupt controller and none of the parent irq
+ * domains have been registered, that means the parent interrupt
+ * controller has not been initialized yet. It's not time for this
+ * interrupt controller to initialize. So, defer probe of this
+ * interrupt controller. The actual initialization callback of this
+ * interrupt controller can check for specific domains as necessary.
+ */
+ if (par_np && !irq_find_matching_host(np, DOMAIN_BUS_ANY))
+ return -EPROBE_DEFER;
+
+ return irq_init_cb(np, par_np);
+}
+EXPORT_SYMBOL_GPL(platform_irqchip_probe);
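
platform_irqchip_probe() is the common probe behind the new IRQCHIP_PLATFORM_DRIVER_BEGIN/IRQCHIP_MATCH/IRQCHIP_PLATFORM_DRIVER_END macros used by mtk-cirq, mtk-sysirq and qcom-pdc in this series, allowing those irqchips to be built as modules. A rough hand expansion for one of them (the real macros in include/linux/irqchip.h differ in detail):

static const struct of_device_id qcom_pdc_of_match[] = {
	{ .compatible = "qcom,pdc", .data = qcom_pdc_init },
	{}
};

static struct platform_driver qcom_pdc_driver = {
	.probe = platform_irqchip_probe,
	.driver = {
		.name = "qcom_pdc",
		.of_match_table = qcom_pdc_of_match,
	},
};
module_platform_driver(qcom_pdc_driver);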
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 6ae9e1f0819d..c1c5dfad57cc 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -11,9 +11,11 @@
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -430,4 +432,8 @@ fail:
return ret;
}
-IRQCHIP_DECLARE(qcom_pdc, "qcom,pdc", qcom_pdc_init);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_pdc)
+IRQCHIP_MATCH("qcom,pdc", qcom_pdc_init)
+IRQCHIP_PLATFORM_DRIVER_END(qcom_pdc)
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Domain Controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index f8b8d6e313ee..9b5e67664ba3 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -11,19 +11,19 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_data/leds-s3c24xx.h>
#include <mach/regs-gpio.h>
-#include <plat/gpio-cfg.h>
/* our context */
struct s3c24xx_gpio_led {
struct led_classdev cdev;
struct s3c24xx_led_platdata *pdata;
+ struct gpio_desc *gpiod;
};
static inline struct s3c24xx_gpio_led *to_gpio(struct led_classdev *led_cdev)
@@ -35,20 +35,8 @@ static void s3c24xx_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct s3c24xx_gpio_led *led = to_gpio(led_cdev);
- struct s3c24xx_led_platdata *pd = led->pdata;
- int state = (value ? 1 : 0) ^ (pd->flags & S3C24XX_LEDF_ACTLOW);
- /* there will be a short delay between setting the output and
- * going from output to input when using tristate. */
-
- gpio_set_value(pd->gpio, state);
-
- if (pd->flags & S3C24XX_LEDF_TRISTATE) {
- if (value)
- gpio_direction_output(pd->gpio, state);
- else
- gpio_direction_input(pd->gpio);
- }
+ gpiod_set_value(led->gpiod, !!value);
}
static int s3c24xx_led_probe(struct platform_device *dev)
@@ -69,22 +57,12 @@ static int s3c24xx_led_probe(struct platform_device *dev)
led->pdata = pdata;
- ret = devm_gpio_request(&dev->dev, pdata->gpio, "S3C24XX_LED");
- if (ret < 0)
- return ret;
-
- /* no point in having a pull-up if we are always driving */
-
- s3c_gpio_setpull(pdata->gpio, S3C_GPIO_PULL_NONE);
-
- if (pdata->flags & S3C24XX_LEDF_TRISTATE)
- gpio_direction_input(pdata->gpio);
- else
- gpio_direction_output(pdata->gpio,
- pdata->flags & S3C24XX_LEDF_ACTLOW ? 1 : 0);
+ /* Default to off */
+ led->gpiod = devm_gpiod_get(&dev->dev, NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(led->gpiod))
+ return PTR_ERR(led->gpiod);
/* register our new led device */
-
ret = devm_led_classdev_register(&dev->dev, &led->cdev);
if (ret < 0)
dev_err(&dev->dev, "led_classdev_register failed\n");
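
The conversion to the GPIO descriptor API moves polarity out of the driver: whether the LED line is active low now comes from the firmware description or a board lookup table, and gpiod_set_value() applies the inversion, which is why the ACTLOW/TRISTATE handling disappears. A hedged board-file sketch (chip label, offset and dev_id below are made up, not from this commit):

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table s3c24xx_led_gpio_table = {
	.dev_id = "s3c24xx_led.0",		/* hypothetical device name */
	.table = {
		/* key, chip offset, con_id, idx, flags */
		GPIO_LOOKUP_IDX("GPIOB", 5, NULL, 0, GPIO_ACTIVE_LOW),
		{ }
	},
};

With such a table registered via gpiod_add_lookup_table(), the devm_gpiod_get(&dev->dev, NULL, GPIOD_OUT_LOW) call in the probe above resolves the line with its polarity already applied.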
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index db38a68abb6c..fe78bf0fdce5 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -236,10 +236,6 @@ err_dev:
return tgt_dev;
}
-static const struct block_device_operations nvm_fops = {
- .owner = THIS_MODULE,
-};
-
static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
struct nvm_tgt_type *tt;
@@ -380,7 +376,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
goto err_dev;
}
- tqueue = blk_alloc_queue(tt->make_rq, dev->q->node);
+ tqueue = blk_alloc_queue(dev->q->node);
if (!tqueue) {
ret = -ENOMEM;
goto err_disk;
@@ -390,7 +386,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
tdisk->flags = GENHD_FL_EXT_DEVT;
tdisk->major = 0;
tdisk->first_minor = 0;
- tdisk->fops = &nvm_fops;
+ tdisk->fops = tt->bops;
tdisk->queue = tqueue;
targetdata = tt->init(tgt_dev, tdisk, create->flags);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 6e677ff62cc9..b6246f73895c 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -47,9 +47,9 @@ static struct pblk_global_caches pblk_caches = {
struct bio_set pblk_bio_set;
-static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
+static blk_qc_t pblk_submit_bio(struct bio *bio)
{
- struct pblk *pblk = q->queuedata;
+ struct pblk *pblk = bio->bi_disk->queue->queuedata;
if (bio_op(bio) == REQ_OP_DISCARD) {
pblk_discard(pblk, bio);
@@ -63,7 +63,7 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
* constraint. Writes can be of arbitrary size.
*/
if (bio_data_dir(bio) == READ) {
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
pblk_submit_read(pblk, bio);
} else {
/* Prevent deadlock in the case of a modest LUN configuration
@@ -71,7 +71,7 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
* leaves at least 256KB available for user I/O.
*/
if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}
@@ -79,6 +79,12 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
+static const struct block_device_operations pblk_bops = {
+ .owner = THIS_MODULE,
+ .submit_bio = pblk_submit_bio,
+};
+
+
static size_t pblk_trans_map_size(struct pblk *pblk)
{
int entry_size = 8;
@@ -1280,7 +1286,7 @@ static struct nvm_tgt_type tt_pblk = {
.name = "pblk",
.version = {1, 0, 0},
- .make_rq = pblk_make_rq,
+ .bops = &pblk_bops,
.capacity = pblk_capacity,
.init = pblk_init,
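
This follows the 5.9 block-layer rework: a bio-based driver no longer hands a make_request_fn to blk_alloc_queue(); it implements block_device_operations::submit_bio, and the queue is reached through bio->bi_disk. A minimal skeleton of the new shape (illustrative; error paths elided, "mydrv" is hypothetical):

static blk_qc_t mydrv_submit_bio(struct bio *bio)
{
	struct mydrv *drv = bio->bi_disk->private_data;

	blk_queue_split(&bio);		/* honor queue limits first */
	/* ... queue or process the bio using drv ... */
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static const struct block_device_operations mydrv_bops = {
	.owner	    = THIS_MODULE,
	.submit_bio = mydrv_submit_bio,
};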
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 140927ebf41e..c28537a489bc 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -320,7 +320,7 @@ split_retry:
split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
&pblk_bio_set);
bio_chain(split_bio, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
/* New bio contains first N sectors of the previous one, so
* we can continue to use existing rqd, but we need to shrink
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 92d142d2b75f..49af60bdac92 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -382,7 +382,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
dma_set_max_seg_size(&dev->ofdev.dev, 65536);
dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff);
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && defined(CONFIG_DMA_OPS)
/* Set the DMA ops to the ones from the PCI device, this could be
* fishy if we didn't know that on PowerMac it's always direct ops
* or iommu ops that will work fine
@@ -391,7 +391,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
*/
dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_PCI && CONFIG_DMA_OPS */
#ifdef DEBUG
printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 921888df6764..30ba3573626c 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -27,7 +27,7 @@ config BLK_DEV_MD
More information about Software RAID on Linux is contained in the
Software RAID mini-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. There you will also learn
+ <https://www.tldp.org/docs.html#howto>. There you will also learn
where to get the supporting user space utilities raidtools.
If unsure, say N.
@@ -71,7 +71,7 @@ config MD_RAID0
Information about Software RAID on Linux is contained in the
Software-RAID mini-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. There you will also
+ <https://www.tldp.org/docs.html#howto>. There you will also
learn where to get the supporting user space utilities raidtools.
To compile this as a module, choose M here: the module
@@ -93,7 +93,7 @@ config MD_RAID1
Information about Software RAID on Linux is contained in the
Software-RAID mini-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. There you will also
+ <https://www.tldp.org/docs.html#howto>. There you will also
learn where to get the supporting user space utilities raidtools.
If you want to use such a RAID-1 set, say Y. To compile this code
@@ -148,7 +148,7 @@ config MD_RAID456
Information about Software RAID on Linux is contained in the
Software-RAID mini-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. There you will also
+ <https://www.tldp.org/docs.html#howto>. There you will also
learn where to get the supporting user space utilities raidtools.
If you want to use such a RAID-4/RAID-5/RAID-6 set, say Y. To
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index bf7dd96db9b3..d1ca4d059c20 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -27,7 +27,7 @@ config BCACHE_CLOSURES_DEBUG
interface to list them, which makes it possible to see asynchronous
operations that get stuck.
-config BCACHE_ASYNC_REGISTRAION
+config BCACHE_ASYNC_REGISTRATION
bool "Asynchronous device registration (EXPERIMENTAL)"
depends on BCACHE
help
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
index fd714628da6a..5b87e59676b8 100644
--- a/drivers/md/bcache/Makefile
+++ b/drivers/md/bcache/Makefile
@@ -4,4 +4,4 @@ obj-$(CONFIG_BCACHE) += bcache.o
bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\
io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
- util.o writeback.o
+ util.o writeback.o features.o
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index a1df0d95151c..52035a78d836 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -87,7 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
{
struct cache *ca;
struct bucket *b;
- unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
+ unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
unsigned int i;
int r;
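
With the large_bucket feature, bucket sizes grow from 16 to 32 bits, so intermediate products like nbuckets * bucket_size can exceed what an unsigned int assignment preserves; this and the movinggc change below widen such locals defensively. A deliberately exaggerated standalone demo of the truncation being avoided:

#include <stdio.h>

int main(void)
{
	unsigned long nbuckets = 1UL << 31;	/* extreme values, for effect */
	unsigned long bucket_size = 4096;	/* sectors */

	unsigned int bad = nbuckets * bucket_size / 1024;	/* truncated */
	unsigned long good = nbuckets * bucket_size / 1024;

	printf("unsigned int: %u, unsigned long: %lu\n", bad, good);
	return 0;
}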
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 221e0191b687..4fd03d2496d8 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -264,7 +264,7 @@ struct bcache_device {
#define BCACHE_DEV_UNLINK_DONE 2
#define BCACHE_DEV_WB_RUNNING 3
#define BCACHE_DEV_RATE_DW_RUNNING 4
- unsigned int nr_stripes;
+ int nr_stripes;
unsigned int stripe_size;
atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes;
@@ -762,11 +762,32 @@ struct bbio {
#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
#define block_bytes(c) ((c)->sb.block_size << 9)
-#define prios_per_bucket(c) \
- ((bucket_bytes(c) - sizeof(struct prio_set)) / \
+static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
+{
+ unsigned int n, max_pages;
+
+ max_pages = min_t(unsigned int,
+ __rounddown_pow_of_two(USHRT_MAX) / PAGE_SECTORS,
+ MAX_ORDER_NR_PAGES);
+
+ n = sb->bucket_size / PAGE_SECTORS;
+ if (n > max_pages)
+ n = max_pages;
+
+ return n;
+}
+
+static inline unsigned int meta_bucket_bytes(struct cache_sb *sb)
+{
+ return meta_bucket_pages(sb) << PAGE_SHIFT;
+}
+
+#define prios_per_bucket(ca) \
+ ((meta_bucket_bytes(&(ca)->sb) - sizeof(struct prio_set)) / \
sizeof(struct bucket_disk))
-#define prio_buckets(c) \
- DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
+
+#define prio_buckets(ca) \
+ DIV_ROUND_UP((size_t) (ca)->sb.nbuckets, prios_per_bucket(ca))
static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
@@ -929,7 +950,7 @@ static inline void closure_bio_submit(struct cache_set *c,
bio_endio(bio);
return;
}
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
/*
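
meta_bucket_pages() caps the size of metadata buffers (prios, journal reads, verify data) even when data buckets are huge: the limit is the smaller of __rounddown_pow_of_two(USHRT_MAX)/PAGE_SECTORS and MAX_ORDER_NR_PAGES. A standalone evaluation under typical x86-64 assumptions (4K pages, MAX_ORDER 11; both assumed, not from the patch):

#include <stdio.h>

#define PAGE_SECTORS		8	/* 4K pages / 512B sectors */
#define MAX_ORDER_NR_PAGES	1024	/* 1 << (MAX_ORDER - 1), MAX_ORDER=11 */
#define USHRT_MAX_POW2		32768	/* __rounddown_pow_of_two(65535) */

static unsigned int meta_bucket_pages(unsigned int bucket_sectors)
{
	unsigned int max_pages = USHRT_MAX_POW2 / PAGE_SECTORS;	/* 4096 */
	unsigned int n = bucket_sectors / PAGE_SECTORS;

	if (max_pages > MAX_ORDER_NR_PAGES)
		max_pages = MAX_ORDER_NR_PAGES;			/* 1024 */

	return n > max_pages ? max_pages : n;
}

int main(void)
{
	/* a 64Ki-sector (32MB) bucket still uses only 1024 pages (4MB) */
	printf("%u pages\n", meta_bucket_pages(65536));
	return 0;
}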
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 4995fcaefe29..67a2c47f4201 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -322,7 +322,7 @@ int bch_btree_keys_alloc(struct btree_keys *b,
b->page_order = page_order;
- t->data = (void *) __get_free_pages(gfp, b->page_order);
+ t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order);
if (!t->data)
goto err;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 6548a601edf0..3d8bd0692af3 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -738,7 +738,7 @@ void bch_btree_cache_free(struct cache_set *c)
if (c->verify_data)
list_move(&c->verify_data->list, &c->btree_cache);
- free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
+ free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->sb)));
#endif
list_splice(&c->btree_cache_freeable,
@@ -785,7 +785,15 @@ int bch_btree_cache_alloc(struct cache_set *c)
mutex_init(&c->verify_lock);
c->verify_ondisk = (void *)
- __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
+ __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(meta_bucket_pages(&c->sb)));
+ if (!c->verify_ondisk) {
+ /*
+ * Don't worry about the mca_rereserve buckets
+ * allocated in the previous for-loop; they will be
+ * handled properly in bch_cache_set_unregister().
+ */
+ return -ENOMEM;
+ }
c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
@@ -959,7 +967,7 @@ err:
* bch_btree_node_get - find a btree node in the cache and lock it, reading it
* in from disk if necessary.
*
- * If IO is necessary and running under generic_make_request, returns -EAGAIN.
+ * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
*
* The btree node will have either a read or a write lock held, depending on
* level and op->lock.
diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
new file mode 100644
index 000000000000..4442df48d28c
--- /dev/null
+++ b/drivers/md/bcache/features.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Feature set bits and string conversion.
+ * Inspired by ext4's features compat/incompat/ro_compat related code.
+ *
+ * Copyright 2020 Coly Li <colyli@suse.de>
+ *
+ */
+#include <linux/bcache.h>
+#include "bcache.h"
+#include "features.h"
+
+struct feature {
+ int compat;
+ unsigned int mask;
+ const char *string;
+};
+
+static struct feature feature_list[] = {
+ {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
+ "large_bucket"},
+ {0, 0, 0 },
+};
+
+#define compose_feature_string(type) \
+({ \
+ struct feature *f; \
+ bool first = true; \
+ \
+ for (f = &feature_list[0]; f->compat != 0; f++) { \
+ if (f->compat != BCH_FEATURE_ ## type) \
+ continue; \
+ if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) { \
+ if (first) { \
+ out += snprintf(out, buf + size - out, \
+ "["); \
+ } else { \
+ out += snprintf(out, buf + size - out, \
+ " ["); \
+ } \
+ } else if (!first) { \
+ out += snprintf(out, buf + size - out, " "); \
+ } \
+ \
+ out += snprintf(out, buf + size - out, "%s", f->string);\
+ \
+ if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) \
+ out += snprintf(out, buf + size - out, "]"); \
+ \
+ first = false; \
+ } \
+ if (!first) \
+ out += snprintf(out, buf + size - out, "\n"); \
+})
+
+int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size)
+{
+ char *out = buf;
+ compose_feature_string(COMPAT);
+ return out - buf;
+}
+
+int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size)
+{
+ char *out = buf;
+ compose_feature_string(RO_COMPAT);
+ return out - buf;
+}
+
+int bch_print_cache_set_feature_incompat(struct cache_set *c, char *buf, int size)
+{
+ char *out = buf;
+ compose_feature_string(INCOMPAT);
+ return out - buf;
+}
diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h
new file mode 100644
index 000000000000..a1653c478041
--- /dev/null
+++ b/drivers/md/bcache/features.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _BCACHE_FEATURES_H
+#define _BCACHE_FEATURES_H
+
+#include <linux/bcache.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#define BCH_FEATURE_COMPAT 0
+#define BCH_FEATURE_RO_COMPAT 1
+#define BCH_FEATURE_INCOMPAT 2
+#define BCH_FEATURE_TYPE_MASK 0x03
+
+/* Feature set definition */
+/* Incompat feature set */
+#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET 0x0001 /* 32bit bucket size */
+
+#define BCH_FEATURE_COMPAT_SUUP 0
+#define BCH_FEATURE_RO_COMPAT_SUUP 0
+#define BCH_FEATURE_INCOMPAT_SUUP BCH_FEATURE_INCOMPAT_LARGE_BUCKET
+
+#define BCH_HAS_COMPAT_FEATURE(sb, mask) \
+ ((sb)->feature_compat & (mask))
+#define BCH_HAS_RO_COMPAT_FEATURE(sb, mask) \
+ ((sb)->feature_ro_compat & (mask))
+#define BCH_HAS_INCOMPAT_FEATURE(sb, mask) \
+ ((sb)->feature_incompat & (mask))
+
+#define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \
+static inline int bch_has_feature_##name(struct cache_sb *sb) \
+{ \
+ return (((sb)->feature_compat & \
+ BCH##_FEATURE_COMPAT_##flagname) != 0); \
+} \
+static inline void bch_set_feature_##name(struct cache_sb *sb) \
+{ \
+ (sb)->feature_compat |= \
+ BCH##_FEATURE_COMPAT_##flagname; \
+} \
+static inline void bch_clear_feature_##name(struct cache_sb *sb) \
+{ \
+ (sb)->feature_compat &= \
+ ~BCH##_FEATURE_COMPAT_##flagname; \
+}
+
+#define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
+static inline int bch_has_feature_##name(struct cache_sb *sb) \
+{ \
+ return (((sb)->feature_ro_compat & \
+ BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \
+} \
+static inline void bch_set_feature_##name(struct cache_sb *sb) \
+{ \
+ (sb)->feature_ro_compat |= \
+ BCH##_FEATURE_RO_COMPAT_##flagname; \
+} \
+static inline void bch_clear_feature_##name(struct cache_sb *sb) \
+{ \
+ (sb)->feature_ro_compat &= \
+ ~BCH##_FEATURE_RO_COMPAT_##flagname; \
+}
+
+#define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \
+static inline int bch_has_feature_##name(struct cache_sb *sb) \
+{ \
+ return (((sb)->feature_incompat & \
+ BCH##_FEATURE_INCOMPAT_##flagname) != 0); \
+} \
+static inline void bch_set_feature_##name(struct cache_sb *sb) \
+{ \
+ (sb)->feature_incompat |= \
+ BCH##_FEATURE_INCOMPAT_##flagname; \
+} \
+static inline void bch_clear_feature_##name(struct cache_sb *sb) \
+{ \
+ (sb)->feature_incompat &= \
+ ~BCH##_FEATURE_INCOMPAT_##flagname; \
+}
+
+BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
+
+int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
+int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);
+int bch_print_cache_set_feature_incompat(struct cache_set *c, char *buf, int size);
+
+#endif
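
For reference, BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET) above pastes out to the following accessors (hand expansion):

static inline int bch_has_feature_large_bucket(struct cache_sb *sb)
{
	return (sb->feature_incompat & BCH_FEATURE_INCOMPAT_LARGE_BUCKET) != 0;
}
static inline void bch_set_feature_large_bucket(struct cache_sb *sb)
{
	sb->feature_incompat |= BCH_FEATURE_INCOMPAT_LARGE_BUCKET;
}
static inline void bch_clear_feature_large_bucket(struct cache_sb *sb)
{
	sb->feature_incompat &= ~BCH_FEATURE_INCOMPAT_LARGE_BUCKET;
}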
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index b25ee33b0d0b..a14a445618b4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -26,7 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
+ bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->sb));
return bio;
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 90aac4e2333f..77fbfd52edcf 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -217,10 +217,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
*/
pr_debug("falling back to linear search\n");
- for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
- l < ca->sb.njournal_buckets;
- l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,
- l + 1))
+ for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
if (read_bucket(l))
goto bsearch;
@@ -999,8 +996,8 @@ int bch_journal_alloc(struct cache_set *c)
j->w[1].c = c;
if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
- !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
- !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
+ !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
+ !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
return -ENOMEM;
return 0;
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7891fb512736..5872d6470470 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -145,8 +145,8 @@ static void read_moving(struct cache_set *c)
continue;
}
- io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
- * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ io = kzalloc(struct_size(io, bio.bio.bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
GFP_KERNEL);
if (!io)
goto err;
@@ -206,8 +206,8 @@ void bch_moving_gc(struct cache_set *c)
mutex_lock(&c->bucket_lock);
for_each_cache(ca, c, i) {
- unsigned int sectors_to_move = 0;
- unsigned int reserve_sectors = ca->sb.bucket_size *
+ unsigned long sectors_to_move = 0;
+ unsigned long reserve_sectors = ca->sb.bucket_size *
fifo_used(&ca->free[RESERVE_MOVINGGC]);
ca->heap.used = 0;
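
struct_size(io, bio.bio.bi_inline_vecs, n) replaces the open-coded sizeof sum; in the kernel it also saturates instead of wrapping on overflow (include/linux/overflow.h). A plain-C model of what it computes (the demo struct is made up):

#include <stdio.h>

struct moving_io_like {
	long	header[4];
	char	vecs[];		/* flexible array member */
};

/* unchecked plain-C equivalent; the kernel macro adds overflow saturation */
#define struct_size_demo(p, member, n) \
	(sizeof(*(p)) + sizeof((p)->member[0]) * (size_t)(n))

int main(void)
{
	struct moving_io_like *io = NULL;	/* sizeof never dereferences */

	printf("%zu bytes for 16 vecs\n", struct_size_demo(io, vecs, 16));
	return 0;
}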
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 7acf024e99f3..c7cadaafa947 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -668,7 +668,9 @@ static void backing_request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- bio_end_io_acct(s->orig_bio, s->start_time);
+ /* Count on bcache device */
+ disk_end_io_acct(s->d->disk, bio_op(s->orig_bio), s->start_time);
+
trace_bcache_request_end(s->d, s->orig_bio);
s->orig_bio->bi_status = s->iop.status;
bio_endio(s->orig_bio);
@@ -728,8 +730,8 @@ static inline struct search *search_alloc(struct bio *bio,
s->recoverable = 1;
s->write = op_is_write(bio_op(bio));
s->read_dirty_data = 0;
- s->start_time = bio_start_io_acct(bio);
-
+ /* Count on the bcache device */
+ s->start_time = disk_start_io_acct(d->disk, bio_sectors(bio), bio_op(bio));
s->iop.c = d->c;
s->iop.bio = NULL;
s->iop.inode = d->id;
@@ -1080,7 +1082,8 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_end_io = ddip->bi_end_io;
bio->bi_private = ddip->bi_private;
- bio_end_io_acct(bio, ddip->start_time);
+ /* Count on the bcache device */
+ disk_end_io_acct(ddip->d->disk, bio_op(bio), ddip->start_time);
if (bio->bi_status) {
struct cached_dev *dc = container_of(ddip->d,
@@ -1105,7 +1108,8 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
*/
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
ddip->d = d;
- ddip->start_time = bio_start_io_acct(bio);
+ /* Count on the bcache device */
+ ddip->start_time = disk_start_io_acct(d->disk, bio_sectors(bio), bio_op(bio));
ddip->bi_end_io = bio->bi_end_io;
ddip->bi_private = bio->bi_private;
bio->bi_end_io = detached_dev_end_io;
@@ -1115,7 +1119,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
!blk_queue_discard(bdev_get_queue(dc->bdev)))
bio->bi_end_io(bio);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
static void quit_max_writeback_rate(struct cache_set *c,
@@ -1158,7 +1162,7 @@ static void quit_max_writeback_rate(struct cache_set *c,
/* Cached devices - read & write stuff */
-blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
+blk_qc_t cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct bcache_device *d = bio->bi_disk->private_data;
@@ -1197,7 +1201,7 @@ blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
if (!bio->bi_iter.bi_size) {
/*
* can't call bch_journal_meta from under
- * generic_make_request
+ * submit_bio_noacct
*/
continue_at_nobarrier(&s->cl,
cached_dev_nodata,
@@ -1228,36 +1232,8 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
-static int cached_dev_congested(void *data, int bits)
-{
- struct bcache_device *d = data;
- struct cached_dev *dc = container_of(d, struct cached_dev, disk);
- struct request_queue *q = bdev_get_queue(dc->bdev);
- int ret = 0;
-
- if (bdi_congested(q->backing_dev_info, bits))
- return 1;
-
- if (cached_dev_get(dc)) {
- unsigned int i;
- struct cache *ca;
-
- for_each_cache(ca, d->c, i) {
- q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(q->backing_dev_info, bits);
- }
-
- cached_dev_put(dc);
- }
-
- return ret;
-}
-
void bch_cached_dev_request_init(struct cached_dev *dc)
{
- struct gendisk *g = dc->disk.disk;
-
- g->queue->backing_dev_info->congested_fn = cached_dev_congested;
dc->disk.cache_miss = cached_dev_cache_miss;
dc->disk.ioctl = cached_dev_ioctl;
}
@@ -1291,7 +1267,7 @@ static void flash_dev_nodata(struct closure *cl)
continue_at(cl, search_free, NULL);
}
-blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio)
+blk_qc_t flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
@@ -1311,8 +1287,7 @@ blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio)
if (!bio->bi_iter.bi_size) {
/*
- * can't call bch_journal_meta from under
- * generic_make_request
+ * can't call bch_journal_meta from under submit_bio_noacct
*/
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
@@ -1342,27 +1317,8 @@ static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
return -ENOTTY;
}
-static int flash_dev_congested(void *data, int bits)
-{
- struct bcache_device *d = data;
- struct request_queue *q;
- struct cache *ca;
- unsigned int i;
- int ret = 0;
-
- for_each_cache(ca, d->c, i) {
- q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(q->backing_dev_info, bits);
- }
-
- return ret;
-}
-
void bch_flash_dev_request_init(struct bcache_device *d)
{
- struct gendisk *g = d->disk;
-
- g->queue->backing_dev_info->congested_fn = flash_dev_congested;
d->cache_miss = flash_dev_cache_miss;
d->ioctl = flash_dev_ioctl;
}
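bio_start_io_acct()/bio_end_io_acct() charge the I/O to whatever disk the bio currently points at; a stacking driver like bcache wants the accounting on its own gendisk, before the bio is retargeted at the backing device. disk_start_io_acct()/disk_end_io_acct() take the gendisk explicitly. A minimal sketch of the pairing, assuming the usual jiffies-based start time:

    unsigned long start = disk_start_io_acct(d->disk, bio_sectors(bio),
                                             bio_op(bio));
    /* ... remap and submit the bio to the backing device ... */
    disk_end_io_acct(d->disk, bio_op(bio), start);    /* on completion */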
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index bb005c93dd72..82b38366a95d 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -37,10 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c);
void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
-blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio);
+blk_qc_t cached_dev_submit_bio(struct bio *bio);
void bch_flash_dev_request_init(struct bcache_device *d);
-blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio);
+blk_qc_t flash_dev_submit_bio(struct bio *bio);
extern struct kmem_cache *bch_search_cache;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 2014016f9a60..1bbdc410ee3c 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -13,6 +13,7 @@
#include "extents.h"
#include "request.h"
#include "writeback.h"
+#include "features.h"
#include <linux/blkdev.h>
#include <linux/debugfs.h>
@@ -59,6 +60,92 @@ struct workqueue_struct *bch_journal_wq;
/* Superblock */
+static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s)
+{
+ unsigned int bucket_size = le16_to_cpu(s->bucket_size);
+
+ if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
+ bch_has_feature_large_bucket(sb))
+ bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
+
+ return bucket_size;
+}
+
+static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev,
+ struct cache_sb_disk *s)
+{
+ const char *err;
+ unsigned int i;
+
+ sb->first_bucket = le16_to_cpu(s->first_bucket);
+ sb->nbuckets = le64_to_cpu(s->nbuckets);
+ sb->bucket_size = get_bucket_size(sb, s);
+
+ sb->nr_in_set = le16_to_cpu(s->nr_in_set);
+ sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
+
+ err = "Too many journal buckets";
+ if (sb->keys > SB_JOURNAL_BUCKETS)
+ goto err;
+
+ err = "Too many buckets";
+ if (sb->nbuckets > LONG_MAX)
+ goto err;
+
+ err = "Not enough buckets";
+ if (sb->nbuckets < 1 << 7)
+ goto err;
+
+ err = "Bad block size (not power of 2)";
+ if (!is_power_of_2(sb->block_size))
+ goto err;
+
+ err = "Bad block size (larger than page size)";
+ if (sb->block_size > PAGE_SECTORS)
+ goto err;
+
+ err = "Bad bucket size (not power of 2)";
+ if (!is_power_of_2(sb->bucket_size))
+ goto err;
+
+ err = "Bad bucket size (smaller than page size)";
+ if (sb->bucket_size < PAGE_SECTORS)
+ goto err;
+
+ err = "Invalid superblock: device too small";
+ if (get_capacity(bdev->bd_disk) <
+ sb->bucket_size * sb->nbuckets)
+ goto err;
+
+ err = "Bad UUID";
+ if (bch_is_zero(sb->set_uuid, 16))
+ goto err;
+
+ err = "Bad cache device number in set";
+ if (!sb->nr_in_set ||
+ sb->nr_in_set <= sb->nr_this_dev ||
+ sb->nr_in_set > MAX_CACHES_PER_SET)
+ goto err;
+
+ err = "Journal buckets not sequential";
+ for (i = 0; i < sb->keys; i++)
+ if (sb->d[i] != sb->first_bucket + i)
+ goto err;
+
+ err = "Too many journal buckets";
+ if (sb->first_bucket + sb->keys > sb->nbuckets)
+ goto err;
+
+ err = "Invalid superblock: first bucket comes before end of super";
+ if (sb->first_bucket * sb->bucket_size < 16)
+ goto err;
+
+ err = NULL;
+err:
+ return err;
+}
+
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
struct cache_sb_disk **res)
{
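With the large_bucket feature the on-disk bucket size is a 32-bit sector count split across two 16-bit fields, which get_bucket_size() above reassembles. A worked example with hypothetical values:

    u32 lo = le16_to_cpu(s->bucket_size);       /* say 0 */
    u32 hi = le16_to_cpu(s->bucket_size_hi);    /* say 2 */
    u32 bucket_size = lo | (hi << 16);          /* 131072 sectors == 64 MiB */

A 64 MiB bucket cannot be represented in the legacy 16-bit field, which tops out at 65535 sectors (just under 32 MiB).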
@@ -84,7 +171,6 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
sb->flags = le64_to_cpu(s->flags);
sb->seq = le64_to_cpu(s->seq);
sb->last_mount = le32_to_cpu(s->last_mount);
- sb->first_bucket = le16_to_cpu(s->first_bucket);
sb->keys = le16_to_cpu(s->keys);
for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
@@ -101,10 +187,6 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
if (memcmp(sb->magic, bcache_magic, 16))
goto err;
- err = "Too many journal buckets";
- if (sb->keys > SB_JOURNAL_BUCKETS)
- goto err;
-
err = "Bad checksum";
if (s->csum != csum_set(s))
goto err;
@@ -124,6 +206,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
sb->data_offset = BDEV_DATA_START_DEFAULT;
break;
case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
+ case BCACHE_SB_VERSION_BDEV_WITH_FEATURES:
sb->data_offset = le64_to_cpu(s->data_offset);
err = "Bad data offset";
@@ -133,55 +216,21 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
break;
case BCACHE_SB_VERSION_CDEV:
case BCACHE_SB_VERSION_CDEV_WITH_UUID:
- sb->nbuckets = le64_to_cpu(s->nbuckets);
- sb->bucket_size = le16_to_cpu(s->bucket_size);
-
- sb->nr_in_set = le16_to_cpu(s->nr_in_set);
- sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
-
- err = "Too many buckets";
- if (sb->nbuckets > LONG_MAX)
- goto err;
-
- err = "Not enough buckets";
- if (sb->nbuckets < 1 << 7)
- goto err;
-
- err = "Bad block/bucket size";
- if (!is_power_of_2(sb->block_size) ||
- sb->block_size > PAGE_SECTORS ||
- !is_power_of_2(sb->bucket_size) ||
- sb->bucket_size < PAGE_SECTORS)
- goto err;
-
- err = "Invalid superblock: device too small";
- if (get_capacity(bdev->bd_disk) <
- sb->bucket_size * sb->nbuckets)
- goto err;
-
- err = "Bad UUID";
- if (bch_is_zero(sb->set_uuid, 16))
- goto err;
-
- err = "Bad cache device number in set";
- if (!sb->nr_in_set ||
- sb->nr_in_set <= sb->nr_this_dev ||
- sb->nr_in_set > MAX_CACHES_PER_SET)
- goto err;
-
- err = "Journal buckets not sequential";
- for (i = 0; i < sb->keys; i++)
- if (sb->d[i] != sb->first_bucket + i)
- goto err;
-
- err = "Too many journal buckets";
- if (sb->first_bucket + sb->keys > sb->nbuckets)
+ err = read_super_common(sb, bdev, s);
+ if (err)
goto err;
-
- err = "Invalid superblock: first bucket comes before end of super";
- if (sb->first_bucket * sb->bucket_size < 16)
+ break;
+ case BCACHE_SB_VERSION_CDEV_WITH_FEATURES:
+ /*
+ * Feature bits are needed in read_super_common(),
+ * so convert them first.
+ */
+ sb->feature_compat = le64_to_cpu(s->feature_compat);
+ sb->feature_incompat = le64_to_cpu(s->feature_incompat);
+ sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+ err = read_super_common(sb, bdev, s);
+ if (err)
goto err;
-
break;
default:
err = "Unsupported superblock version";
@@ -217,7 +266,6 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
offset_in_page(out));
out->offset = cpu_to_le64(sb->offset);
- out->version = cpu_to_le64(sb->version);
memcpy(out->uuid, sb->uuid, 16);
memcpy(out->set_uuid, sb->set_uuid, 16);
@@ -233,6 +281,13 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
for (i = 0; i < sb->keys; i++)
out->d[i] = cpu_to_le64(sb->d[i]);
+ if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+ out->feature_compat = cpu_to_le64(sb->feature_compat);
+ out->feature_incompat = cpu_to_le64(sb->feature_incompat);
+ out->feature_ro_compat = cpu_to_le64(sb->feature_ro_compat);
+ }
+
+ out->version = cpu_to_le64(sb->version);
out->csum = csum_set(out);
pr_debug("ver %llu, flags %llu, seq %llu\n",
@@ -289,17 +344,20 @@ void bcache_write_super(struct cache_set *c)
{
struct closure *cl = &c->sb_write;
struct cache *ca;
- unsigned int i;
+ unsigned int i, version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
down(&c->sb_write_mutex);
closure_init(cl, &c->cl);
c->sb.seq++;
+ if (c->sb.version > version)
+ version = c->sb.version;
+
for_each_cache(ca, c, i) {
struct bio *bio = &ca->sb_bio;
- ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
+ ca->sb.version = version;
ca->sb.seq = c->sb.seq;
ca->sb.last_mount = c->sb.last_mount;
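The version selection keeps BCACHE_SB_VERSION_CDEV_WITH_UUID as a floor but never downgrades a superblock created with a higher version, so feature bits stay paired with a version that declares them:

    /* effectively: version = max(c->sb.version, CDEV_WITH_UUID) */
    unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
    if (c->sb.version > version)
            version = c->sb.version;

In __write_super() the version field is now copied after the feature words, so both land in the output buffer before csum_set() runs over it.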
@@ -423,6 +481,7 @@ static int __uuid_write(struct cache_set *c)
BKEY_PADDED(key) k;
struct closure cl;
struct cache *ca;
+ unsigned int size;
closure_init_stack(&cl);
lockdep_assert_held(&bch_register_lock);
@@ -430,7 +489,8 @@ static int __uuid_write(struct cache_set *c)
if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
return 1;
- SET_KEY_SIZE(&k.key, c->sb.bucket_size);
+ size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
+ SET_KEY_SIZE(&k.key, size);
uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
closure_sync(&cl);
@@ -518,7 +578,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
bio_set_dev(bio, ca->bdev);
- bio->bi_iter.bi_size = bucket_bytes(ca);
+ bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb);
bio->bi_end_io = prio_endio;
bio->bi_private = ca;
@@ -576,7 +636,7 @@ int bch_prio_write(struct cache *ca, bool wait)
p->next_bucket = ca->prio_buckets[i + 1];
p->magic = pset_magic(&ca->sb);
- p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
+ p->csum = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8);
bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
BUG_ON(bucket == -1);
@@ -629,7 +689,7 @@ static int prio_read(struct cache *ca, uint64_t bucket)
prio_io(ca, bucket, REQ_OP_READ, 0);
if (p->csum !=
- bch_crc64(&p->magic, bucket_bytes(ca) - 8)) {
+ bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
pr_warn("bad csum reading priorities\n");
goto out;
}
@@ -680,7 +740,16 @@ static int ioctl_dev(struct block_device *b, fmode_t mode,
return d->ioctl(d, mode, cmd, arg);
}
-static const struct block_device_operations bcache_ops = {
+static const struct block_device_operations bcache_cached_ops = {
+ .submit_bio = cached_dev_submit_bio,
+ .open = open_dev,
+ .release = release_dev,
+ .ioctl = ioctl_dev,
+ .owner = THIS_MODULE,
+};
+
+static const struct block_device_operations bcache_flash_ops = {
+ .submit_bio = flash_dev_submit_bio,
.open = open_dev,
.release = release_dev,
.ioctl = ioctl_dev,
@@ -820,25 +889,25 @@ static void bcache_device_free(struct bcache_device *d)
}
static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
- sector_t sectors, make_request_fn make_request_fn,
- struct block_device *cached_bdev)
+ sector_t sectors, struct block_device *cached_bdev,
+ const struct block_device_operations *ops)
{
struct request_queue *q;
const size_t max_stripes = min_t(size_t, INT_MAX,
SIZE_MAX / sizeof(atomic_t));
- size_t n;
+ uint64_t n;
int idx;
if (!d->stripe_size)
d->stripe_size = 1 << 31;
- d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
-
- if (!d->nr_stripes || d->nr_stripes > max_stripes) {
- pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)\n",
- (unsigned int)d->nr_stripes);
+ n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
+ if (!n || n > max_stripes) {
+ pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n",
+ n);
return -ENOMEM;
}
+ d->nr_stripes = n;
n = d->nr_stripes * sizeof(atomic_t);
d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
@@ -868,16 +937,14 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
d->disk->major = bcache_major;
d->disk->first_minor = idx_to_first_minor(idx);
- d->disk->fops = &bcache_ops;
+ d->disk->fops = ops;
d->disk->private_data = d;
- q = blk_alloc_queue(make_request_fn, NUMA_NO_NODE);
+ q = blk_alloc_queue(NUMA_NO_NODE);
if (!q)
return -ENOMEM;
d->disk->queue = q;
- q->queuedata = d;
- q->backing_dev_info->congested_data = d;
q->limits.max_hw_sectors = UINT_MAX;
q->limits.max_sectors = UINT_MAX;
q->limits.max_segment_size = UINT_MAX;
@@ -1356,7 +1423,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
ret = bcache_device_init(&dc->disk, block_size,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
- cached_dev_make_request, dc->bdev);
+ dc->bdev, &bcache_cached_ops);
if (ret)
return ret;
@@ -1469,7 +1536,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
kobject_init(&d->kobj, &bch_flash_dev_ktype);
if (bcache_device_init(d, block_bytes(c), u->sectors,
- flash_dev_make_request, NULL))
+ NULL, &bcache_flash_ops))
goto err;
bcache_device_attach(d, c, u - c->uuids);
@@ -1613,7 +1680,7 @@ static void cache_set_free(struct closure *cl)
}
bch_bset_sort_state_free(&c->sort);
- free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
+ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
if (c->moving_gc_wq)
destroy_workqueue(c->moving_gc_wq);
@@ -1776,7 +1843,10 @@ void bch_cache_set_unregister(struct cache_set *c)
}
#define alloc_bucket_pages(gfp, c) \
- ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
+ ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))
+
+#define alloc_meta_bucket_pages(gfp, sb) \
+ ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
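__get_free_pages() allocates by order (a power-of-two page count), so ilog2(meta_bucket_pages(...)) assumes meta_bucket_pages() returns a power of two; __GFP_COMP makes the result a compound page so the allocation can be treated as one unit. A sketch of the round trip, with meta_bucket_pages() taken on faith from features.h:

    unsigned int order = ilog2(meta_bucket_pages(&c->sb));  /* e.g. 8 pages -> order 3 */
    void *buf = (void *)__get_free_pages(__GFP_ZERO | __GFP_COMP | GFP_KERNEL, order);
    if (!buf)
            return -ENOMEM;
    /* ... */
    free_pages((unsigned long)buf, order);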
@@ -1807,12 +1877,19 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->sb.bucket_size = sb->bucket_size;
c->sb.nr_in_set = sb->nr_in_set;
c->sb.last_mount = sb->last_mount;
+ c->sb.version = sb->version;
+ if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+ c->sb.feature_compat = sb->feature_compat;
+ c->sb.feature_ro_compat = sb->feature_ro_compat;
+ c->sb.feature_incompat = sb->feature_incompat;
+ }
+
c->bucket_bits = ilog2(sb->bucket_size);
c->block_bits = ilog2(sb->block_size);
- c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
+ c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
c->devices_max_used = 0;
atomic_set(&c->attached_dev_nr, 0);
- c->btree_pages = bucket_pages(c);
+ c->btree_pages = meta_bucket_pages(&c->sb);
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
BTREE_MAX_PAGES);
@@ -1838,24 +1915,46 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
INIT_LIST_HEAD(&c->btree_cache_freed);
INIT_LIST_HEAD(&c->data_buckets);
- iter_size = (sb->bucket_size / sb->block_size + 1) *
+ iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) *
sizeof(struct btree_iter_set);
- if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) ||
- mempool_init_slab_pool(&c->search, 32, bch_search_cache) ||
- mempool_init_kmalloc_pool(&c->bio_meta, 2,
- sizeof(struct bbio) + sizeof(struct bio_vec) *
- bucket_pages(c)) ||
- mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
- bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
- BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
- !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
- !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
- WQ_MEM_RECLAIM, 0)) ||
- bch_journal_alloc(c) ||
- bch_btree_cache_alloc(c) ||
- bch_open_buckets_alloc(c) ||
- bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
+ c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
+ if (!c->devices)
+ goto err;
+
+ if (mempool_init_slab_pool(&c->search, 32, bch_search_cache))
+ goto err;
+
+ if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
+ sizeof(struct bbio) +
+ sizeof(struct bio_vec) * meta_bucket_pages(&c->sb)))
+ goto err;
+
+ if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
+ goto err;
+
+ if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
+ BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
+ goto err;
+
+ c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
+ if (!c->uuids)
+ goto err;
+
+ c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0);
+ if (!c->moving_gc_wq)
+ goto err;
+
+ if (bch_journal_alloc(c))
+ goto err;
+
+ if (bch_btree_cache_alloc(c))
+ goto err;
+
+ if (bch_open_buckets_alloc(c))
+ goto err;
+
+ if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
goto err;
c->congested_read_threshold_us = 2000;
@@ -2100,7 +2199,14 @@ found:
sysfs_create_link(&c->kobj, &ca->kobj, buf))
goto err;
- if (ca->sb.seq > c->sb.seq) {
+ /*
+ * A special case is when both ca->sb.seq and c->sb.seq are 0;
+ * this happens on a newly created cache device whose super
+ * block has never been flushed yet. In this case c->sb.version
+ * and other members should be updated too, otherwise we would
+ * end up with a mistaken super block version in the cache set.
+ */
+ if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
c->sb.version = ca->sb.version;
memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
c->sb.flags = ca->sb.flags;
@@ -2138,7 +2244,7 @@ void bch_cache_release(struct kobject *kobj)
ca->set->cache[ca->sb.nr_this_dev] = NULL;
}
- free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
+ free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
kfree(ca->prio_buckets);
vfree(ca->buckets);
@@ -2235,7 +2341,7 @@ static int cache_alloc(struct cache *ca)
goto err_prio_buckets_alloc;
}
- ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca);
+ ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb);
if (!ca->disk_buckets) {
err = "ca->disk_buckets alloc failed";
goto err_disk_buckets_alloc;
@@ -2782,7 +2888,7 @@ static int __init bcache_init(void)
static const struct attribute *files[] = {
&ksysfs_register.attr,
&ksysfs_register_quiet.attr,
-#ifdef CONFIG_BCACHE_ASYNC_REGISTRAION
+#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
&ksysfs_register_async.attr,
#endif
&ksysfs_pendings_cleanup.attr,
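The misspelled CONFIG_BCACHE_ASYNC_REGISTRAION guard never matched any Kconfig symbol, so the preprocessor condition was always false and ksysfs_register_async was silently compiled out; correcting the spelling is what actually exposes the attribute:

    #ifdef CONFIG_BCACHE_ASYNC_REGISTRATION   /* must match the Kconfig name exactly */
            &ksysfs_register_async.attr,
    #endif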
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 0dadec5a78f6..ac06c0bc3c0a 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -11,6 +11,7 @@
#include "btree.h"
#include "request.h"
#include "writeback.h"
+#include "features.h"
#include <linux/blkdev.h>
#include <linux/sort.h>
@@ -88,6 +89,9 @@ read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);
+read_attribute(feature_compat);
+read_attribute(feature_ro_compat);
+read_attribute(feature_incompat);
read_attribute(state);
read_attribute(cache_read_races);
@@ -779,6 +783,13 @@ SHOW(__bch_cache_set)
if (attr == &sysfs_bset_tree_stats)
return bch_bset_print_stats(c, buf);
+ if (attr == &sysfs_feature_compat)
+ return bch_print_cache_set_feature_compat(c, buf, PAGE_SIZE);
+ if (attr == &sysfs_feature_ro_compat)
+ return bch_print_cache_set_feature_ro_compat(c, buf, PAGE_SIZE);
+ if (attr == &sysfs_feature_incompat)
+ return bch_print_cache_set_feature_incompat(c, buf, PAGE_SIZE);
+
return 0;
}
SHOW_LOCKED(bch_cache_set)
@@ -987,6 +998,9 @@ static struct attribute *bch_cache_set_internal_files[] = {
&sysfs_io_disable,
&sysfs_cutoff_writeback,
&sysfs_cutoff_writeback_sync,
+ &sysfs_feature_compat,
+ &sysfs_feature_ro_compat,
+ &sysfs_feature_incompat,
NULL
};
KTYPE(bch_cache_set_internal);
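read_attribute(feature_compat) expands to a sysfs attribute named sysfs_feature_compat whose show path lands in SHOW(__bch_cache_set) above; the printers are bounded by the one-page sysfs buffer. A hypothetical shape for such a printer (the real ones live in features.c):

    int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size)
    {
            /* emit space-separated feature names, never more than 'size' bytes */
            return snprintf(buf, size, "%s\n", "...");
    }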
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 1cf1e5016cb9..4f4ad6b3d43a 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -459,10 +459,8 @@ static void read_dirty(struct cached_dev *dc)
for (i = 0; i < nk; i++) {
w = keys[i];
- io = kzalloc(sizeof(struct dirty_io) +
- sizeof(struct bio_vec) *
- DIV_ROUND_UP(KEY_SIZE(&w->key),
- PAGE_SECTORS),
+ io = kzalloc(struct_size(io, bio.bi_inline_vecs,
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
GFP_KERNEL);
if (!io)
goto err;
@@ -523,15 +521,19 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
uint64_t offset, int nr_sectors)
{
struct bcache_device *d = c->devices[inode];
- unsigned int stripe_offset, stripe, sectors_dirty;
+ unsigned int stripe_offset, sectors_dirty;
+ int stripe;
if (!d)
return;
+ stripe = offset_to_stripe(d, offset);
+ if (stripe < 0)
+ return;
+
if (UUID_FLASH_ONLY(&c->uuids[inode]))
atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
- stripe = offset_to_stripe(d, offset);
stripe_offset = offset & (d->stripe_size - 1);
while (nr_sectors) {
@@ -571,12 +573,12 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
static void refill_full_stripes(struct cached_dev *dc)
{
struct keybuf *buf = &dc->writeback_keys;
- unsigned int start_stripe, stripe, next_stripe;
+ unsigned int start_stripe, next_stripe;
+ int stripe;
bool wrapped = false;
stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
-
- if (stripe >= dc->disk.nr_stripes)
+ if (stripe < 0)
stripe = 0;
start_stripe = stripe;
@@ -825,10 +827,8 @@ static int bch_dirty_init_thread(void *arg)
struct btree_iter iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
- int i;
k = p = NULL;
- i = 0;
cur_idx = prev_idx = 0;
bch_btree_iter_init(&c->root->keys, &iter, NULL);
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index b029843ce5b6..3f1230e22de0 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -52,10 +52,22 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
-static inline unsigned int offset_to_stripe(struct bcache_device *d,
+static inline int offset_to_stripe(struct bcache_device *d,
uint64_t offset)
{
do_div(offset, d->stripe_size);
+
+ /* d->nr_stripes is in range [1, INT_MAX] */
+ if (unlikely(offset >= d->nr_stripes)) {
+ pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
+ offset, d->nr_stripes);
+ return -EINVAL;
+ }
+
+ /*
+ * Here offset is definitely smaller than INT_MAX, so
+ * returning it as an int will never overflow.
+ */
return offset;
}
@@ -63,7 +75,10 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
uint64_t offset,
unsigned int nr_sectors)
{
- unsigned int stripe = offset_to_stripe(&dc->disk, offset);
+ int stripe = offset_to_stripe(&dc->disk, offset);
+
+ if (stripe < 0)
+ return false;
while (1) {
if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
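Since bcache_device_init() caps nr_stripes at INT_MAX (via max_stripes), any valid stripe index fits in an int, and a negative return becomes an unambiguous out-of-range signal. Every caller now follows the same shape:

    int stripe = offset_to_stripe(&dc->disk, offset);

    if (stripe < 0)
            return false;   /* offset beyond the device, nothing to check */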
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index d3bb355819a4..96c93802ee4d 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -421,8 +421,6 @@ struct cache {
struct rw_semaphore quiesce_lock;
- struct dm_target_callbacks callbacks;
-
/*
* origin_blocks entries, discarded if set.
*/
@@ -886,7 +884,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
static void accounted_request(struct cache *cache, struct bio *bio)
{
accounted_begin(cache, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
static void issue_op(struct bio *bio, void *context)
@@ -1792,7 +1790,7 @@ static bool process_bio(struct cache *cache, struct bio *bio)
bool commit_needed;
if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return commit_needed;
}
@@ -1858,7 +1856,7 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
if (cache->features.discard_passdown) {
remap_to_origin(cache, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
} else
bio_endio(bio);
@@ -2423,20 +2421,6 @@ static void set_cache_size(struct cache *cache, dm_cblock_t size)
cache->cache_size = size;
}
-static int is_congested(struct dm_dev *dev, int bdi_bits)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(q->backing_dev_info, bdi_bits);
-}
-
-static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
- struct cache *cache = container_of(cb, struct cache, callbacks);
-
- return is_congested(cache->origin_dev, bdi_bits) ||
- is_congested(cache->cache_dev, bdi_bits);
-}
-
#define DEFAULT_MIGRATION_THRESHOLD 2048
static int cache_create(struct cache_args *ca, struct cache **result)
@@ -2471,9 +2455,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
- cache->callbacks.congested_fn = cache_is_congested;
- dm_table_add_target_callbacks(ti->table, &cache->callbacks);
-
cache->metadata_dev = ca->metadata_dev;
cache->origin_dev = ca->origin_dev;
cache->cache_dev = ca->cache_dev;
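submit_bio_noacct() is the rename of generic_make_request(): it submits a bio without doing I/O accounting (hence "noacct"), which is what a stacking driver wants when it re-submits a bio it has already accounted, and it still serializes recursive submissions through current->bio_list. A remap-and-resubmit sketch under those assumptions:

    static void resubmit_to(struct bio *bio, struct block_device *bdev)
    {
            bio_set_dev(bio, bdev);       /* retarget at the lower device */
            submit_bio_noacct(bio);       /* no double accounting, recursion-safe */
    }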
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 5ce96ddf1ce1..bdb255edc200 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -68,7 +68,6 @@ struct hash_table_bucket;
struct clone {
struct dm_target *ti;
- struct dm_target_callbacks callbacks;
struct dm_dev *metadata_dev;
struct dm_dev *dest_dev;
@@ -330,7 +329,7 @@ static void submit_bios(struct bio_list *bios)
blk_start_plug(&plug);
while ((bio = bio_list_pop(bios)))
- generic_make_request(bio);
+ submit_bio_noacct(bio);
blk_finish_plug(&plug);
}
@@ -346,7 +345,7 @@ static void submit_bios(struct bio_list *bios)
static void issue_bio(struct clone *clone, struct bio *bio)
{
if (!bio_triggers_commit(clone, bio)) {
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return;
}
@@ -473,7 +472,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
bio_region_range(clone, bio, &rs, &nr_regions);
trim_bio(bio, region_to_sector(clone, rs),
nr_regions << clone->region_shift);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
} else
bio_endio(bio);
}
@@ -865,7 +864,7 @@ static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio
bio->bi_private = hd;
atomic_inc(&hd->clone->hydrations_in_flight);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
/*
@@ -1281,7 +1280,7 @@ static void process_deferred_flush_bios(struct clone *clone)
*/
bio_endio(bio);
} else {
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
}
@@ -1518,18 +1517,6 @@ error:
DMEMIT("Error");
}
-static int clone_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
- struct request_queue *dest_q, *source_q;
- struct clone *clone = container_of(cb, struct clone, callbacks);
-
- source_q = bdev_get_queue(clone->source_dev->bdev);
- dest_q = bdev_get_queue(clone->dest_dev->bdev);
-
- return (bdi_congested(dest_q->backing_dev_info, bdi_bits) |
- bdi_congested(source_q->backing_dev_info, bdi_bits));
-}
-
static sector_t get_dev_size(struct dm_dev *dev)
{
return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
@@ -1930,8 +1917,6 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto out_with_mempool;
mutex_init(&clone->commit_lock);
- clone->callbacks.congested_fn = clone_is_congested;
- dm_table_add_target_callbacks(ti->table, &clone->callbacks);
/* Enable flushes */
ti->num_flush_bios = 1;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 000ddfab5ba0..b437a14c4942 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -300,7 +300,7 @@ static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
* elephant: The extended version of eboiv with additional Elephant diffuser
* used with Bitlocker CBC mode.
* This mode was used in older Windows systems
- * http://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
+ * https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
*/
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
@@ -1789,7 +1789,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
return 1;
}
- generic_make_request(clone);
+ submit_bio_noacct(clone);
return 0;
}
@@ -1815,7 +1815,7 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
{
struct bio *clone = io->ctx.bio_out;
- generic_make_request(clone);
+ submit_bio_noacct(clone);
}
#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
@@ -1893,7 +1893,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
clone->bi_iter.bi_sector = cc->start + io->sector;
if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
- generic_make_request(clone);
+ submit_bio_noacct(clone);
return;
}
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index f496213f8b67..2628a832787b 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -72,7 +72,7 @@ static void flush_bios(struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = n;
}
}
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index bdb84b8e7162..b24e3839bb3a 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1137,7 +1137,6 @@ static int metadata_get_stats(struct era_metadata *md, void *ptr)
struct era {
struct dm_target *ti;
- struct dm_target_callbacks callbacks;
struct dm_dev *metadata_dev;
struct dm_dev *origin_dev;
@@ -1265,7 +1264,7 @@ static void process_deferred_bios(struct era *era)
bio_io_error(bio);
else
while ((bio = bio_list_pop(&marked_bios)))
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
static void process_rpc_calls(struct era *era)
@@ -1375,18 +1374,6 @@ static void stop_worker(struct era *era)
/*----------------------------------------------------------------
* Target methods
*--------------------------------------------------------------*/
-static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(q->backing_dev_info, bdi_bits);
-}
-
-static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
- struct era *era = container_of(cb, struct era, callbacks);
- return dev_is_congested(era->origin_dev, bdi_bits);
-}
-
static void era_destroy(struct era *era)
{
if (era->md)
@@ -1514,8 +1501,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->flush_supported = true;
ti->num_discard_bios = 1;
- era->callbacks.congested_fn = era_is_congested;
- dm_table_add_target_callbacks(ti->table, &era->callbacks);
return 0;
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index a83a1de1e03f..5da3eb661e50 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2115,12 +2115,12 @@ offload_to_thread:
dio->in_flight = (atomic_t)ATOMIC_INIT(1);
dio->completion = NULL;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return;
}
- generic_make_request(bio);
+ submit_bio_noacct(bio);
if (need_sync_io) {
wait_for_completion_io(&read_comp);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 81ffc59d05c9..4312007d2d34 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -306,7 +306,7 @@ static void do_region(int op, int op_flags, unsigned region,
struct request_queue *q = bdev_get_queue(where->bdev);
unsigned short logical_block_size = queue_logical_block_size(q);
sector_t num_sectors;
- unsigned int uninitialized_var(special_cmd_max_sectors);
+ unsigned int special_cmd_max_sectors;
/*
* Reject unsupported discard and write same requests.
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 489935d5f22d..056d891a32a9 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1844,7 +1844,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
int ioctl_flags;
int param_flags;
unsigned int cmd;
- struct dm_ioctl *uninitialized_var(param);
+ struct dm_ioctl *param;
ioctl_fn fn = NULL;
size_t input_param_size;
struct dm_ioctl param_kernel;
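uninitialized_var() was a warning-suppression macro, not an initializer; it expanded to a self-assignment and could therefore hide genuinely uninitialized uses from both the compiler and reviewers, which is why the drops here leave plain declarations behind:

    /* the removed macro, roughly: */
    #define uninitialized_var(x) x = x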
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 78cff42d987e..73bb23de6336 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -677,7 +677,7 @@ static void process_queued_bios(struct work_struct *work)
bio_endio(bio);
break;
case DM_MAPIO_REMAPPED:
- generic_make_request(bio);
+ submit_bio_noacct(bio);
break;
case DM_MAPIO_SUBMITTED:
break;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 10e8b2fe787b..d9e270957e18 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -242,7 +242,6 @@ struct raid_set {
struct mddev md;
struct raid_type *raid_type;
- struct dm_target_callbacks callbacks;
sector_t array_sectors;
sector_t dev_sectors;
@@ -1705,13 +1704,6 @@ static void do_table_event(struct work_struct *ws)
dm_table_event(rs->ti->table);
}
-static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
-{
- struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
-
- return mddev_congested(&rs->md, bits);
-}
-
/*
* Make sure a valid takover (level switch) is being requested on @rs
*
@@ -3248,9 +3240,6 @@ size_check:
goto bad_md_start;
}
- rs->callbacks.congested_fn = raid_is_congested;
- dm_table_add_target_callbacks(ti->table, &rs->callbacks);
-
/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
@@ -3310,7 +3299,6 @@ static void raid_dtr(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- list_del_init(&rs->callbacks.list);
md_stop(&rs->md);
raid_set_free(rs);
}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 2f655d9f4200..fa09bc4e4c54 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -779,7 +779,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
wakeup_mirrord(ms);
} else {
map_bio(get_default_mirror(ms), bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 85e0daabad49..7ce387a1cc6a 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -284,7 +284,8 @@ static void dm_complete_request(struct request *rq, blk_status_t error)
struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
- blk_mq_complete_request(rq);
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
}
/*
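blk_should_fake_timeout() supports blk-mq timeout fault injection: when it returns true, the driver drops the completion so the request appears to time out and the timeout handler gets exercised. The pattern the hunk above adopts:

    if (likely(!blk_should_fake_timeout(rq->q)))
            blk_mq_complete_request(rq);   /* otherwise let the timeout path recover it */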
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 963d3774c93e..63fab7c769be 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -252,7 +252,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
/*
* Issue the synchronous I/O from a different thread
- * to avoid generic_make_request recursion.
+ * to avoid submit_bio_noacct recursion.
*/
INIT_WORK_ONSTACK(&req.work, do_metadata);
queue_work(ps->metadata_wq, &req.work);
@@ -613,7 +613,7 @@ static int persistent_read_metadata(struct dm_exception_store *store,
chunk_t old, chunk_t new),
void *callback_context)
{
- int r, uninitialized_var(new_snapshot);
+ int r, new_snapshot;
struct pstore *ps = get_info(store);
/*
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 6b11a266299f..4668b2cd98f4 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1568,7 +1568,7 @@ static void flush_bios(struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = n;
}
}
@@ -1588,7 +1588,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
bio->bi_next = NULL;
r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = n;
}
}
@@ -1829,7 +1829,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
bio->bi_end_io = full_bio_end_io;
bio->bi_private = callback_data;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
static struct dm_snap_pending_exception *
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8277b959e00b..5edc3079e7c1 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -64,8 +64,6 @@ struct dm_table {
void *event_context;
struct dm_md_mempools *mempools;
-
- struct list_head target_callbacks;
};
/*
@@ -190,7 +188,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
return -ENOMEM;
INIT_LIST_HEAD(&t->devices);
- INIT_LIST_HEAD(&t->target_callbacks);
if (!num_targets)
num_targets = KEYS_PER_NODE;
@@ -361,7 +358,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
* This upgrades the mode on an already open dm_dev, being
* careful to leave things as they were if we fail to reopen the
* device and not to touch the existing bdev field in case
- * it is accessed concurrently inside dm_table_any_congested().
+ * it is accessed concurrently.
*/
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
struct mapped_device *md)
@@ -461,7 +458,8 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
- if (bdev_stack_limits(limits, bdev, start) < 0)
+ if (blk_stack_limits(limits, &q->limits,
+ get_start_sect(bdev) + start) < 0)
DMWARN("%s: adding target device %s caused an alignment inconsistency: "
"physical_block_size=%u, logical_block_size=%u, "
"alignment_offset=%u, start=%llu",
@@ -470,9 +468,6 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
q->limits.logical_block_size,
q->limits.alignment_offset,
(unsigned long long) start << SECTOR_SHIFT);
-
- limits->zoned = blk_queue_zoned_model(q);
-
return 0;
}
@@ -642,7 +637,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
*/
unsigned short remaining = 0;
- struct dm_target *uninitialized_var(ti);
+ struct dm_target *ti;
struct queue_limits ti_limits;
unsigned i;
@@ -1531,22 +1526,6 @@ combine_limits:
dm_device_name(table->md),
(unsigned long long) ti->begin,
(unsigned long long) ti->len);
-
- /*
- * FIXME: this should likely be moved to blk_stack_limits(), would
- * also eliminate limits->zoned stacking hack in dm_set_device_limits()
- */
- if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
- /*
- * By default, the stacked limits zoned model is set to
- * BLK_ZONED_NONE in blk_set_stacking_limits(). Update
- * this model using the first target model reported
- * that is not BLK_ZONED_NONE. This will be either the
- * first target device zoned model or the model reported
- * by the target .io_hints.
- */
- limits->zoned = ti_limits.zoned;
- }
}
/*
@@ -2052,38 +2031,6 @@ int dm_table_resume_targets(struct dm_table *t)
return 0;
}
-void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
-{
- list_add(&cb->list, &t->target_callbacks);
-}
-EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
-
-int dm_table_any_congested(struct dm_table *t, int bdi_bits)
-{
- struct dm_dev_internal *dd;
- struct list_head *devices = dm_table_get_devices(t);
- struct dm_target_callbacks *cb;
- int r = 0;
-
- list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
- char b[BDEVNAME_SIZE];
-
- if (likely(q))
- r |= bdi_congested(q->backing_dev_info, bdi_bits);
- else
- DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
- dm_device_name(t->md),
- bdevname(dd->dm_dev->bdev, b));
- }
-
- list_for_each_entry(cb, &t->target_callbacks, list)
- if (cb->congested_fn)
- r |= cb->congested_fn(cb, bdi_bits);
-
- return r;
-}
-
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
return t->md;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index fa8d5464c1fb..fff4c50df74d 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -326,7 +326,6 @@ struct pool_c {
struct pool *pool;
struct dm_dev *data_dev;
struct dm_dev *metadata_dev;
- struct dm_target_callbacks callbacks;
dm_block_t low_water_blocks;
struct pool_features requested_pf; /* Features requested during table load */
@@ -758,7 +757,7 @@ static void issue(struct thin_c *tc, struct bio *bio)
struct pool *pool = tc->pool;
if (!bio_triggers_commit(tc, bio)) {
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return;
}
@@ -2394,7 +2393,7 @@ static void process_deferred_bios(struct pool *pool)
if (bio->bi_opf & REQ_PREFLUSH)
bio_endio(bio);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
@@ -2796,18 +2795,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
}
}
-static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
- struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
- struct request_queue *q;
-
- if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
- return 1;
-
- q = bdev_get_queue(pt->data_dev->bdev);
- return bdi_congested(q->backing_dev_info, bdi_bits);
-}
-
static void requeue_bios(struct pool *pool)
{
struct thin_c *tc;
@@ -3420,9 +3407,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
dm_pool_register_pre_commit_callback(pool->pmd,
metadata_pre_commit_callback, pool);
- pt->callbacks.congested_fn = pool_is_congested;
- dm_table_add_target_callbacks(ti->table, &pt->callbacks);
-
mutex_unlock(&dm_thin_pool_table.mutex);
return 0;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index eec9f252e935..75fa4d9b7617 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -681,7 +681,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
verity_submit_prefetch(v, io);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return DM_MAPIO_SUBMITTED;
}
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 5358894bb9fd..cfea054863a4 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1244,7 +1244,7 @@ static int writecache_flush_thread(void *data)
bio_end_sector(bio));
wc_unlock(wc);
bio_set_dev(bio, wc->dev->bdev);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
} else {
writecache_flush(wc);
wc_unlock(wc);
@@ -1752,7 +1752,7 @@ static void writecache_writeback(struct work_struct *work)
{
struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
struct blk_plug plug;
- struct wc_entry *f, *uninitialized_var(g), *e = NULL;
+ struct wc_entry *f, *g, *e = NULL;
struct rb_node *node, *next_node;
struct list_head skipped;
struct writeback_list wbl;
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 42aa5139df7c..697f9de37355 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -140,7 +140,7 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
bio_advance(bio, clone->bi_iter.bi_size);
refcount_inc(&bioctx->ref);
- generic_make_request(clone);
+ submit_bio_noacct(clone);
if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
zone->wp_block += nr_blocks;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5b9de2f71bb0..87cf45f619fd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1273,7 +1273,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
sector_t sector;
struct bio *clone = &tio->clone;
struct dm_io *io = tio->io;
- struct mapped_device *md = io->md;
struct dm_target *ti = tio->ti;
blk_qc_t ret = BLK_QC_T_NONE;
@@ -1295,10 +1294,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone->bi_disk->queue, clone,
bio_dev(io->orig_bio), sector);
- if (md->type == DM_TYPE_NVME_BIO_BASED)
- ret = direct_make_request(clone);
- else
- ret = generic_make_request(clone);
+ ret = submit_bio_noacct(clone);
break;
case DM_MAPIO_KILL:
free_tio(tio);
@@ -1645,7 +1641,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
error = __split_and_process_non_flush(&ci);
if (current->bio_list && ci.sector_count && !error) {
/*
- * Remainder must be passed to generic_make_request()
+ * Remainder must be passed to submit_bio_noacct()
* so that it gets handled *after* bios already submitted
* have been completely processed.
* We take a clone of the original to store in
@@ -1670,7 +1666,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
bio_chain(b, bio);
trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
- ret = generic_make_request(bio);
+ ret = submit_bio_noacct(bio);
break;
}
}
@@ -1738,7 +1734,7 @@ static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struc
bio_chain(split, *bio);
trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
- generic_make_request(*bio);
+ submit_bio_noacct(*bio);
*bio = split;
}
}
@@ -1763,13 +1759,13 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
}
/*
- * If in ->make_request_fn we need to use blk_queue_split(), otherwise
+ * If in ->queue_bio we need to use blk_queue_split(), otherwise
* queue_limits for abnormal requests (e.g. discard, writesame, etc)
* won't be imposed.
*/
if (current->bio_list) {
if (is_abnormal_io(bio))
- blk_queue_split(md->queue, &bio);
+ blk_queue_split(&bio);
else
dm_queue_split(md, ti, &bio);
}
@@ -1780,9 +1776,9 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
return __split_and_process_bio(md, map, bio);
}
-static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t dm_submit_bio(struct bio *bio)
{
- struct mapped_device *md = q->queuedata;
+ struct mapped_device *md = bio->bi_disk->private_data;
blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
@@ -1791,12 +1787,12 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
/*
* We are called with a live reference on q_usage_counter, but
* that one will be released as soon as we return. Grab an
- * extra one as blk_mq_make_request expects to be able to
- * consume a reference (which lives until the request is freed
- * in case a request is allocated).
+ * extra one as blk_mq_submit_bio expects to be able to consume
+ * a reference (which lives until the request is freed in case a
+ * request is allocated).
*/
- percpu_ref_get(&q->q_usage_counter);
- return blk_mq_make_request(q, bio);
+ percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
+ return blk_mq_submit_bio(bio);
}
map = dm_get_live_table(md, &srcu_idx);
@@ -1818,31 +1814,6 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
return ret;
}
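The comment in the hunk above is load-bearing: the caller's q_usage_counter reference is released as soon as dm_submit_bio() returns, while blk_mq_submit_bio() expects to consume a reference that may outlive the call (held until the request is freed, when one is allocated). Taking one extra reference bridges the two lifetimes:

    percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
    return blk_mq_submit_bio(bio);   /* consumes the extra reference */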
-static int dm_any_congested(void *congested_data, int bdi_bits)
-{
- int r = bdi_bits;
- struct mapped_device *md = congested_data;
- struct dm_table *map;
-
- if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
- if (dm_request_based(md)) {
- /*
- * With request-based DM we only need to check the
- * top-level queue for congestion.
- */
- struct backing_dev_info *bdi = md->queue->backing_dev_info;
- r = bdi->wb.congested->state & bdi_bits;
- } else {
- map = dm_get_live_table_fast(md);
- if (map)
- r = dm_table_any_congested(map, bdi_bits);
- dm_put_live_table_fast(md);
- }
- }
-
- return r;
-}
-
/*-----------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
@@ -1981,14 +1952,13 @@ static struct mapped_device *alloc_dev(int minor)
spin_lock_init(&md->uevent_lock);
/*
- * default to bio-based required ->make_request_fn until DM
- * table is loaded and md->type established. If request-based
- * table is loaded: blk-mq will override accordingly.
+ * default to bio-based until DM table is loaded and md->type
+ * established. If request-based table is loaded: blk-mq will
+ * override accordingly.
*/
- md->queue = blk_alloc_queue(dm_make_request, numa_node_id);
+ md->queue = blk_alloc_queue(numa_node_id);
if (!md->queue)
goto bad;
- md->queue->queuedata = md;
md->disk = alloc_disk_node(1, md->numa_node_id);
if (!md->disk)
@@ -2282,12 +2252,6 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
-static void dm_init_congested_fn(struct mapped_device *md)
-{
- md->queue->backing_dev_info->congested_data = md;
- md->queue->backing_dev_info->congested_fn = dm_any_congested;
-}
-
/*
* Setup the DM device's queue based on md's type
*/
@@ -2304,12 +2268,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
DMERR("Cannot initialize queue for request-based dm-mq mapped device");
return r;
}
- dm_init_congested_fn(md);
break;
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
case DM_TYPE_NVME_BIO_BASED:
- dm_init_congested_fn(md);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
@@ -2531,7 +2493,7 @@ static void dm_wq_work(struct work_struct *work)
break;
if (dm_request_based(md))
- (void) generic_make_request(c);
+ (void) submit_bio_noacct(c);
else
(void) dm_process_bio(md, map, c);
}
@@ -3286,6 +3248,7 @@ static const struct pr_ops dm_pr_ops = {
};
static const struct block_device_operations dm_blk_dops = {
+ .submit_bio = dm_submit_bio,
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
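With make_request_fn gone from the request_queue, a bio-based driver now routes submissions through its gendisk's fops, as the dm_blk_dops hunk above shows. A minimal sketch of the new shape, with my_dev/my_submit_bio as hypothetical names:

    static blk_qc_t my_submit_bio(struct bio *bio)
    {
            struct my_dev *d = bio->bi_disk->private_data;  /* no queuedata needed */

            /* ... map and dispatch ... */
            return BLK_QC_T_NONE;
    }

    static const struct block_device_operations my_fops = {
            .submit_bio = my_submit_bio,
            .owner      = THIS_MODULE,
    };

The queue itself is now allocated with plain blk_alloc_queue() taking just a NUMA node, as the alloc_dev() hunk above shows.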
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index d7c4f6606b5f..4f5fe664d05a 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -63,7 +63,6 @@ void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
-int dm_table_any_congested(struct dm_table *t, int bdi_bits);
enum dm_queue_mode dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 95a5f3757fa3..d61b524ae440 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1631,7 +1631,7 @@ void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
s += blocks;
}
bitmap->last_end_sync = jiffies;
- sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed);
}
EXPORT_SYMBOL(md_bitmap_cond_end_sync);
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 813a99ffa86f..73fd50e77975 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1518,6 +1518,7 @@ static void unlock_all_bitmaps(struct mddev *mddev)
}
}
kfree(cinfo->other_bitmap_lockres);
+ cinfo->other_bitmap_lockres = NULL;
}
}
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index 50ad4ba86f0e..fda4cb3f936f 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -169,7 +169,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
/* write request */
if (atomic_read(&conf->counters[WriteAll])) {
- /* special case - don't decrement, don't generic_make_request,
+ /* special case - don't decrement, don't submit_bio_noacct,
* just fail immediately
*/
bio_io_error(bio);
@@ -214,7 +214,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
} else
bio_set_dev(bio, conf->rdev->bdev);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return true;
}
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 26c75c0199fa..c2ae9125c4c3 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -46,29 +46,6 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
return conf->disks + lo;
}
-/*
- * In linear_congested() conf->raid_disks is used as a copy of
- * mddev->raid_disks to iterate conf->disks[], because conf->raid_disks
- * and conf->disks[] are created in linear_conf(), they are always
- * consitent with each other, but mddev->raid_disks does not.
- */
-static int linear_congested(struct mddev *mddev, int bits)
-{
- struct linear_conf *conf;
- int i, ret = 0;
-
- rcu_read_lock();
- conf = rcu_dereference(mddev->private);
-
- for (i = 0; i < conf->raid_disks && !ret ; i++) {
- struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
- ret |= bdi_congested(q->backing_dev_info, bits);
- }
-
- rcu_read_unlock();
- return ret;
-}
-
static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
struct linear_conf *conf;
@@ -267,7 +244,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
struct bio *split = bio_split(bio, end_sector - bio_sector,
GFP_NOIO, &mddev->bio_set);
bio_chain(split, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = split;
}
@@ -286,7 +263,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
bio_sector);
mddev_check_writesame(mddev, bio);
mddev_check_write_zeroes(mddev, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
return true;
@@ -322,7 +299,6 @@ static struct md_personality linear_personality =
.hot_add_disk = linear_add,
.size = linear_size,
.quiesce = linear_quiesce,
- .congested = linear_congested,
};
static int __init linear_init (void)
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 152f9e65a226..776bbe542db5 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -131,7 +131,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
mp_bh->bio.bi_private = mp_bh;
mddev_check_writesame(mddev, &mp_bh->bio);
mddev_check_write_zeroes(mddev, &mp_bh->bio);
- generic_make_request(&mp_bh->bio);
+ submit_bio_noacct(&mp_bh->bio);
return true;
}
@@ -151,28 +151,6 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
seq_putc(seq, ']');
}
-static int multipath_congested(struct mddev *mddev, int bits)
-{
- struct mpconf *conf = mddev->private;
- int i, ret = 0;
-
- rcu_read_lock();
- for (i = 0; i < mddev->raid_disks ; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q = bdev_get_queue(rdev->bdev);
-
- ret |= bdi_congested(q->backing_dev_info, bits);
- /* Just like multipath_map, we just check the
- * first available device
- */
- break;
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
/*
* Careful, this can execute in IRQ contexts as well!
*/
@@ -348,7 +326,7 @@ static void multipathd(struct md_thread *thread)
bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -478,7 +456,6 @@ static struct md_personality multipath_personality =
.hot_add_disk = multipath_add_disk,
.hot_remove_disk= multipath_remove_disk,
.size = multipath_size,
- .congested = multipath_congested,
};
static int __init multipath_init (void)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f567f536b529..153bc766bc5a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -101,6 +101,8 @@ static void mddev_detach(struct mddev *mddev);
* count by 2 for every hour elapsed between read errors.
*/
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
+/* Default safemode delay: 200 msec */
+#define DEFAULT_SAFEMODE_DELAY ((200 * HZ) / 1000 + 1)
/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
@@ -199,7 +201,7 @@ static int rdevs_init_serial(struct mddev *mddev)
static int rdev_need_serial(struct md_rdev *rdev)
{
return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
- rdev->bdev->bd_queue->nr_hw_queues != 1 &&
+ rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
test_bit(WriteMostly, &rdev->flags));
}
@@ -463,24 +465,46 @@ check_suspended:
}
EXPORT_SYMBOL(md_handle_request);
-static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+struct md_io {
+ struct mddev *mddev;
+ bio_end_io_t *orig_bi_end_io;
+ void *orig_bi_private;
+ unsigned long start_time;
+};
+
+static void md_end_io(struct bio *bio)
+{
+ struct md_io *md_io = bio->bi_private;
+ struct mddev *mddev = md_io->mddev;
+
+ disk_end_io_acct(mddev->gendisk, bio_op(bio), md_io->start_time);
+
+ bio->bi_end_io = md_io->orig_bi_end_io;
+ bio->bi_private = md_io->orig_bi_private;
+
+ mempool_free(md_io, &mddev->md_io_pool);
+
+ if (bio->bi_end_io)
+ bio->bi_end_io(bio);
+}
+
+static blk_qc_t md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
- const int sgrp = op_stat_group(bio_op(bio));
struct mddev *mddev = bio->bi_disk->private_data;
- unsigned int sectors;
- if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
+ if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
return BLK_QC_T_NONE;
}
- blk_queue_split(q, &bio);
-
- if (mddev == NULL || mddev->pers == NULL) {
+ if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
bio_io_error(bio);
return BLK_QC_T_NONE;
}
+
+ blk_queue_split(&bio);
+
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
if (bio_sectors(bio) != 0)
bio->bi_status = BLK_STS_IOERR;
@@ -488,21 +512,27 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
- /*
- * save the sectors now since our bio can
- * go away inside make_request
- */
- sectors = bio_sectors(bio);
+ if (bio->bi_end_io != md_end_io) {
+ struct md_io *md_io;
+
+ md_io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO);
+ md_io->mddev = mddev;
+ md_io->orig_bi_end_io = bio->bi_end_io;
+ md_io->orig_bi_private = bio->bi_private;
+
+ bio->bi_end_io = md_end_io;
+ bio->bi_private = md_io;
+
+ md_io->start_time = disk_start_io_acct(mddev->gendisk,
+ bio_sectors(bio),
+ bio_op(bio));
+ }
+
/* bio could be mergeable after passing to underlayer */
bio->bi_opf &= ~REQ_NOMERGE;
md_handle_request(mddev, bio);
- part_stat_lock();
- part_stat_inc(&mddev->gendisk->part0, ios[sgrp]);
- part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors);
- part_stat_unlock();
-
return BLK_QC_T_NONE;
}
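
The md_end_io() hook above follows the usual bio completion-wrapping pattern: stash the owner's bi_end_io/bi_private in a side object, substitute your own completion, and restore both before chaining. A stripped-down sketch of the same pattern, with my_ctx and my_end_io as illustrative names:

struct my_ctx {
	bio_end_io_t	*orig_end_io;
	void		*orig_private;
};

static void my_end_io(struct bio *bio)
{
	struct my_ctx *ctx = bio->bi_private;

	/* restore the caller's completion before chaining to it */
	bio->bi_end_io  = ctx->orig_end_io;
	bio->bi_private = ctx->orig_private;
	kfree(ctx);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}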
@@ -549,26 +579,6 @@ void mddev_resume(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_resume);
-int mddev_congested(struct mddev *mddev, int bits)
-{
- struct md_personality *pers = mddev->pers;
- int ret = 0;
-
- rcu_read_lock();
- if (mddev->suspended)
- ret = 1;
- else if (pers && pers->congested)
- ret = pers->congested(mddev, bits);
- rcu_read_unlock();
- return ret;
-}
-EXPORT_SYMBOL_GPL(mddev_congested);
-static int md_congested(void *data, int bits)
-{
- struct mddev *mddev = data;
- return mddev_congested(mddev, bits);
-}
-
/*
* Generic flush handling for md
*/
@@ -948,7 +958,8 @@ static void super_written(struct bio *bio)
struct mddev *mddev = rdev->mddev;
if (bio->bi_status) {
- pr_err("md: super_written gets error=%d\n", bio->bi_status);
+ pr_err("md: %s gets error=%d\n", __func__,
+ blk_status_to_errno(bio->bi_status));
md_error(mddev, rdev);
if (!test_bit(Faulty, &rdev->flags)
&& (bio->bi_opf & MD_FAILFAST)) {
@@ -2163,6 +2174,24 @@ retry:
sb->sb_csum = calc_sb_1_csum(sb);
}
+static sector_t super_1_choose_bm_space(sector_t dev_size)
+{
+ sector_t bm_space;
+
+ /* If the device is bigger than 8GiB, save 64KiB for bitmap
+ * usage; if bigger than 200GiB, save 128KiB.
+ */
+ if (dev_size < 64*2)
+ bm_space = 0;
+ else if (dev_size - 64*2 >= 200*1024*1024*2)
+ bm_space = 128*2;
+ else if (dev_size - 4*2 > 8*1024*1024*2)
+ bm_space = 64*2;
+ else
+ bm_space = 4*2;
+ return bm_space;
+}
+
static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
@@ -2183,13 +2212,22 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
return 0;
} else {
/* minor version 0; superblock after data */
- sector_t sb_start;
- sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
+ sector_t sb_start, bm_space;
+ sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
+
+ /* 8K is for superblock */
+ sb_start = dev_size - 8*2;
sb_start &= ~(sector_t)(4*2 - 1);
- max_sectors = rdev->sectors + sb_start - rdev->sb_start;
+
+ bm_space = super_1_choose_bm_space(dev_size);
+
+ /* Space that can be used to store data needs to exclude the
+ * superblock, the bitmap space and the bad block space (4K)
+ */
+ max_sectors = sb_start - bm_space - 4*2;
+
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
- rdev->sb_start = sb_start;
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
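
In these units (1 sector = 512 bytes), the thresholds in super_1_choose_bm_space() work out as follows: a device under 64 KiB (64*2 sectors) gets no bitmap space; one of 200 GiB (200*1024*1024*2 sectors) or more gets 128 KiB; one over 8 GiB gets 64 KiB; anything in between gets 4 KiB. Likewise, sb_start = dev_size - 8*2 reserves 8 KiB for the superblock, and the mask rounds it down to a 4 KiB boundary.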
@@ -2441,9 +2479,13 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
goto fail;
ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
- if (sysfs_create_link(&rdev->kobj, ko, "block"))
- /* failure here is OK */;
+ /* failure here is OK */
+ err = sysfs_create_link(&rdev->kobj, ko, "block");
rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
+ rdev->sysfs_unack_badblocks =
+ sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
+ rdev->sysfs_badblocks =
+ sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_link_disk_holder(rdev->bdev, mddev->gendisk);
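
The pattern here, repeated for several attributes in this series, is to resolve the kernfs node once at bind time and then notify it from hot paths without a per-event name lookup. In outline (the attribute name below is illustrative):

struct kernfs_node *sd;

/* setup: resolve the attribute once; the _safe variants accept NULL */
sd = sysfs_get_dirent_safe(kobj->sd, "bad_blocks");

/* hot path: wake pollers without a string lookup */
sysfs_notify_dirent_safe(sd);

/* teardown */
sysfs_put(sd);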
@@ -2477,7 +2519,11 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
+ sysfs_put(rdev->sysfs_unack_badblocks);
+ sysfs_put(rdev->sysfs_badblocks);
rdev->sysfs_state = NULL;
+ rdev->sysfs_unack_badblocks = NULL;
+ rdev->sysfs_badblocks = NULL;
rdev->badblocks.count = 0;
/* We need to delay this, otherwise we can deadlock when
* writing to 'remove' to "dev/state". We also need
@@ -2822,7 +2868,7 @@ rewrite:
goto repeat;
wake_up(&mddev->sb_wait);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
rdev_for_each(rdev, mddev) {
if (test_and_clear_bit(FaultRecorded, &rdev->flags))
@@ -3202,8 +3248,8 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
return err;
} else
sysfs_notify_dirent_safe(rdev->sysfs_state);
- if (sysfs_link_rdev(rdev->mddev, rdev))
- /* failure here is OK */;
+ /* failure here is OK */
+ sysfs_link_rdev(rdev->mddev, rdev);
/* don't wakeup anyone, leave that to userspace. */
} else {
if (slot >= rdev->mddev->raid_disks &&
@@ -4075,7 +4121,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev_resume(mddev);
if (!mddev->thread)
md_update_sb(mddev, 1);
- sysfs_notify(&mddev->kobj, NULL, "level");
+ sysfs_notify_dirent_safe(mddev->sysfs_level);
md_new_event(mddev);
rv = len;
out_unlock:
@@ -4188,6 +4234,14 @@ static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
+uuid_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%pU\n", mddev->uuid);
+}
+static struct md_sysfs_entry md_uuid =
+__ATTR(uuid, S_IRUGO, uuid_show, NULL);
+
+static ssize_t
chunk_size_show(struct mddev *mddev, char *page)
{
if (mddev->reshape_position != MaxSector &&
@@ -4828,7 +4882,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
}
if (err)
return err;
- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
} else {
if (cmd_match(page, "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -5443,6 +5497,7 @@ static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_layout.attr,
&md_raid_disks.attr,
+ &md_uuid.attr,
&md_chunk_size.attr,
&md_size.attr,
&md_resync_start.attr,
@@ -5534,6 +5589,13 @@ static void md_free(struct kobject *ko)
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
+ if (mddev->sysfs_completed)
+ sysfs_put(mddev->sysfs_completed);
+ if (mddev->sysfs_degraded)
+ sysfs_put(mddev->sysfs_degraded);
+ if (mddev->sysfs_level)
+ sysfs_put(mddev->sysfs_level);
+
if (mddev->gendisk)
del_gendisk(mddev->gendisk);
@@ -5545,6 +5607,7 @@ static void md_free(struct kobject *ko)
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
+ mempool_exit(&mddev->md_io_pool);
kfree(mddev);
}
@@ -5640,8 +5703,13 @@ static int md_alloc(dev_t dev, char *name)
*/
mddev->hold_active = UNTIL_STOP;
+ error = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE,
+ sizeof(struct md_io));
+ if (error)
+ goto abort;
+
error = -ENOMEM;
- mddev->queue = blk_alloc_queue(md_make_request, NUMA_NO_NODE);
+ mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!mddev->queue)
goto abort;
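
For reference, the lifecycle of the new md_io_pool in one place (an illustrative helper, not code from this patch); the fixed reserve of BIO_POOL_SIZE objects is what lets md_submit_bio() allocate with GFP_NOIO on the I/O path without a failure branch:

static int example_md_io_pool(struct mddev *mddev)
{
	struct md_io *io;
	int err;

	err = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE,
					sizeof(struct md_io));
	if (err)
		return err;

	io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO); /* may sleep; will not fail */
	mempool_free(io, &mddev->md_io_pool);

	mempool_exit(&mddev->md_io_pool);
	return 0;
}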
@@ -5670,6 +5738,7 @@ static int md_alloc(dev_t dev, char *name)
* remove it now.
*/
disk->flags |= GENHD_FL_EXT_DEVT;
+ disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
/* As soon as we call add_disk(), another thread could get
* through to md_open, so make sure it doesn't get too far
@@ -5695,6 +5764,9 @@ static int md_alloc(dev_t dev, char *name)
if (!error && mddev->kobj.sd) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
+ mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
+ mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
+ mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
}
mddev_put(mddev);
return error;
@@ -5964,8 +6036,6 @@ int md_run(struct mddev *mddev)
blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
- mddev->queue->backing_dev_info->congested_data = mddev;
- mddev->queue->backing_dev_info->congested_fn = md_congested;
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -5982,7 +6052,7 @@ int md_run(struct mddev *mddev)
if (mddev_is_clustered(mddev))
mddev->safemode_delay = 0;
else
- mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
+ mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
mddev->in_sync = 1;
smp_wmb();
spin_lock(&mddev->lock);
@@ -6049,7 +6119,7 @@ static int do_md_run(struct mddev *mddev)
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
sysfs_notify_dirent_safe(mddev->sysfs_state);
sysfs_notify_dirent_safe(mddev->sysfs_action);
- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
out:
clear_bit(MD_NOT_READY, &mddev->flags);
return err;
@@ -6350,7 +6420,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
__md_stop_writes(mddev);
__md_stop(mddev);
- mddev->queue->backing_dev_info->congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
@@ -7361,6 +7430,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->bitmap_info.nodes = 0;
md_cluster_ops->leave(mddev);
+ module_put(md_cluster_mod);
+ mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
}
mddev_suspend(mddev);
md_bitmap_destroy(mddev);
@@ -7806,23 +7877,21 @@ static void md_release(struct gendisk *disk, fmode_t mode)
mddev_put(mddev);
}
-static int md_media_changed(struct gendisk *disk)
-{
- struct mddev *mddev = disk->private_data;
-
- return mddev->changed;
-}
-
-static int md_revalidate(struct gendisk *disk)
+static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
{
struct mddev *mddev = disk->private_data;
+ unsigned int ret = 0;
+ if (mddev->changed)
+ ret = DISK_EVENT_MEDIA_CHANGE;
mddev->changed = 0;
- return 0;
+ return ret;
}
+
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
+ .submit_bio = md_submit_bio,
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
@@ -7830,8 +7899,7 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
- .media_changed = md_media_changed,
- .revalidate_disk= md_revalidate,
+ .check_events = md_check_events,
};
static int md_thread(void *arg)
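
The md_check_events() conversion above is the generic recipe for retiring ->media_changed/->revalidate_disk: advertise DISK_EVENT_MEDIA_CHANGE on the gendisk at allocation time (as the md_alloc() hunk earlier does) and report-and-clear the pending state from ->check_events. A generic sketch, where my_dev and its media_changed flag are hypothetical driver state:

struct my_dev {
	int media_changed;
};

static unsigned int my_check_events(struct gendisk *disk,
				    unsigned int clearing)
{
	struct my_dev *dev = disk->private_data;
	unsigned int ret = 0;

	if (dev->media_changed)
		ret = DISK_EVENT_MEDIA_CHANGE;
	dev->media_changed = 0;
	return ret;
}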
@@ -8355,6 +8423,7 @@ EXPORT_SYMBOL(unregister_md_cluster_operations);
int md_setup_cluster(struct mddev *mddev, int nodes)
{
+ int ret;
if (!md_cluster_ops)
request_module("md-cluster");
spin_lock(&pers_lock);
@@ -8366,7 +8435,10 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
}
spin_unlock(&pers_lock);
- return md_cluster_ops->join(mddev, nodes);
+ ret = md_cluster_ops->join(mddev, nodes);
+ if (!ret)
+ mddev->safemode_delay = 0;
+ return ret;
}
void md_cluster_stop(struct mddev *mddev)
@@ -8767,7 +8839,7 @@ void md_do_sync(struct md_thread *thread)
} else
mddev->curr_resync = 3; /* no longer delayed */
mddev->curr_resync_completed = j;
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
md_new_event(mddev);
update_time = jiffies;
@@ -8795,7 +8867,7 @@ void md_do_sync(struct md_thread *thread)
mddev->recovery_cp = j;
update_time = jiffies;
set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
while (j >= mddev->resync_max &&
@@ -8902,7 +8974,7 @@ void md_do_sync(struct md_thread *thread)
!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
mddev->curr_resync > 3) {
mddev->curr_resync_completed = mddev->curr_resync;
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
mddev->pers->sync_request(mddev, max_sectors, &skipped);
@@ -9032,7 +9104,7 @@ static int remove_and_add_spares(struct mddev *mddev,
}
if (removed && mddev->kobj.sd)
- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
if (this && removed)
goto no_add;
@@ -9060,8 +9132,8 @@ static int remove_and_add_spares(struct mddev *mddev,
rdev->recovery_offset = 0;
}
if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
- if (sysfs_link_rdev(mddev, rdev))
- /* failure here is OK */;
+ /* failure here is OK */
+ sysfs_link_rdev(mddev, rdev);
if (!test_bit(Journal, &rdev->flags))
spares++;
md_new_event(mddev);
@@ -9315,8 +9387,7 @@ void md_reap_sync_thread(struct mddev *mddev)
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev)) {
- sysfs_notify(&mddev->kobj, NULL,
- "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
@@ -9406,8 +9477,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
if (rv == 0) {
/* Make sure they get written out promptly */
if (test_bit(ExternalBbl, &rdev->flags))
- sysfs_notify(&rdev->kobj, NULL,
- "unacknowledged_bad_blocks");
+ sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
@@ -9428,7 +9498,7 @@ int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
s += rdev->data_offset;
rv = badblocks_clear(&rdev->badblocks, s, sectors);
if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
- sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
+ sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
@@ -9658,7 +9728,7 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->recovery_offset == MaxSector &&
!test_bit(In_sync, &rdev->flags) &&
mddev->pers->spare_active(mddev))
- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
put_page(swapout);
return 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 612814d07d35..f79b5b4101ef 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -126,7 +126,10 @@ struct md_rdev {
struct kernfs_node *sysfs_state; /* handle for 'state'
* sysfs entry */
-
+ /* handle for 'unacknowledged_bad_blocks' sysfs dentry */
+ struct kernfs_node *sysfs_unack_badblocks;
+ /* handle for 'bad_blocks' sysfs dentry */
+ struct kernfs_node *sysfs_badblocks;
struct badblocks badblocks;
struct {
@@ -420,6 +423,9 @@ struct mddev {
* file in sysfs.
*/
struct kernfs_node *sysfs_action; /* handle for 'sync_action' */
+ struct kernfs_node *sysfs_completed; /* handle for 'sync_completed' */
+ struct kernfs_node *sysfs_degraded; /* handle for 'degraded' */
+ struct kernfs_node *sysfs_level; /* handle for 'level' */
struct work_struct del_work; /* used for delayed sysfs removal */
@@ -481,6 +487,7 @@ struct mddev {
struct bio_set sync_set; /* for sync operations like
* metadata and bitmap writes
*/
+ mempool_t md_io_pool;
/* Generic flush handling.
* The last to finish preflush schedules a worker to submit
@@ -597,9 +604,6 @@ struct md_personality
* array.
*/
void *(*takeover) (struct mddev *mddev);
- /* congested implements bdi.congested_fn().
- * Will not be called while array is 'suspended' */
- int (*congested)(struct mddev *mddev, int bits);
/* Changes the consistency policy of an active array. */
int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
};
@@ -710,7 +714,6 @@ extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
-extern int mddev_congested(struct mddev *mddev, int bits);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 322386ff5d22..f54a449f97aa 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -29,21 +29,6 @@ module_param(default_layout, int, 0644);
(1L << MD_HAS_PPL) | \
(1L << MD_HAS_MULTIPLE_PPLS))
-static int raid0_congested(struct mddev *mddev, int bits)
-{
- struct r0conf *conf = mddev->private;
- struct md_rdev **devlist = conf->devlist;
- int raid_disks = conf->strip_zone[0].nb_dev;
- int i, ret = 0;
-
- for (i = 0; i < raid_disks && !ret ; i++) {
- struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
-
- ret |= bdi_congested(q->backing_dev_info, bits);
- }
- return ret;
-}
-
/*
* inform the user of the raid configuration
*/
@@ -495,7 +480,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
&mddev->bio_set);
bio_chain(split, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = split;
end = zone->zone_end;
} else
@@ -559,7 +544,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
trace_block_bio_remap(bdev_get_queue(rdev->bdev),
discard_bio, disk_devt(mddev->gendisk),
bio->bi_iter.bi_sector);
- generic_make_request(discard_bio);
+ submit_bio_noacct(discard_bio);
}
bio_endio(bio);
}
@@ -600,7 +585,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
struct bio *split = bio_split(bio, sectors, GFP_NOIO,
&mddev->bio_set);
bio_chain(split, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = split;
}
@@ -633,7 +618,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
disk_devt(mddev->gendisk), bio_sector);
mddev_check_writesame(mddev, bio);
mddev_check_write_zeroes(mddev, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
return true;
}
@@ -818,7 +803,6 @@ static struct md_personality raid0_personality=
.size = raid0_size,
.takeover = raid0_takeover,
.quiesce = raid0_quiesce,
- .congested = raid0_congested,
};
static int __init raid0_init (void)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index dcd27f3da84e..960d854c07f8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -786,36 +786,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
return best_disk;
}
-static int raid1_congested(struct mddev *mddev, int bits)
-{
- struct r1conf *conf = mddev->private;
- int i, ret = 0;
-
- if ((bits & (1 << WB_async_congested)) &&
- conf->pending_count >= max_queued_requests)
- return 1;
-
- rcu_read_lock();
- for (i = 0; i < conf->raid_disks * 2; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q = bdev_get_queue(rdev->bdev);
-
- BUG_ON(!q);
-
- /* Note the '|| 1' - when read_balance prefers
- * non-congested targets, it can be removed
- */
- if ((bits & (1 << WB_async_congested)) || 1)
- ret |= bdi_congested(q->backing_dev_info, bits);
- else
- ret &= bdi_congested(q->backing_dev_info, bits);
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
/* flush any pending bitmap writes to disk before proceeding w/ I/O */
@@ -834,7 +804,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
/* Just ignore it */
bio_endio(bio);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = next;
cond_resched();
}
@@ -1312,7 +1282,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
struct bio *split = bio_split(bio, max_sectors,
gfp, &conf->bio_split);
bio_chain(split, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = split;
r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
@@ -1338,7 +1308,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
disk_devt(mddev->gendisk), r1_bio->sector);
- generic_make_request(read_bio);
+ submit_bio_noacct(read_bio);
}
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
@@ -1483,7 +1453,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
struct bio *split = bio_split(bio, max_sectors,
GFP_NOIO, &conf->bio_split);
bio_chain(split, bio);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = split;
r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
@@ -2240,7 +2210,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
atomic_inc(&r1_bio->remaining);
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
- generic_make_request(wbio);
+ submit_bio_noacct(wbio);
}
put_sync_write_buf(r1_bio, 1);
@@ -2926,7 +2896,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
} else {
@@ -2935,7 +2905,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
return nr_sectors;
}
@@ -3396,7 +3366,6 @@ static struct md_personality raid1_personality =
.check_reshape = raid1_reshape,
.quiesce = raid1_quiesce,
.takeover = raid1_takeover,
- .congested = raid1_congested,
};
static int __init raid_init(void)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ec136e44aef7..e8fa32733917 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -848,31 +848,6 @@ static struct md_rdev *read_balance(struct r10conf *conf,
return rdev;
}
-static int raid10_congested(struct mddev *mddev, int bits)
-{
- struct r10conf *conf = mddev->private;
- int i, ret = 0;
-
- if ((bits & (1 << WB_async_congested)) &&
- conf->pending_count >= max_queued_requests)
- return 1;
-
- rcu_read_lock();
- for (i = 0;
- (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
- && ret == 0;
- i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct request_queue *q = bdev_get_queue(rdev->bdev);
-
- ret |= bdi_congested(q->backing_dev_info, bits);
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
static void flush_pending_writes(struct r10conf *conf)
{
/* Any writes that have been queued but are awaiting
@@ -917,7 +892,7 @@ static void flush_pending_writes(struct r10conf *conf)
/* Just ignore it */
bio_endio(bio);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = next;
}
blk_finish_plug(&plug);
@@ -980,6 +955,7 @@ static void wait_barrier(struct r10conf *conf)
{
spin_lock_irq(&conf->resync_lock);
if (conf->barrier) {
+ struct bio_list *bio_list = current->bio_list;
conf->nr_waiting++;
/* Wait for the barrier to drop.
* However if there are already pending
@@ -994,9 +970,16 @@ static void wait_barrier(struct r10conf *conf)
wait_event_lock_irq(conf->wait_barrier,
!conf->barrier ||
(atomic_read(&conf->nr_pending) &&
- current->bio_list &&
- (!bio_list_empty(&current->bio_list[0]) ||
- !bio_list_empty(&current->bio_list[1]))),
+ bio_list &&
+ (!bio_list_empty(&bio_list[0]) ||
+ !bio_list_empty(&bio_list[1]))) ||
+ /* move on if recovery thread is
+ * blocked by us
+ */
+ (conf->mddev->thread->tsk == current &&
+ test_bit(MD_RECOVERY_RUNNING,
+ &conf->mddev->recovery) &&
+ conf->nr_queued > 0),
conf->resync_lock);
conf->nr_waiting--;
if (!conf->nr_waiting)
@@ -1102,7 +1085,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
/* Just ignore it */
bio_endio(bio);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = next;
}
kfree(plug);
@@ -1194,7 +1177,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
gfp, &conf->bio_split);
bio_chain(split, bio);
allow_barrier(conf);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
wait_barrier(conf);
bio = split;
r10_bio->master_bio = bio;
@@ -1221,7 +1204,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(read_bio->bi_disk->queue,
read_bio, disk_devt(mddev->gendisk),
r10_bio->sector);
- generic_make_request(read_bio);
+ submit_bio_noacct(read_bio);
return;
}
@@ -1479,7 +1462,7 @@ retry_write:
GFP_NOIO, &conf->bio_split);
bio_chain(split, bio);
allow_barrier(conf);
- generic_make_request(bio);
+ submit_bio_noacct(bio);
wait_barrier(conf);
bio = split;
r10_bio->master_bio = bio;
@@ -2099,7 +2082,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
- generic_make_request(tbio);
+ submit_bio_noacct(tbio);
}
/* Now write out to any replacement devices
@@ -2118,7 +2101,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].replacement->bdev,
bio_sectors(tbio));
- generic_make_request(tbio);
+ submit_bio_noacct(tbio);
}
done:
@@ -2241,7 +2224,7 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
wbio = r10_bio->devs[1].bio;
wbio2 = r10_bio->devs[1].repl_bio;
/* Need to test wbio2->bi_end_io before we call
- * generic_make_request as if the former is NULL,
+ * submit_bio_noacct as if the former is NULL,
* the latter is free to free wbio2.
*/
if (wbio2 && !wbio2->bi_end_io)
@@ -2249,13 +2232,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
- generic_make_request(wbio);
+ submit_bio_noacct(wbio);
}
if (wbio2) {
atomic_inc(&conf->mirrors[d].replacement->nr_pending);
md_sync_acct(conf->mirrors[d].replacement->bdev,
bio_sectors(wbio2));
- generic_make_request(wbio2);
+ submit_bio_noacct(wbio2);
}
}
@@ -2889,7 +2872,7 @@ static void raid10_set_cluster_sync_high(struct r10conf *conf)
* a number of r10_bio structures, one for each out-of-sync device.
* As we setup these structures, we collect all bio's together into a list
* which we then process collectively to add pages, and then process again
- * to pass to generic_make_request.
+ * to pass to submit_bio_noacct.
*
* The r10_bio structures are linked using a borrowed master_bio pointer.
* This link is counted in ->remaining. When the r10_bio that points to NULL
@@ -3496,7 +3479,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io == end_sync_read) {
md_sync_acct_bio(bio, nr_sectors);
bio->bi_status = 0;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
@@ -4307,8 +4290,8 @@ out:
else
rdev->recovery_offset = 0;
- if (sysfs_link_rdev(mddev, rdev))
- /* Failure here is OK */;
+ /* Failure here is OK */
+ sysfs_link_rdev(mddev, rdev);
}
} else if (rdev->raid_disk >= conf->prev.raid_disks
&& !test_bit(Faulty, &rdev->flags)) {
@@ -4454,7 +4437,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
sector_nr = conf->reshape_progress;
if (sector_nr) {
mddev->curr_resync_completed = sector_nr;
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
*skipped = 1;
return sector_nr;
}
@@ -4654,7 +4637,7 @@ read_more:
md_sync_acct_bio(read_bio, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
- generic_make_request(read_bio);
+ submit_bio_noacct(read_bio);
sectors_done += nr_sectors;
if (sector_nr <= last)
goto read_more;
@@ -4717,7 +4700,7 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
md_sync_acct_bio(b, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
b->bi_next = NULL;
- generic_make_request(b);
+ submit_bio_noacct(b);
}
end_reshape_request(r10_bio);
}
@@ -4929,7 +4912,6 @@ static struct md_personality raid10_personality =
.start_reshape = raid10_start_reshape,
.finish_reshape = raid10_finish_reshape,
.update_reshape_pos = raid10_update_reshape_pos,
- .congested = raid10_congested,
};
static int __init raid_init(void)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 9b6da759dca2..4337ae0e6af2 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -195,9 +195,7 @@ struct r5l_log {
static inline sector_t r5c_tree_index(struct r5conf *conf,
sector_t sect)
{
- sector_t offset;
-
- offset = sector_div(sect, conf->chunk_sectors);
+ sector_div(sect, conf->chunk_sectors);
return sect;
}
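
As a reminder of sector_div() semantics: it divides its first argument in place and returns the remainder, so the old offset variable captured a remainder nobody used. For example:

sector_t sect = 1000;
u32 rem = sector_div(sect, 64);	/* sect becomes 15, rem is 40 */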
@@ -298,8 +296,8 @@ r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
wbi = dev->written;
dev->written = NULL;
while (wbi && wbi->bi_iter.bi_sector <
- dev->sector + STRIPE_SECTORS) {
- wbi2 = r5_next_bio(wbi, dev->sector);
+ dev->sector + RAID5_STRIPE_SECTORS(conf)) {
+ wbi2 = r5_next_bio(conf, wbi, dev->sector);
md_write_end(conf->mddev);
bio_endio(wbi);
wbi = wbi2;
@@ -316,7 +314,7 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
set_bit(R5_UPTODATE, &sh->dev[i].flags);
r5c_return_dev_pending_writes(conf, &sh->dev[i]);
md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS,
+ RAID5_STRIPE_SECTORS(conf),
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
}
@@ -364,7 +362,7 @@ void r5c_check_cached_full_stripe(struct r5conf *conf)
*/
if (atomic_read(&conf->r5c_cached_full_stripes) >=
min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
- conf->chunk_sectors >> STRIPE_SHIFT))
+ conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
r5l_wake_reclaim(conf->log, 0);
}
@@ -2430,10 +2428,15 @@ static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
struct mddev *mddev = log->rdev->mddev;
struct r5conf *conf = mddev->private;
struct stripe_head *sh, *next;
+ bool cleared_pending = false;
if (ctx->data_only_stripes == 0)
return;
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
+ cleared_pending = true;
+ clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
+ }
log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
@@ -2448,6 +2451,8 @@ static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
atomic_read(&conf->active_stripes) == 0);
log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+ if (cleared_pending)
+ set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
}
static int r5l_recovery_log(struct r5l_log *log)
@@ -2532,13 +2537,10 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
struct r5conf *conf;
int ret;
- ret = mddev_lock(mddev);
- if (ret)
- return ret;
-
+ spin_lock(&mddev->lock);
conf = mddev->private;
if (!conf || !conf->log) {
- mddev_unlock(mddev);
+ spin_unlock(&mddev->lock);
return 0;
}
@@ -2558,7 +2560,7 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
default:
ret = 0;
}
- mddev_unlock(mddev);
+ spin_unlock(&mddev->lock);
return ret;
}
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index a750f4bbb5d9..d0f540296fe9 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -324,7 +324,7 @@ static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
* be just after the last logged stripe and write to the same
* disks. Use bit shift and logarithm to avoid 64-bit division.
*/
- if ((sh->sector == sh_last->sector + STRIPE_SECTORS) &&
+ if ((sh->sector == sh_last->sector + RAID5_STRIPE_SECTORS(conf)) &&
(data_sector >> ilog2(conf->chunk_sectors) ==
data_sector_last >> ilog2(conf->chunk_sectors)) &&
((data_sector - data_sector_last) * data_disks ==
@@ -844,9 +844,9 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
/* if start and end is 4k aligned, use a 4k block */
if (block_size == 512 &&
- (r_sector_first & (STRIPE_SECTORS - 1)) == 0 &&
- (r_sector_last & (STRIPE_SECTORS - 1)) == 0)
- block_size = STRIPE_SIZE;
+ (r_sector_first & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0 &&
+ (r_sector_last & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0)
+ block_size = RAID5_STRIPE_SIZE(conf);
/* iterate through blocks in strip */
for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
@@ -1274,7 +1274,8 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);
if (ppl_data_sectors > 0)
- ppl_data_sectors = rounddown(ppl_data_sectors, STRIPE_SECTORS);
+ ppl_data_sectors = rounddown(ppl_data_sectors,
+ RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));
if (ppl_data_sectors <= 0) {
pr_warn("md/raid:%s: PPL space too small on %s\n",
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ab8067f9ce8c..fb8d1fb14088 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -69,13 +69,13 @@ static struct workqueue_struct *raid5_wq;
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
- int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
+ int hash = (sect >> RAID5_STRIPE_SHIFT(conf)) & HASH_MASK;
return &conf->stripe_hashtbl[hash];
}
-static inline int stripe_hash_locks_hash(sector_t sect)
+static inline int stripe_hash_locks_hash(struct r5conf *conf, sector_t sect)
{
- return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
+ return (sect >> RAID5_STRIPE_SHIFT(conf)) & STRIPE_HASH_LOCKS_MASK;
}
static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
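
The hunks that follow replace the compile-time STRIPE_SIZE/STRIPE_SECTORS/STRIPE_SHIFT constants with conf-parameterized macros. Their definitions live in raid5.h rather than in this diff; presumably they start out as thin wrappers that take conf so a later patch can turn the stripe size into a per-array value, roughly:

/* Assumed shape, not shown in this diff: initially equivalent to the
 * old constants, but parameterized on conf for future flexibility.
 */
#define RAID5_STRIPE_SIZE(conf)		STRIPE_SIZE
#define RAID5_STRIPE_SHIFT(conf)	STRIPE_SHIFT
#define RAID5_STRIPE_SECTORS(conf)	STRIPE_SECTORS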
@@ -627,7 +627,7 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
int previous, int noblock, int noquiesce)
{
struct stripe_head *sh;
- int hash = stripe_hash_locks_hash(sector);
+ int hash = stripe_hash_locks_hash(conf, sector);
int inc_empty_inactive_list_flag;
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
@@ -748,9 +748,9 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
tmp_sec = sh->sector;
if (!sector_div(tmp_sec, conf->chunk_sectors))
return;
- head_sector = sh->sector - STRIPE_SECTORS;
+ head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf);
- hash = stripe_hash_locks_hash(head_sector);
+ hash = stripe_hash_locks_hash(conf, head_sector);
spin_lock_irq(conf->hash_locks + hash);
head = __find_stripe(conf, head_sector, conf->generation);
if (head && !atomic_inc_not_zero(&head->count)) {
@@ -873,7 +873,7 @@ static void dispatch_bio_list(struct bio_list *tmp)
struct bio *bio;
while ((bio = bio_list_pop(tmp)))
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
static int cmp_stripe(void *priv, struct list_head *a, struct list_head *b)
@@ -1057,7 +1057,7 @@ again:
test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
int bad_sectors;
- int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
+ int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
&first_bad, &bad_sectors);
if (!bad)
break;
@@ -1089,7 +1089,7 @@ again:
if (rdev) {
if (s->syncing || s->expanding || s->expanded
|| s->replacing)
- md_sync_acct(rdev->bdev, STRIPE_SECTORS);
+ md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf));
set_bit(STRIPE_IO_STARTED, &sh->state);
@@ -1129,9 +1129,9 @@ again:
else
sh->dev[i].vec.bv_page = sh->dev[i].page;
bi->bi_vcnt = 1;
- bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
+ bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
bi->bi_io_vec[0].bv_offset = 0;
- bi->bi_iter.bi_size = STRIPE_SIZE;
+ bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
bi->bi_write_hint = sh->dev[i].write_hint;
if (!rrdev)
sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
@@ -1151,12 +1151,12 @@ again:
if (should_defer && op_is_write(op))
bio_list_add(&pending_bios, bi);
else
- generic_make_request(bi);
+ submit_bio_noacct(bi);
}
if (rrdev) {
if (s->syncing || s->expanding || s->expanded
|| s->replacing)
- md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
+ md_sync_acct(rrdev->bdev, RAID5_STRIPE_SECTORS(conf));
set_bit(STRIPE_IO_STARTED, &sh->state);
@@ -1183,9 +1183,9 @@ again:
WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
sh->dev[i].rvec.bv_page = sh->dev[i].page;
rbi->bi_vcnt = 1;
- rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
+ rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
rbi->bi_io_vec[0].bv_offset = 0;
- rbi->bi_iter.bi_size = STRIPE_SIZE;
+ rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
rbi->bi_write_hint = sh->dev[i].write_hint;
sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
/*
@@ -1201,7 +1201,7 @@ again:
if (should_defer && op_is_write(op))
bio_list_add(&pending_bios, rbi);
else
- generic_make_request(rbi);
+ submit_bio_noacct(rbi);
}
if (!rdev && !rrdev) {
if (op_is_write(op))
@@ -1235,6 +1235,7 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
int page_offset;
struct async_submit_ctl submit;
enum async_tx_flags flags = 0;
+ struct r5conf *conf = sh->raid_conf;
if (bio->bi_iter.bi_sector >= sector)
page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
@@ -1256,8 +1257,8 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
len -= b_offset;
}
- if (len > 0 && page_offset + len > STRIPE_SIZE)
- clen = STRIPE_SIZE - page_offset;
+ if (len > 0 && page_offset + len > RAID5_STRIPE_SIZE(conf))
+ clen = RAID5_STRIPE_SIZE(conf) - page_offset;
else
clen = len;
@@ -1265,9 +1266,9 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
b_offset += bvl.bv_offset;
bio_page = bvl.bv_page;
if (frombio) {
- if (sh->raid_conf->skip_copy &&
+ if (conf->skip_copy &&
b_offset == 0 && page_offset == 0 &&
- clen == STRIPE_SIZE &&
+ clen == RAID5_STRIPE_SIZE(conf) &&
!no_skipcopy)
*page = bio_page;
else
@@ -1292,6 +1293,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
int i;
+ struct r5conf *conf = sh->raid_conf;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
@@ -1312,8 +1314,8 @@ static void ops_complete_biofill(void *stripe_head_ref)
rbi = dev->read;
dev->read = NULL;
while (rbi && rbi->bi_iter.bi_sector <
- dev->sector + STRIPE_SECTORS) {
- rbi2 = r5_next_bio(rbi, dev->sector);
+ dev->sector + RAID5_STRIPE_SECTORS(conf)) {
+ rbi2 = r5_next_bio(conf, rbi, dev->sector);
bio_endio(rbi);
rbi = rbi2;
}
@@ -1330,6 +1332,7 @@ static void ops_run_biofill(struct stripe_head *sh)
struct dma_async_tx_descriptor *tx = NULL;
struct async_submit_ctl submit;
int i;
+ struct r5conf *conf = sh->raid_conf;
BUG_ON(sh->batch_head);
pr_debug("%s: stripe %llu\n", __func__,
@@ -1344,10 +1347,10 @@ static void ops_run_biofill(struct stripe_head *sh)
dev->toread = NULL;
spin_unlock_irq(&sh->stripe_lock);
while (rbi && rbi->bi_iter.bi_sector <
- dev->sector + STRIPE_SECTORS) {
+ dev->sector + RAID5_STRIPE_SECTORS(conf)) {
tx = async_copy_data(0, rbi, &dev->page,
dev->sector, tx, sh, 0);
- rbi = r5_next_bio(rbi, dev->sector);
+ rbi = r5_next_bio(conf, rbi, dev->sector);
}
}
}
@@ -1429,9 +1432,11 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
if (unlikely(count == 1))
- tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
+ tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0,
+ RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
else
- tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
+ tx = async_xor(xor_dest, xor_srcs, 0, count,
+ RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
return tx;
}
@@ -1522,7 +1527,8 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
ops_complete_compute, sh,
to_addr_conv(sh, percpu, 0));
- tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+ tx = async_gen_syndrome(blocks, 0, count+2,
+ RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
} else {
/* Compute any data- or p-drive using XOR */
count = 0;
@@ -1535,7 +1541,8 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
NULL, ops_complete_compute, sh,
to_addr_conv(sh, percpu, 0));
- tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
+ tx = async_xor(dest, blocks, 0, count,
+ RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
}
return tx;
@@ -1598,7 +1605,8 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
ops_complete_compute, sh,
to_addr_conv(sh, percpu, 0));
return async_gen_syndrome(blocks, 0, syndrome_disks+2,
- STRIPE_SIZE, &submit);
+ RAID5_STRIPE_SIZE(sh->raid_conf),
+ &submit);
} else {
struct page *dest;
int data_target;
@@ -1621,7 +1629,8 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
NULL, NULL, NULL,
to_addr_conv(sh, percpu, 0));
- tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
+ tx = async_xor(dest, blocks, 0, count,
+ RAID5_STRIPE_SIZE(sh->raid_conf),
&submit);
count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
@@ -1629,7 +1638,8 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
ops_complete_compute, sh,
to_addr_conv(sh, percpu, 0));
return async_gen_syndrome(blocks, 0, count+2,
- STRIPE_SIZE, &submit);
+ RAID5_STRIPE_SIZE(sh->raid_conf),
+ &submit);
}
} else {
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
@@ -1638,13 +1648,15 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
if (failb == syndrome_disks) {
/* We're missing D+P. */
return async_raid6_datap_recov(syndrome_disks+2,
- STRIPE_SIZE, faila,
- blocks, &submit);
+ RAID5_STRIPE_SIZE(sh->raid_conf),
+ faila,
+ blocks, &submit);
} else {
/* We're missing D+D. */
return async_raid6_2data_recov(syndrome_disks+2,
- STRIPE_SIZE, faila, failb,
- blocks, &submit);
+ RAID5_STRIPE_SIZE(sh->raid_conf),
+ faila, failb,
+ blocks, &submit);
}
}
}
@@ -1691,7 +1703,8 @@ ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
- tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
+ tx = async_xor(xor_dest, xor_srcs, 0, count,
+ RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
return tx;
}
@@ -1711,7 +1724,8 @@ ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
- tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+ tx = async_gen_syndrome(blocks, 0, count+2,
+ RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
return tx;
}
@@ -1752,7 +1766,7 @@ again:
WARN_ON(dev->page != dev->orig_page);
while (wbi && wbi->bi_iter.bi_sector <
- dev->sector + STRIPE_SECTORS) {
+ dev->sector + RAID5_STRIPE_SECTORS(conf)) {
if (wbi->bi_opf & REQ_FUA)
set_bit(R5_WantFUA, &dev->flags);
if (wbi->bi_opf & REQ_SYNC)
@@ -1770,7 +1784,7 @@ again:
clear_bit(R5_OVERWRITE, &dev->flags);
}
}
- wbi = r5_next_bio(wbi, dev->sector);
+ wbi = r5_next_bio(conf, wbi, dev->sector);
}
if (head_sh->batch_head) {
@@ -1910,9 +1924,11 @@ again:
}
if (unlikely(count == 1))
- tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
+ tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0,
+ RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
else
- tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
+ tx = async_xor(xor_dest, xor_srcs, 0, count,
+ RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
if (!last_stripe) {
j++;
sh = list_first_entry(&sh->batch_list, struct stripe_head,
@@ -1972,7 +1988,8 @@ again:
} else
init_async_submit(&submit, 0, tx, NULL, NULL,
to_addr_conv(sh, percpu, j));
- tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+ tx = async_gen_syndrome(blocks, 0, count+2,
+ RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
if (!last_stripe) {
j++;
sh = list_first_entry(&sh->batch_list, struct stripe_head,
@@ -2020,7 +2037,8 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
init_async_submit(&submit, 0, NULL, NULL, NULL,
to_addr_conv(sh, percpu, 0));
- tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
+ tx = async_xor_val(xor_dest, xor_srcs, 0, count,
+ RAID5_STRIPE_SIZE(sh->raid_conf),
&sh->ops.zero_sum_result, &submit);
atomic_inc(&sh->count);
@@ -2045,7 +2063,8 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
atomic_inc(&sh->count);
init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
sh, to_addr_conv(sh, percpu, 0));
- async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
+ async_syndrome_val(srcs, 0, count+2,
+ RAID5_STRIPE_SIZE(sh->raid_conf),
&sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
@@ -2217,9 +2236,9 @@ static int grow_stripes(struct r5conf *conf, int num)
/**
* scribble_alloc - allocate percpu scribble buffer for required size
* of the scribble region
- * @percpu - from for_each_present_cpu() of the caller
- * @num - total number of disks in the array
- * @cnt - scribble objs count for required size of the scribble region
+ * @percpu: from for_each_present_cpu() of the caller
+ * @num: total number of disks in the array
+ * @cnt: scribble objs count for required size of the scribble region
*
* The scribble buffer size must be enough to contain:
* 1/ a struct page pointer for each device in the array +2
@@ -2275,7 +2294,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
percpu = per_cpu_ptr(conf->percpu, cpu);
err = scribble_alloc(percpu, new_disks,
- new_sectors / STRIPE_SECTORS);
+ new_sectors / RAID5_STRIPE_SECTORS(conf));
if (err)
break;
}
@@ -2509,10 +2528,10 @@ static void raid5_end_read_request(struct bio * bi)
*/
pr_info_ratelimited(
"md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n",
- mdname(conf->mddev), STRIPE_SECTORS,
+ mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf),
(unsigned long long)s,
bdevname(rdev->bdev, b));
- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
+ atomic_add(RAID5_STRIPE_SECTORS(conf), &rdev->corrected_errors);
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
@@ -2585,7 +2604,7 @@ static void raid5_end_read_request(struct bio * bi)
if (!(set_bad
&& test_bit(In_sync, &rdev->flags)
&& rdev_set_badblocks(
- rdev, sh->sector, STRIPE_SECTORS, 0)))
+ rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0)))
md_error(conf->mddev, rdev);
}
}
@@ -2601,7 +2620,7 @@ static void raid5_end_write_request(struct bio *bi)
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
- struct md_rdev *uninitialized_var(rdev);
+ struct md_rdev *rdev;
sector_t first_bad;
int bad_sectors;
int replacement = 0;
@@ -2637,7 +2656,7 @@ static void raid5_end_write_request(struct bio *bi)
if (bi->bi_status)
md_error(conf->mddev, rdev);
else if (is_badblock(rdev, sh->sector,
- STRIPE_SECTORS,
+ RAID5_STRIPE_SECTORS(conf),
&first_bad, &bad_sectors))
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else {
@@ -2649,7 +2668,7 @@ static void raid5_end_write_request(struct bio *bi)
set_bit(MD_RECOVERY_NEEDED,
&rdev->mddev->recovery);
} else if (is_badblock(rdev, sh->sector,
- STRIPE_SECTORS,
+ RAID5_STRIPE_SECTORS(conf),
&first_bad, &bad_sectors)) {
set_bit(R5_MadeGood, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags))
@@ -3283,13 +3302,13 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
/* check if page is covered */
sector_t sector = sh->dev[dd_idx].sector;
for (bi=sh->dev[dd_idx].towrite;
- sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
+ sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) &&
bi && bi->bi_iter.bi_sector <= sector;
- bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
+ bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) {
if (bio_end_sector(bi) >= sector)
sector = bio_end_sector(bi);
}
- if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
+ if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf))
if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))
sh->overwrite_disks++;
}
@@ -3314,7 +3333,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
set_bit(STRIPE_BITMAP_PENDING, &sh->state);
spin_unlock_irq(&sh->stripe_lock);
md_bitmap_startwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS, 0);
+ RAID5_STRIPE_SECTORS(conf), 0);
spin_lock_irq(&sh->stripe_lock);
clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
if (!sh->batch_head) {
@@ -3376,7 +3395,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
if (!rdev_set_badblocks(
rdev,
sh->sector,
- STRIPE_SECTORS, 0))
+ RAID5_STRIPE_SECTORS(conf), 0))
md_error(conf->mddev, rdev);
rdev_dec_pending(rdev, conf->mddev);
}
@@ -3396,8 +3415,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
wake_up(&conf->wait_for_overlap);
while (bi && bi->bi_iter.bi_sector <
- sh->dev[i].sector + STRIPE_SECTORS) {
- struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
+ sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
+ struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector);
md_write_end(conf->mddev);
bio_io_error(bi);
@@ -3405,7 +3424,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
}
if (bitmap_end)
md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS, 0, 0);
+ RAID5_STRIPE_SECTORS(conf), 0, 0);
bitmap_end = 0;
/* and fail all 'written' */
bi = sh->dev[i].written;
@@ -3417,8 +3436,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
if (bi) bitmap_end = 1;
while (bi && bi->bi_iter.bi_sector <
- sh->dev[i].sector + STRIPE_SECTORS) {
- struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
+ sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
+ struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
md_write_end(conf->mddev);
bio_io_error(bi);
@@ -3441,9 +3460,9 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
if (bi)
s->to_read--;
while (bi && bi->bi_iter.bi_sector <
- sh->dev[i].sector + STRIPE_SECTORS) {
+ sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
struct bio *nextbi =
- r5_next_bio(bi, sh->dev[i].sector);
+ r5_next_bio(conf, bi, sh->dev[i].sector);
bio_io_error(bi);
bi = nextbi;
@@ -3451,7 +3470,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
}
if (bitmap_end)
md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS, 0, 0);
+ RAID5_STRIPE_SECTORS(conf), 0, 0);
/* If we were in the middle of a write the parity block might
* still be locked - so just clear all R5_LOCKED flags
*/
@@ -3496,14 +3515,14 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)
&& !rdev_set_badblocks(rdev, sh->sector,
- STRIPE_SECTORS, 0))
+ RAID5_STRIPE_SECTORS(conf), 0))
abort = 1;
rdev = rcu_dereference(conf->disks[i].replacement);
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)
&& !rdev_set_badblocks(rdev, sh->sector,
- STRIPE_SECTORS, 0))
+ RAID5_STRIPE_SECTORS(conf), 0))
abort = 1;
}
rcu_read_unlock();
@@ -3511,7 +3530,7 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
conf->recovery_disabled =
conf->mddev->recovery_disabled;
}
- md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
+ md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), !abort);
}
static int want_replace(struct stripe_head *sh, int disk_idx)
@@ -3538,6 +3557,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
&sh->dev[s->failed_num[1]] };
int i;
+ bool force_rcw = (sh->raid_conf->rmw_level == PARITY_DISABLE_RMW);
if (test_bit(R5_LOCKED, &dev->flags) ||
@@ -3596,17 +3616,27 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
* devices must be read.
*/
return 1;
+
+ if (s->failed >= 2 &&
+ (fdev[i]->towrite ||
+ s->failed_num[i] == sh->pd_idx ||
+ s->failed_num[i] == sh->qd_idx) &&
+ !test_bit(R5_UPTODATE, &fdev[i]->flags))
+ /* In max degraded raid6, if the failed disk is P or Q,
+ * or if we want to read from the failed disk, we need to do
+ * reconstruct-write.
+ */
+ force_rcw = true;
}
- /* If we are forced to do a reconstruct-write, either because
- * the current RAID6 implementation only supports that, or
- * because parity cannot be trusted and we are currently
- * recovering it, there is extra need to be careful.
- /* If we are forced to do a reconstruct-write because parity
+ * cannot be trusted and we are currently recovering it, there
+ * is extra need to be careful.
* If one of the devices that we would need to read, because
* it is not being overwritten (and maybe not written at all)
* is missing/faulty, then we need to read everything we can.
*/
- if (sh->raid_conf->level != 6 &&
+ if (!force_rcw &&
sh->sector < sh->raid_conf->mddev->recovery_cp)
/* reconstruct-write isn't being forced */
return 0;
@@ -3710,7 +3740,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
return 0;
}
-/**
+/*
* handle_stripe_fill - read or compute data to satisfy pending requests.
*/
static void handle_stripe_fill(struct stripe_head *sh,
@@ -3785,14 +3815,14 @@ returnbi:
wbi = dev->written;
dev->written = NULL;
while (wbi && wbi->bi_iter.bi_sector <
- dev->sector + STRIPE_SECTORS) {
- wbi2 = r5_next_bio(wbi, dev->sector);
+ dev->sector + RAID5_STRIPE_SECTORS(conf)) {
+ wbi2 = r5_next_bio(conf, wbi, dev->sector);
md_write_end(conf->mddev);
bio_endio(wbi);
wbi = wbi2;
}
md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS,
+ RAID5_STRIPE_SECTORS(conf),
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
if (head_sh->batch_head) {
@@ -3976,10 +4006,8 @@ static int handle_stripe_dirtying(struct r5conf *conf,
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
s->locked++;
- } else {
+ } else
set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
}
}
}
@@ -4004,10 +4032,8 @@ static int handle_stripe_dirtying(struct r5conf *conf,
set_bit(R5_Wantread, &dev->flags);
s->locked++;
qread++;
- } else {
+ } else
set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
}
}
if (rcw && conf->mddev->queue)
@@ -4099,7 +4125,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
*/
set_bit(STRIPE_INSYNC, &sh->state);
else {
- atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
+ atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches);
if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
/* don't try to repair!! */
set_bit(STRIPE_INSYNC, &sh->state);
@@ -4107,7 +4133,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
"%llu-%llu\n", mdname(conf->mddev),
(unsigned long long) sh->sector,
(unsigned long long) sh->sector +
- STRIPE_SECTORS);
+ RAID5_STRIPE_SECTORS(conf));
} else {
sh->check_state = check_state_compute_run;
set_bit(STRIPE_COMPUTE_RUN, &sh->state);
@@ -4264,7 +4290,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
*/
}
} else {
- atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
+ atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches);
if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
/* don't try to repair!! */
set_bit(STRIPE_INSYNC, &sh->state);
@@ -4272,7 +4298,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
"%llu-%llu\n", mdname(conf->mddev),
(unsigned long long) sh->sector,
(unsigned long long) sh->sector +
- STRIPE_SECTORS);
+ RAID5_STRIPE_SECTORS(conf));
} else {
int *target = &sh->ops.target;
@@ -4343,7 +4369,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
/* place all the copies on one channel */
init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
tx = async_memcpy(sh2->dev[dd_idx].page,
- sh->dev[i].page, 0, 0, STRIPE_SIZE,
+ sh->dev[i].page, 0, 0, RAID5_STRIPE_SIZE(conf),
&submit);
set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
@@ -4442,8 +4468,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
*/
rdev = rcu_dereference(conf->disks[i].replacement);
if (rdev && !test_bit(Faulty, &rdev->flags) &&
- rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
- !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
+ rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) &&
+ !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
&first_bad, &bad_sectors))
set_bit(R5_ReadRepl, &dev->flags);
else {
@@ -4457,7 +4483,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (rdev && test_bit(Faulty, &rdev->flags))
rdev = NULL;
if (rdev) {
- is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
+ is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
&first_bad, &bad_sectors);
if (s->blocked_rdev == NULL
&& (test_bit(Blocked, &rdev->flags)
@@ -4484,7 +4510,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
+ else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset)
/* in sync if before recovery_offset */
set_bit(R5_Insync, &dev->flags);
else if (test_bit(R5_UPTODATE, &dev->flags) &&
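As the in-line comment notes, a device that is still recovering counts as in-sync for a given stripe only once recovery has swept past the whole stripe. With 8-sector stripes, for instance, a stripe starting at sector 999992 on a device whose recovery_offset is 1000000 qualifies (999992 + 8 <= 1000000), while a stripe starting at sector 1000000 does not.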
@@ -4573,12 +4599,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
rcu_read_unlock();
}
+/*
+ * Return '1' if this is a member of a batch, or '0' if it is a lone stripe
+ * or a head which can now be handled.
+ */
static int clear_batch_ready(struct stripe_head *sh)
{
- /* Return '1' if this is a member of batch, or
- * '0' if it is a lone stripe or a head which can now be
- * handled.
- */
struct stripe_head *tmp;
if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
return (sh->batch_head && sh->batch_head != sh);
@@ -4682,6 +4708,16 @@ static void handle_stripe(struct stripe_head *sh)
struct r5dev *pdev, *qdev;
clear_bit(STRIPE_HANDLE, &sh->state);
+
+ /*
+ * handle_stripe should not continue to handle a batched stripe; only
+ * the head of a batch list or a lone stripe can continue. Otherwise
+ * break_stripe_batch_list could warn that STRIPE_ACTIVE is set for a
+ * batched stripe.
+ */
+ if (clear_batch_ready(sh))
+ return;
+
if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
/* already being handled, ensure it gets handled
* again when current action finishes */
@@ -4689,11 +4725,6 @@ static void handle_stripe(struct stripe_head *sh)
return;
}
- if (clear_batch_ready(sh) ) {
- clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
- return;
- }
-
if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
break_stripe_batch_list(sh, 0);
@@ -4842,7 +4873,7 @@ static void handle_stripe(struct stripe_head *sh)
* or to load a block that is being partially written.
*/
if (s.to_read || s.non_overwrite
- || (conf->level == 6 && s.to_write && s.failed)
+ || (s.to_write && s.failed)
|| (s.syncing && (s.uptodate + s.compute < disks))
|| s.replacing
|| s.expanding)
@@ -4927,7 +4958,7 @@ static void handle_stripe(struct stripe_head *sh)
if ((s.syncing || s.replacing) && s.locked == 0 &&
!test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
test_bit(STRIPE_INSYNC, &sh->state)) {
- md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
+ md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1);
clear_bit(STRIPE_SYNCING, &sh->state);
if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
wake_up(&conf->wait_for_overlap);
@@ -4946,14 +4977,11 @@ static void handle_stripe(struct stripe_head *sh)
if (!test_bit(R5_ReWrite, &dev->flags)) {
set_bit(R5_Wantwrite, &dev->flags);
set_bit(R5_ReWrite, &dev->flags);
- set_bit(R5_LOCKED, &dev->flags);
- s.locked++;
- } else {
+ } else
/* let's read it back */
set_bit(R5_Wantread, &dev->flags);
- set_bit(R5_LOCKED, &dev->flags);
- s.locked++;
- }
+ set_bit(R5_LOCKED, &dev->flags);
+ s.locked++;
}
}
@@ -4995,7 +5023,7 @@ static void handle_stripe(struct stripe_head *sh)
clear_bit(STRIPE_EXPAND_READY, &sh->state);
atomic_dec(&conf->reshape_stripes);
wake_up(&conf->wait_for_overlap);
- md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
+ md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1);
}
if (s.expanding && s.locked == 0 &&
@@ -5025,14 +5053,14 @@ finish:
/* We own a safe reference to the rdev */
rdev = conf->disks[i].rdev;
if (!rdev_set_badblocks(rdev, sh->sector,
- STRIPE_SECTORS, 0))
+ RAID5_STRIPE_SECTORS(conf), 0))
md_error(conf->mddev, rdev);
rdev_dec_pending(rdev, conf->mddev);
}
if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
rdev = conf->disks[i].rdev;
rdev_clear_badblocks(rdev, sh->sector,
- STRIPE_SECTORS, 0);
+ RAID5_STRIPE_SECTORS(conf), 0);
rdev_dec_pending(rdev, conf->mddev);
}
if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
@@ -5041,7 +5069,7 @@ finish:
/* rdev have been moved down */
rdev = conf->disks[i].rdev;
rdev_clear_badblocks(rdev, sh->sector,
- STRIPE_SECTORS, 0);
+ RAID5_STRIPE_SECTORS(conf), 0);
rdev_dec_pending(rdev, conf->mddev);
}
}
@@ -5099,28 +5127,6 @@ static void activate_bit_delay(struct r5conf *conf,
}
}
-static int raid5_congested(struct mddev *mddev, int bits)
-{
- struct r5conf *conf = mddev->private;
-
- /* No difference between reads and writes. Just check
- * how busy the stripe_cache is
- */
-
- if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
- return 1;
-
- /* Also checks whether there is pressure on r5cache log space */
- if (test_bit(R5C_LOG_TIGHT, &conf->cache_state))
- return 1;
- if (conf->quiesce)
- return 1;
- if (atomic_read(&conf->empty_inactive_list_nr))
- return 1;
-
- return 0;
-}
-
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
struct r5conf *conf = mddev->private;
@@ -5289,7 +5295,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
trace_block_bio_remap(align_bi->bi_disk->queue,
align_bi, disk_devt(mddev->gendisk),
raid_bio->bi_iter.bi_sector);
- generic_make_request(align_bi);
+ submit_bio_noacct(align_bi);
return 1;
} else {
rcu_read_unlock();
@@ -5309,7 +5315,7 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
struct r5conf *conf = mddev->private;
split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
bio_chain(split, raid_bio);
- generic_make_request(raid_bio);
+ submit_bio_noacct(raid_bio);
raid_bio = split;
}
@@ -5505,7 +5511,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
/* Skip discard while reshape is happening */
return;
- logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
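
The round-down above relies on the stripe sector count being a power of two: clearing the low bits snaps an arbitrary bio start to a stripe boundary. A minimal standalone sketch of the same mask arithmetic (the helper name is illustrative, not from the driver):

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Round sector down to a multiple of stripe_sectors, which is
     * assumed to be a power of two (as the 4096-byte default is). */
    static sector_t stripe_round_down(sector_t sector, sector_t stripe_sectors)
    {
        return sector & ~(stripe_sectors - 1);
    }

    int main(void)
    {
        assert(stripe_round_down(1005, 8) == 1000); /* 1000 = 125 * 8  */
        assert(stripe_round_down(1000, 8) == 1000); /* already aligned */
        return 0;
    }
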
@@ -5520,7 +5526,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
last_sector *= conf->chunk_sectors;
for (; logical_sector < last_sector;
- logical_sector += STRIPE_SECTORS) {
+ logical_sector += RAID5_STRIPE_SECTORS(conf)) {
DEFINE_WAIT(w);
int d;
again:
@@ -5565,7 +5571,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
d++)
md_bitmap_startwrite(mddev->bitmap,
sh->sector,
- STRIPE_SECTORS,
+ RAID5_STRIPE_SECTORS(conf),
0);
sh->bm_seq = conf->seq_flush + 1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
@@ -5630,12 +5636,12 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
return true;
}
- logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
- for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
+ for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) {
int previous;
int seq;
@@ -5733,8 +5739,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
do_flush = false;
}
- if (!sh->batch_head || sh == sh->batch_head)
- set_bit(STRIPE_HANDLE, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
if ((!sh->batch_head || sh == sh->batch_head) &&
(bi->bi_opf & REQ_SYNC) &&
@@ -5799,7 +5804,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
sector_div(sector_nr, new_data_disks);
if (sector_nr) {
mddev->curr_resync_completed = sector_nr;
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
*skipped = 1;
retn = sector_nr;
goto finish;
@@ -5913,11 +5918,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
INIT_LIST_HEAD(&stripes);
- for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
+ for (i = 0; i < reshape_sectors; i += RAID5_STRIPE_SECTORS(conf)) {
int j;
int skipped_disk = 0;
sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
@@ -5938,7 +5943,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
skipped_disk = 1;
continue;
}
- memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
+ memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf));
set_bit(R5_Expanded, &sh->dev[j].flags);
set_bit(R5_UPTODATE, &sh->dev[j].flags);
}
@@ -5973,7 +5978,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
- first_sector += STRIPE_SECTORS;
+ first_sector += RAID5_STRIPE_SECTORS(conf);
}
/* Now that the sources are clearly marked, we can release
* the destination stripes
@@ -6020,7 +6025,7 @@ finish:
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
ret:
return retn;
@@ -6079,11 +6084,12 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
!conf->fullsync &&
!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
- sync_blocks >= STRIPE_SECTORS) {
+ sync_blocks >= RAID5_STRIPE_SECTORS(conf)) {
/* we can skip this block, and probably more */
- sync_blocks /= STRIPE_SECTORS;
+ do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf));
*skipped = 1;
- return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
+ /* keep things rounded to whole stripes */
+ return sync_blocks * RAID5_STRIPE_SECTORS(conf);
}
md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
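
sync_blocks is a sector_t, i.e. 64-bit even on 32-bit machines, and the divisor is no longer a compile-time constant, so the plain division is replaced with do_div(), which the kernel provides for 64-by-32 division. Note its slightly unusual contract: the dividend is updated in place with the quotient, and the macro's value is the remainder. A userspace stand-in to illustrate the semantics (not the kernel implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's do_div(n, base): n becomes the
     * quotient, the remainder is the macro's value. */
    #define do_div(n, base) ({                         \
        uint32_t __rem = (uint32_t)((n) % (base));     \
        (n) /= (base);                                 \
        __rem;                                         \
    })

    int main(void)
    {
        uint64_t sync_blocks = 1000;
        uint32_t rem = do_div(sync_blocks, 8); /* 8-sector stripes */

        /* prints "quotient=125 remainder=0" */
        printf("quotient=%llu remainder=%u\n",
               (unsigned long long)sync_blocks, rem);
        return 0;
    }
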
@@ -6116,7 +6122,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
raid5_release_stripe(sh);
- return STRIPE_SECTORS;
+ return RAID5_STRIPE_SECTORS(conf);
}
static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
@@ -6139,14 +6145,14 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
int handled = 0;
logical_sector = raid_bio->bi_iter.bi_sector &
- ~((sector_t)STRIPE_SECTORS-1);
+ ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
sector = raid5_compute_sector(conf, logical_sector,
0, &dd_idx, NULL);
last_sector = bio_end_sector(raid_bio);
for (; logical_sector < last_sector;
- logical_sector += STRIPE_SECTORS,
- sector += STRIPE_SECTORS,
+ logical_sector += RAID5_STRIPE_SECTORS(conf),
+ sector += RAID5_STRIPE_SECTORS(conf),
scnt++) {
if (scnt < offset)
@@ -6479,6 +6485,77 @@ raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
raid5_show_rmw_level,
raid5_store_rmw_level);
+static ssize_t
+raid5_show_stripe_size(struct mddev *mddev, char *page)
+{
+ struct r5conf *conf;
+ int ret = 0;
+
+ spin_lock(&mddev->lock);
+ conf = mddev->private;
+ if (conf)
+ ret = sprintf(page, "%lu\n", RAID5_STRIPE_SIZE(conf));
+ spin_unlock(&mddev->lock);
+ return ret;
+}
+
+#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
+static ssize_t
+raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len)
+{
+ struct r5conf *conf;
+ unsigned long new;
+ int err;
+
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+ if (kstrtoul(page, 10, &new))
+ return -EINVAL;
+
+ /*
+ * The value must not be bigger than PAGE_SIZE and must be a
+ * multiple of DEFAULT_STRIPE_SIZE.
+ */
+ if (new % DEFAULT_STRIPE_SIZE != 0 || new > PAGE_SIZE || new == 0)
+ return -EINVAL;
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+
+ conf = mddev->private;
+ if (!conf) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (new == conf->stripe_size)
+ goto out_unlock;
+
+ pr_debug("md/raid: change stripe_size from %lu to %lu\n",
+ conf->stripe_size, new);
+
+ mddev_suspend(mddev);
+ conf->stripe_size = new;
+ conf->stripe_shift = ilog2(new) - 9;
+ conf->stripe_sectors = new >> 9;
+ mddev_resume(mddev);
+
+out_unlock:
+ mddev_unlock(mddev);
+ return err ?: len;
+}
+
+static struct md_sysfs_entry
+raid5_stripe_size = __ATTR(stripe_size, 0644,
+ raid5_show_stripe_size,
+ raid5_store_stripe_size);
+#else
+static struct md_sysfs_entry
+raid5_stripe_size = __ATTR(stripe_size, 0444,
+ raid5_show_stripe_size,
+ NULL);
+#endif
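
The derived fields written by the store path follow directly from the new size: stripe_shift is ilog2 of the size minus 9 (the log2 of a 512-byte sector) and stripe_sectors is the size in sectors. An administrator would drive this through sysfs, e.g. echo 8192 > /sys/block/md0/md/stripe_size on a large-page machine. A quick check of the arithmetic (values illustrative):

    #include <assert.h>

    int main(void)
    {
        unsigned long new = 8192;                /* say, on a 64K-page box */
        unsigned int stripe_shift = 13 - 9;      /* ilog2(8192) == 13      */
        unsigned long stripe_sectors = new >> 9; /* 16 x 512-byte sectors  */

        assert(1UL << (stripe_shift + 9) == new);
        assert(stripe_sectors << 9 == new);
        return 0;
    }
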
static ssize_t
raid5_show_preread_threshold(struct mddev *mddev, char *page)
@@ -6667,6 +6744,7 @@ static struct attribute *raid5_attrs[] = {
&raid5_group_thread_cnt.attr,
&raid5_skip_copy.attr,
&raid5_rmw_level.attr,
+ &raid5_stripe_size.attr,
&r5c_journal_mode.attr,
&ppl_write_hint.attr,
NULL,
@@ -6766,7 +6844,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
conf->previous_raid_disks),
max(conf->chunk_sectors,
conf->prev_chunk_sectors)
- / STRIPE_SECTORS)) {
+ / RAID5_STRIPE_SECTORS(conf))) {
free_scratch_buffer(conf, percpu);
return -ENOMEM;
}
@@ -6918,6 +6996,12 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
if (conf == NULL)
goto abort;
+
+#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
+ conf->stripe_size = DEFAULT_STRIPE_SIZE;
+ conf->stripe_shift = ilog2(DEFAULT_STRIPE_SIZE) - 9;
+ conf->stripe_sectors = DEFAULT_STRIPE_SIZE >> 9;
+#endif
INIT_LIST_HEAD(&conf->free_list);
INIT_LIST_HEAD(&conf->pending_list);
conf->pending_data = kcalloc(PENDING_IO_MAX,
@@ -7069,8 +7153,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->min_nr_stripes = NR_STRIPES;
if (mddev->reshape_position != MaxSector) {
int stripes = max_t(int,
- ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4,
- ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
+ ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4,
+ ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4);
conf->min_nr_stripes = max(NR_STRIPES, stripes);
if (conf->min_nr_stripes != NR_STRIPES)
pr_info("md/raid:%s: force stripe size %d for reshape\n",
@@ -7801,14 +7885,14 @@ static int check_stripe_cache(struct mddev *mddev)
* stripe_heads first.
*/
struct r5conf *conf = mddev->private;
- if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
+ if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4
> conf->min_nr_stripes ||
- ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
+ ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4
> conf->min_nr_stripes) {
pr_warn("md/raid:%s: reshape: not enough stripes. Needed %lu\n",
mdname(mddev),
((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
- / STRIPE_SIZE)*4);
+ / RAID5_STRIPE_SIZE(conf))*4);
return 0;
}
return 1;
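
To put numbers on the check: with 512 KiB chunks (chunk_sectors = 1024) and the default 4 KiB stripe size, (1024 << 9) / 4096 * 4 = 512 stripe_heads are required, which is above the NR_STRIPES default of 256, so such a reshape is refused until the stripe cache is grown via stripe_cache_size.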
@@ -7944,8 +8028,8 @@ static int raid5_start_reshape(struct mddev *mddev)
else
rdev->recovery_offset = 0;
- if (sysfs_link_rdev(mddev, rdev))
- /* Failure here is OK */;
+ /* Failure here is OK */
+ sysfs_link_rdev(mddev, rdev);
}
} else if (rdev->raid_disk >= conf->previous_raid_disks
&& !test_bit(Faulty, &rdev->flags)) {
@@ -8140,7 +8224,7 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
while (chunksect && (mddev->array_sectors & (chunksect-1)))
chunksect >>= 1;
- if ((chunksect<<9) < STRIPE_SIZE)
+ if ((chunksect<<9) < RAID5_STRIPE_SIZE((struct r5conf *)mddev->private))
/* array size does not allow a suitable chunk size */
return ERR_PTR(-EINVAL);
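
A worked example of the takeover check: for an array of 1,000,000 sectors the loop settles on chunksect = 64, the largest power of two dividing 1,000,000 (which is 2^6 * 5^6); 64 << 9 = 32768 bytes comfortably exceeds the 4096-byte stripe size, so the takeover proceeds. An array with an odd sector count would drive chunksect all the way down to 1 sector (512 bytes) and fail with -EINVAL.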
@@ -8427,7 +8511,6 @@ static struct md_personality raid6_personality =
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
- .congested = raid5_congested,
.change_consistency_policy = raid5_change_consistency_policy,
};
static struct md_personality raid5_personality =
@@ -8452,7 +8535,6 @@ static struct md_personality raid5_personality =
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
- .congested = raid5_congested,
.change_consistency_policy = raid5_change_consistency_policy,
};
@@ -8478,7 +8560,6 @@ static struct md_personality raid4_personality =
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
.takeover = raid4_takeover,
- .congested = raid5_congested,
.change_consistency_policy = raid5_change_consistency_policy,
};
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index f90e0704bed9..7fb3b26a181a 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -472,32 +472,20 @@ struct disk_info {
*/
#define NR_STRIPES 256
+#define DEFAULT_STRIPE_SIZE 4096
+
+#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
#define STRIPE_SIZE PAGE_SIZE
#define STRIPE_SHIFT (PAGE_SHIFT - 9)
#define STRIPE_SECTORS (STRIPE_SIZE>>9)
+#endif
+
#define IO_THRESHOLD 1
#define BYPASS_THRESHOLD 1
#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK (NR_HASH - 1)
#define MAX_STRIPE_BATCH 8
-/* bio's attached to a stripe+device for I/O are linked together in bi_sector
- * order without overlap. There may be several bio's per stripe+device, and
- * a bio could span several devices.
- * When walking this list for a particular stripe+device, we must never proceed
- * beyond a bio that extends past this device, as the next bio might no longer
- * be valid.
- * This function is used to determine the 'next' bio in the list, given the
- * sector of the current stripe+device
- */
-static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
-{
- if (bio_end_sector(bio) < sector + STRIPE_SECTORS)
- return bio->bi_next;
- else
- return NULL;
-}
-
/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
* This is because we sometimes take all the spinlocks
* and creating that much locking depth can cause
@@ -574,6 +562,11 @@ struct r5conf {
int raid_disks;
int max_nr_stripes;
int min_nr_stripes;
+#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
+ unsigned long stripe_size;
+ unsigned int stripe_shift;
+ unsigned long stripe_sectors;
+#endif
/* reshape_progress is the leading edge of a 'reshape'
* It has value MaxSector when no reshape is happening
@@ -690,6 +683,32 @@ struct r5conf {
struct r5pending_data *next_pending_data;
};
+#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
+#define RAID5_STRIPE_SIZE(conf) STRIPE_SIZE
+#define RAID5_STRIPE_SHIFT(conf) STRIPE_SHIFT
+#define RAID5_STRIPE_SECTORS(conf) STRIPE_SECTORS
+#else
+#define RAID5_STRIPE_SIZE(conf) ((conf)->stripe_size)
+#define RAID5_STRIPE_SHIFT(conf) ((conf)->stripe_shift)
+#define RAID5_STRIPE_SECTORS(conf) ((conf)->stripe_sectors)
+#endif
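
The dual definition keeps the common case free: on 4 KiB-page systems RAID5_STRIPE_SIZE(conf) and friends expand to the historical compile-time constants and the conf argument is simply unused, so masks and shifts still fold at compile time; only larger-page systems pay a load from r5conf. Threading conf through every call site regardless is what lets both configurations share one body of code.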
+
+/* bio's attached to a stripe+device for I/O are linked together in bi_sector
+ * order without overlap. There may be several bio's per stripe+device, and
+ * a bio could span several devices.
+ * When walking this list for a particular stripe+device, we must never proceed
+ * beyond a bio that extends past this device, as the next bio might no longer
+ * be valid.
+ * This function is used to determine the 'next' bio in the list, given the
+ * sector of the current stripe+device
+ */
+static inline struct bio *r5_next_bio(struct r5conf *conf, struct bio *bio, sector_t sector)
+{
+ if (bio_end_sector(bio) < sector + RAID5_STRIPE_SECTORS(conf))
+ return bio->bi_next;
+ else
+ return NULL;
+}
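
Taken together with the comment, the canonical walk is the completion loop seen earlier in this patch: follow bi_next only while the current bio still begins inside this stripe's window, and let r5_next_bio() cut the walk off at the device boundary. Condensed (field names as in this header, completion details elided):

    struct bio *wbi = dev->written;

    while (wbi && wbi->bi_iter.bi_sector <
                  dev->sector + RAID5_STRIPE_SECTORS(conf)) {
        struct bio *next = r5_next_bio(conf, wbi, dev->sector);

        bio_endio(wbi);    /* complete this bio...            */
        wbi = next;        /* ...then step, never off the end */
    }
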
/*
* Our supported algorithms
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 6ec277421390..e5bffaaeed38 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -640,7 +640,7 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status)
struct i2c_client *client = dev->client;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- u32 uninitialized_var(tmp);
+ u32 tmp;
u8 u8tmp, buf[2];
u16 u16tmp;
diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
index 85bbdd4ecdbb..e48faf942830 100644
--- a/drivers/media/tuners/qt1010.c
+++ b/drivers/media/tuners/qt1010.c
@@ -215,7 +215,7 @@ static int qt1010_set_params(struct dvb_frontend *fe)
static int qt1010_init_meas1(struct qt1010_priv *priv,
u8 oper, u8 reg, u8 reg_init_val, u8 *retval)
{
- u8 i, val1, uninitialized_var(val2);
+ u8 i, val1, val2;
int err;
qt1010_i2c_oper_t i2c_data[] = {
@@ -250,7 +250,7 @@ static int qt1010_init_meas1(struct qt1010_priv *priv,
static int qt1010_init_meas2(struct qt1010_priv *priv,
u8 reg_init_val, u8 *retval)
{
- u8 i, uninitialized_var(val);
+ u8 i, val;
int err;
qt1010_i2c_oper_t i2c_data[] = {
{ QT1010_WR, 0x07, reg_init_val },
diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c
index 179b2ec3df57..d98343fd33fe 100644
--- a/drivers/media/usb/gspca/vicam.c
+++ b/drivers/media/usb/gspca/vicam.c
@@ -225,7 +225,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
{
int ret;
const struct ihex_binrec *rec;
- const struct firmware *uninitialized_var(fw);
+ const struct firmware *fw;
u8 *firmware_buf;
ret = request_ihex_firmware(&fw, VICAM_FIRMWARE,
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 8fa77a81dd7f..a65d5353a441 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -765,9 +765,9 @@ static void uvc_video_stats_decode(struct uvc_streaming *stream,
unsigned int header_size;
bool has_pts = false;
bool has_scr = false;
- u16 uninitialized_var(scr_sof);
- u32 uninitialized_var(scr_stc);
- u32 uninitialized_var(pts);
+ u16 scr_sof;
+ u32 scr_stc;
+ u32 pts;
if (stream->stats.stream.nb_frames == 0 &&
stream->stats.frame.nb_packets == 0)
@@ -1828,7 +1828,7 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
struct usb_host_endpoint *best_ep = NULL;
unsigned int best_psize = UINT_MAX;
unsigned int bandwidth;
- unsigned int uninitialized_var(altsetting);
+ unsigned int altsetting;
int intfnum = stream->intfnum;
/* Isochronous endpoint, select the alternate setting. */
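
The uninitialized_var() removals in these media hunks are part of a tree-wide cleanup: the macro was a self-assignment that muted -Wmaybe-uninitialized without initializing anything, so it could just as easily hide a genuine uninitialized read. Roughly, as formerly defined in the compiler headers:

    /* The old macro: "initialize" x with its own indeterminate value.
     * The warning goes away; the guarantee does not appear. */
    #define uninitialized_var(x) x = x

After removal, each flagged variable is either provably assigned before use (as in the hunks above) or given an explicit initializer.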
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 04368ee2a809..00060bdbf574 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -5,6 +5,12 @@
menuconfig MEMORY
bool "Memory Controller drivers"
+ help
+	  This option allows you to enable specific memory controller drivers,
+	  useful mostly on embedded systems. These could be controllers
+	  for DRAM (SDR, DDR), ROM, SRAM and others. The driver features
+	  vary from memory tuning and frequency scaling to enabling
+	  access to attached peripherals through the memory bus.
if MEMORY
@@ -174,6 +180,15 @@ config PL353_SMC
This driver is for the ARM PL351/PL353 Static Memory
Controller(SMC) module.
+config RENESAS_RPCIF
+ tristate "Renesas RPC-IF driver"
+ depends on ARCH_RENESAS
+ select REGMAP_MMIO
+ help
+	  This supports the Renesas R-Car Gen3 RPC-IF, which provides either
+	  a SPI host or a HyperFlash interface. You'll have to select the
+	  individual components under the corresponding menu.
+
source "drivers/memory/samsung/Kconfig"
source "drivers/memory/tegra/Kconfig"
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 6d7e3e64ba62..d105f8ebe8b8 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o
obj-$(CONFIG_MTK_SMI) += mtk-smi.o
obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o
obj-$(CONFIG_PL353_SMC) += pl353-smc.o
+obj-$(CONFIG_RENESAS_RPCIF) += renesas-rpc-if.o
obj-$(CONFIG_SAMSUNG_MC) += samsung/
obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index 82b415be18d1..60e8633b1175 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -23,7 +23,7 @@
* - BE kernel + LE firmware image
* - BE kernel + BE firmware image
*
- * The DPCU always runs in big endian mode. The firwmare image, however, can
+ * The DPCU always runs in big endian mode. The firmware image, however, can
* be in either format. Also, communication between host CPU and DCPU is
* always in little endian.
*/
@@ -188,7 +188,7 @@ struct brcmstb_dpfe_priv {
struct mutex lock;
};
-static const char *error_text[] = {
+static const char * const error_text[] = {
"Success", "Header code incorrect", "Unknown command or argument",
"Incorrect checksum", "Malformed command", "Timed out",
};
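
The added const moves the pointer table itself into read-only data; the original form protected only the strings it pointed at. In miniature (illustrative):

    const char *tab1[]        = { "a", "b" }; /* tab1[0] = "c"; still compiles */
    const char * const tab2[] = { "a", "b" }; /* tab2[0] = "c"; rejected       */
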
@@ -379,9 +379,8 @@ static void __iomem *get_msg_ptr(struct brcmstb_dpfe_priv *priv, u32 response,
void __iomem *ptr = NULL;
/* There is no need to use this function for API v3 or later. */
- if (unlikely(priv->dpfe_api->version >= 3)) {
+ if (unlikely(priv->dpfe_api->version >= 3))
return NULL;
- }
msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;
diff --git a/drivers/memory/bt1-l2-ctl.c b/drivers/memory/bt1-l2-ctl.c
index 633fea6a4edf..85965fa26e0b 100644
--- a/drivers/memory/bt1-l2-ctl.c
+++ b/drivers/memory/bt1-l2-ctl.c
@@ -66,6 +66,7 @@ struct l2_ctl_device_attribute {
struct device_attribute dev_attr;
enum l2_ctl_stall id;
};
+
#define to_l2_ctl_dev_attr(_dev_attr) \
container_of(_dev_attr, struct l2_ctl_device_attribute, dev_attr)
@@ -242,6 +243,7 @@ static ssize_t l2_ctl_latency_store(struct device *dev,
return count;
}
+
static L2_CTL_ATTR_RW(l2_ws_latency, l2_ctl_latency, L2_WS_STALL);
static L2_CTL_ATTR_RW(l2_tag_latency, l2_ctl_latency, L2_TAG_STALL);
static L2_CTL_ATTR_RW(l2_data_latency, l2_ctl_latency, L2_DATA_STALL);
diff --git a/drivers/memory/da8xx-ddrctl.c b/drivers/memory/da8xx-ddrctl.c
index e8f9b3f461f5..872addd0ec60 100644
--- a/drivers/memory/da8xx-ddrctl.c
+++ b/drivers/memory/da8xx-ddrctl.c
@@ -102,14 +102,12 @@ static int da8xx_ddrctl_probe(struct platform_device *pdev)
{
const struct da8xx_ddrctl_config_knob *knob;
const struct da8xx_ddrctl_setting *setting;
- struct device_node *node;
struct resource *res;
void __iomem *ddrctl;
struct device *dev;
u32 reg;
dev = &pdev->dev;
- node = dev->of_node;
setting = da8xx_ddrctl_get_board_settings();
if (!setting) {
diff --git a/drivers/memory/emif-asm-offsets.c b/drivers/memory/emif-asm-offsets.c
index db8043019ec6..4b98d1854cd7 100644
--- a/drivers/memory/emif-asm-offsets.c
+++ b/drivers/memory/emif-asm-offsets.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TI AM33XX EMIF PM Assembly Offsets
*
* Copyright (C) 2016-2017 Texas Instruments Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/ti-emif-sram.h>
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 9d9127bf2a59..bb6a71d26798 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -282,10 +282,9 @@ static void set_lpmode(struct emif_data *emif, u8 lpmode)
* the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
*/
if ((emif->plat_data->ip_rev == EMIF_4D) &&
- (EMIF_LP_MODE_PWR_DN == lpmode)) {
+ (lpmode == EMIF_LP_MODE_PWR_DN)) {
WARN_ONCE(1,
- "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by"
- "erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n");
+ "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n");
/* rollback LP_MODE to Self-refresh mode */
lpmode = EMIF_LP_MODE_SELF_REFRESH;
}
@@ -714,7 +713,7 @@ static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void)
u32 fifo_we_slave_ratio;
fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
- EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck);
+ EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 |
fifo_we_slave_ratio << 22;
@@ -725,7 +724,7 @@ static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void)
u32 fifo_we_slave_ratio;
fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
- EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck);
+ EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 |
fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23;
@@ -736,7 +735,7 @@ static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void)
u32 fifo_we_slave_ratio;
fifo_we_slave_ratio = DIV_ROUND_CLOSEST(
- EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256 , t_ck);
+ EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck);
return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 |
fifo_we_slave_ratio << 13;
@@ -975,8 +974,7 @@ static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
dev_err(emif->dev,
- "%s:NOT Extended temperature capable memory."
- "Converting MR4=0x%02x as shutdown event\n",
+ "%s:NOT Extended temperature capable memory. Converting MR4=0x%02x as shutdown event\n",
__func__, emif->temperature_level);
/*
* Temperature far too high - do kernel_power_off()
@@ -1318,9 +1316,9 @@ static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
dev_info->cal_resistors_per_cs = true;
- if (of_device_is_compatible(np_ddr , "jedec,lpddr2-s4"))
+ if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
dev_info->type = DDR_TYPE_LPDDR2_S4;
- else if (of_device_is_compatible(np_ddr , "jedec,lpddr2-s2"))
+ else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
dev_info->type = DDR_TYPE_LPDDR2_S2;
of_property_read_u32(np_ddr, "density", &density);
@@ -1563,11 +1561,8 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
goto error;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(emif->dev, "%s: error getting IRQ resource - %d\n",
- __func__, irq);
+ if (irq < 0)
goto error;
- }
emif_onetime_settings(emif);
emif_debugfs_init(emif);
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index a2c971743ffe..89f99b5b6450 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -53,6 +53,7 @@ int fsl_ifc_find(phys_addr_t addr_base)
for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr);
+
if (cspr & CSPR_V && (cspr & CSPR_BA) ==
convert_ifc_address(addr_base))
return i;
@@ -153,8 +154,8 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
/* read for chip select error */
cs_err = ifc_in32(&ifc->cm_evter_stat);
if (cs_err) {
- dev_err(ctrl->dev, "transaction sent to IFC is not mapped to"
- "any memory bank 0x%08X\n", cs_err);
+ dev_err(ctrl->dev, "transaction sent to IFC is not mapped to any memory bank 0x%08X\n",
+ cs_err);
/* clear the chip select error */
ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
@@ -163,24 +164,24 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
err_addr = ifc_in32(&ifc->cm_erattr1);
if (status & IFC_CM_ERATTR0_ERTYP_READ)
- dev_err(ctrl->dev, "Read transaction error"
- "CM_ERATTR0 0x%08X\n", status);
+ dev_err(ctrl->dev, "Read transaction error CM_ERATTR0 0x%08X\n",
+ status);
else
- dev_err(ctrl->dev, "Write transaction error"
- "CM_ERATTR0 0x%08X\n", status);
+ dev_err(ctrl->dev, "Write transaction error CM_ERATTR0 0x%08X\n",
+ status);
err_axiid = (status & IFC_CM_ERATTR0_ERAID) >>
IFC_CM_ERATTR0_ERAID_SHIFT;
- dev_err(ctrl->dev, "AXI ID of the error"
- "transaction 0x%08X\n", err_axiid);
+ dev_err(ctrl->dev, "AXI ID of the error transaction 0x%08X\n",
+ err_axiid);
err_srcid = (status & IFC_CM_ERATTR0_ESRCID) >>
IFC_CM_ERATTR0_ESRCID_SHIFT;
- dev_err(ctrl->dev, "SRC ID of the error"
- "transaction 0x%08X\n", err_srcid);
+ dev_err(ctrl->dev, "SRC ID of the error transaction 0x%08X\n",
+ err_srcid);
- dev_err(ctrl->dev, "Transaction Address corresponding to error"
- "ERADDR 0x%08X\n", err_addr);
+ dev_err(ctrl->dev, "Transaction Address corresponding to error ERADDR 0x%08X\n",
+ err_addr);
ret = IRQ_HANDLED;
}
@@ -199,7 +200,7 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
* the resources needed for the controller only. The
* resources for the NAND banks themselves are allocated
* in the chip probe function.
-*/
+ */
static int fsl_ifc_ctrl_probe(struct platform_device *dev)
{
int ret = 0;
@@ -250,8 +251,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
/* get the Controller level irq */
fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
if (fsl_ifc_ctrl_dev->irq == 0) {
- dev_err(&dev->dev, "failed to get irq resource "
- "for IFC\n");
+ dev_err(&dev->dev, "failed to get irq resource for IFC\n");
ret = -ENODEV;
goto err;
}
diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c
index b232ed279fc3..3ec5cb0fce1e 100644
--- a/drivers/memory/jz4780-nemc.c
+++ b/drivers/memory/jz4780-nemc.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -22,6 +23,8 @@
#define NEMC_SMCRn(n) (0x14 + (((n) - 1) * 4))
#define NEMC_NFCSR 0x50
+#define NEMC_REG_LEN 0x54
+
#define NEMC_SMCR_SMT BIT(0)
#define NEMC_SMCR_BW_SHIFT 6
#define NEMC_SMCR_BW_MASK (0x3 << NEMC_SMCR_BW_SHIFT)
@@ -288,7 +291,19 @@ static int jz4780_nemc_probe(struct platform_device *pdev)
nemc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nemc->base = devm_ioremap_resource(dev, res);
+
+ /*
+ * The driver currently only uses the registers up to offset
+ * NEMC_REG_LEN. Since the EFUSE registers are in the middle of the
+ * NEMC registers, we only request the registers we will use for now;
+ * that way the EFUSE driver can probe too.
+ */
+ if (!devm_request_mem_region(dev, res->start, NEMC_REG_LEN, dev_name(dev))) {
+ dev_err(dev, "unable to request I/O memory region\n");
+ return -EBUSY;
+ }
+
+ nemc->base = devm_ioremap(dev, res->start, NEMC_REG_LEN);
if (IS_ERR(nemc->base)) {
dev_err(dev, "failed to get I/O memory\n");
return PTR_ERR(nemc->base);
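
Splitting devm_ioremap_resource() into an explicit request + map is what lets two drivers coexist inside one resource window: the NEMC claims only its own NEMC_REG_LEN bytes, and the EFUSE registers stay unclaimed for the EFUSE driver. One caveat the pattern carries: devm_ioremap() reports failure with NULL rather than an ERR_PTR, so a sketch of this idiom would check accordingly (names as in the probe above):

    if (!devm_request_mem_region(dev, res->start, NEMC_REG_LEN,
                                 dev_name(dev)))
        return -EBUSY;              /* region already claimed */

    nemc->base = devm_ioremap(dev, res->start, NEMC_REG_LEN);
    if (!nemc->base)                /* NULL, not ERR_PTR, on failure */
        return -ENOMEM;
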
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index a113e811faab..e154bea3cf14 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -60,7 +60,7 @@ struct mtk_smi_common_plat {
struct mtk_smi_larb_gen {
int port_in_larb[MTK_LARB_NR_MAX + 1];
- void (*config_port)(struct device *);
+ void (*config_port)(struct device *dev);
unsigned int larb_direct_to_common_mask;
bool has_gals;
};
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index 886aea587276..8450638e8670 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -124,32 +124,32 @@ static int devbus_get_timing_params(struct devbus *devbus,
* The bus width is encoded into the register as 0 for 8 bits,
* and 1 for 16 bits, so we do the necessary conversion here.
*/
- if (r->bus_width == 8)
+ if (r->bus_width == 8) {
r->bus_width = 0;
- else if (r->bus_width == 16)
+ } else if (r->bus_width == 16) {
r->bus_width = 1;
- else {
+ } else {
dev_err(devbus->dev, "invalid bus width %d\n", r->bus_width);
return -EINVAL;
}
err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
- &r->badr_skew);
+ &r->badr_skew);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps",
- &r->turn_off);
+ &r->turn_off);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps",
- &r->acc_first);
+ &r->acc_first);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps",
- &r->acc_next);
+ &r->acc_next);
if (err < 0)
return err;
@@ -175,17 +175,17 @@ static int devbus_get_timing_params(struct devbus *devbus,
}
err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps",
- &w->ale_wr);
+ &w->ale_wr);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps",
- &w->wr_low);
+ &w->wr_low);
if (err < 0)
return err;
err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps",
- &w->wr_high);
+ &w->wr_high);
if (err < 0)
return err;
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index 71f26eac7350..d9f5437d3bce 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -4,11 +4,10 @@
*
* Copyright (C) 2012 Texas Instruments, Inc.
* Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2020 Krzysztof Kozlowski <krzk@kernel.org>
*/
#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/list.h>
#include <linux/of.h>
#include <linux/gfp.h>
#include <linux/export.h>
@@ -19,7 +18,7 @@
/**
* of_get_min_tck() - extract min timing values for ddr
* @np: pointer to ddr device tree node
- * @device: device requesting for min timing values
+ * @dev: device requesting for min timing values
*
* Populates the lpddr2_min_tck structure by extracting data
* from device tree node. Returns a pointer to the populated
@@ -27,7 +26,7 @@
* default min timings provided by JEDEC.
*/
const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
- struct device *dev)
+ struct device *dev)
{
int ret = 0;
struct lpddr2_min_tck *min;
@@ -56,13 +55,13 @@ const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
return min;
default_min_tck:
- dev_warn(dev, "%s: using default min-tck values\n", __func__);
+ dev_warn(dev, "Using default min-tck values\n");
return &lpddr2_jedec_min_tck;
}
EXPORT_SYMBOL(of_get_min_tck);
static int of_do_get_timings(struct device_node *np,
- struct lpddr2_timings *tim)
+ struct lpddr2_timings *tim)
{
int ret;
@@ -84,7 +83,7 @@ static int of_do_get_timings(struct device_node *np,
ret |= of_property_read_u32(np, "tZQinit", &tim->tZQinit);
ret |= of_property_read_u32(np, "tRAS-max-ns", &tim->tRAS_max_ns);
ret |= of_property_read_u32(np, "tDQSCK-max-derated",
- &tim->tDQSCK_max_derated);
+ &tim->tDQSCK_max_derated);
return ret;
}
@@ -103,7 +102,9 @@ static int of_do_get_timings(struct device_node *np,
* while populating, returns default timings provided by JEDEC.
*/
const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
- struct device *dev, u32 device_type, u32 *nr_frequencies)
+ struct device *dev,
+ u32 device_type,
+ u32 *nr_frequencies)
{
struct lpddr2_timings *timings = NULL;
u32 arr_sz = 0, i = 0;
@@ -116,7 +117,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
tim_compat = "jedec,lpddr2-timings";
break;
default:
- dev_warn(dev, "%s: un-supported memory type\n", __func__);
+ dev_warn(dev, "Unsupported memory type\n");
}
for_each_child_of_node(np_ddr, np_tim)
@@ -145,7 +146,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
return timings;
default_timings:
- dev_warn(dev, "%s: using default timings\n", __func__);
+ dev_warn(dev, "Using default memory timings\n");
*nr_frequencies = ARRAY_SIZE(lpddr2_jedec_timings);
return lpddr2_jedec_timings;
}
@@ -154,7 +155,7 @@ EXPORT_SYMBOL(of_get_ddr_timings);
/**
* of_lpddr3_get_min_tck() - extract min timing values for lpddr3
* @np: pointer to ddr device tree node
- * @device: device requesting for min timing values
+ * @dev: device requesting for min timing values
*
* Populates the lpddr3_min_tck structure by extracting data
* from device tree node. Returns a pointer to the populated
@@ -193,8 +194,7 @@ const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
ret |= of_property_read_u32(np, "tMRD-min-tck", &min->tMRD);
if (ret) {
- dev_warn(dev, "%s: errors while parsing min-tck values\n",
- __func__);
+ dev_warn(dev, "Errors while parsing min-tck values\n");
devm_kfree(dev, min);
goto default_min_tck;
}
@@ -202,7 +202,7 @@ const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
return min;
default_min_tck:
- dev_warn(dev, "%s: using default min-tck values\n", __func__);
+ dev_warn(dev, "Using default min-tck values\n");
return NULL;
}
EXPORT_SYMBOL(of_lpddr3_get_min_tck);
@@ -264,7 +264,7 @@ const struct lpddr3_timings
tim_compat = "jedec,lpddr3-timings";
break;
default:
- dev_warn(dev, "%s: un-supported memory type\n", __func__);
+ dev_warn(dev, "Unsupported memory type\n");
}
for_each_child_of_node(np_ddr, np_tim)
@@ -293,7 +293,7 @@ const struct lpddr3_timings
return timings;
default_timings:
- dev_warn(dev, "%s: failed to get timings\n", __func__);
+ dev_warn(dev, "Failed to get timings\n");
*nr_frequencies = 0;
return NULL;
}
diff --git a/drivers/memory/of_memory.h b/drivers/memory/of_memory.h
index e39ecc4c733d..4a99b232ab0a 100644
--- a/drivers/memory/of_memory.h
+++ b/drivers/memory/of_memory.h
@@ -3,22 +3,23 @@
* OpenFirmware helpers for memory drivers
*
* Copyright (C) 2012 Texas Instruments, Inc.
+ * Copyright (C) 2020 Krzysztof Kozlowski <krzk@kernel.org>
*/
#ifndef __LINUX_MEMORY_OF_REG_H
#define __LINUX_MEMORY_OF_REG_H
#if defined(CONFIG_OF) && defined(CONFIG_DDR)
-extern const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
- struct device *dev);
-extern const struct lpddr2_timings
- *of_get_ddr_timings(struct device_node *np_ddr, struct device *dev,
- u32 device_type, u32 *nr_frequencies);
-extern const struct lpddr3_min_tck
- *of_lpddr3_get_min_tck(struct device_node *np, struct device *dev);
-extern const struct lpddr3_timings
- *of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
- struct device *dev, u32 device_type, u32 *nr_frequencies);
+const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
+ struct device *dev);
+const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
+ struct device *dev,
+ u32 device_type, u32 *nr_frequencies);
+const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
+ struct device *dev);
+const struct lpddr3_timings *
+of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
+ struct device *dev, u32 device_type, u32 *nr_frequencies);
#else
static inline const struct lpddr2_min_tck
*of_get_min_tck(struct device_node *np, struct device *dev)
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index eff26c1b1394..f512cbc7a36c 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -29,6 +29,7 @@
#include <linux/of_platform.h>
#include <linux/omap-gpmc.h>
#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
#include <linux/platform_data/mtd-nand-omap2.h>
@@ -108,8 +109,8 @@
#define ENABLE_PREFETCH (0x1 << 7)
#define DMA_MPU_MODE 2
-#define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf)
-#define GPMC_REVISION_MINOR(l) (l & 0xf)
+#define GPMC_REVISION_MAJOR(l) (((l) >> 4) & 0xf)
+#define GPMC_REVISION_MINOR(l) ((l) & 0xf)
#define GPMC_HAS_WR_ACCESS 0x1
#define GPMC_HAS_WR_DATA_MUX_BUS 0x2
@@ -140,27 +141,27 @@
#define GPMC_CONFIG1_WRITEMULTIPLE_SUPP (1 << 28)
#define GPMC_CONFIG1_WRITETYPE_ASYNC (0 << 27)
#define GPMC_CONFIG1_WRITETYPE_SYNC (1 << 27)
-#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) ((val & 3) << 25)
+#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) (((val) & 3) << 25)
/** CLKACTIVATIONTIME Max Ticks */
#define GPMC_CONFIG1_CLKACTIVATIONTIME_MAX 2
-#define GPMC_CONFIG1_PAGE_LEN(val) ((val & 3) << 23)
+#define GPMC_CONFIG1_PAGE_LEN(val) (((val) & 3) << 23)
/** ATTACHEDDEVICEPAGELENGTH Max Value */
#define GPMC_CONFIG1_ATTACHEDDEVICEPAGELENGTH_MAX 2
#define GPMC_CONFIG1_WAIT_READ_MON (1 << 22)
#define GPMC_CONFIG1_WAIT_WRITE_MON (1 << 21)
-#define GPMC_CONFIG1_WAIT_MON_TIME(val) ((val & 3) << 18)
+#define GPMC_CONFIG1_WAIT_MON_TIME(val) (((val) & 3) << 18)
/** WAITMONITORINGTIME Max Ticks */
#define GPMC_CONFIG1_WAITMONITORINGTIME_MAX 2
-#define GPMC_CONFIG1_WAIT_PIN_SEL(val) ((val & 3) << 16)
-#define GPMC_CONFIG1_DEVICESIZE(val) ((val & 3) << 12)
+#define GPMC_CONFIG1_WAIT_PIN_SEL(val) (((val) & 3) << 16)
+#define GPMC_CONFIG1_DEVICESIZE(val) (((val) & 3) << 12)
#define GPMC_CONFIG1_DEVICESIZE_16 GPMC_CONFIG1_DEVICESIZE(1)
/** DEVICESIZE Max Value */
#define GPMC_CONFIG1_DEVICESIZE_MAX 1
-#define GPMC_CONFIG1_DEVICETYPE(val) ((val & 3) << 10)
+#define GPMC_CONFIG1_DEVICETYPE(val) (((val) & 3) << 10)
#define GPMC_CONFIG1_DEVICETYPE_NOR GPMC_CONFIG1_DEVICETYPE(0)
-#define GPMC_CONFIG1_MUXTYPE(val) ((val & 3) << 8)
+#define GPMC_CONFIG1_MUXTYPE(val) (((val) & 3) << 8)
#define GPMC_CONFIG1_TIME_PARA_GRAN (1 << 4)
-#define GPMC_CONFIG1_FCLK_DIV(val) (val & 3)
+#define GPMC_CONFIG1_FCLK_DIV(val) ((val) & 3)
#define GPMC_CONFIG1_FCLK_DIV2 (GPMC_CONFIG1_FCLK_DIV(1))
#define GPMC_CONFIG1_FCLK_DIV3 (GPMC_CONFIG1_FCLK_DIV(2))
#define GPMC_CONFIG1_FCLK_DIV4 (GPMC_CONFIG1_FCLK_DIV(3))
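
Parenthesizing every macro argument guards against precedence surprises once the argument is an expression rather than a plain identifier. With the revision macro as an example (values hypothetical):

    #define MAJOR_BAD(l)  ((l >> 4) & 0xf)     /* argument unparenthesized */
    #define MAJOR_GOOD(l) (((l) >> 4) & 0xf)

    /* l = 0x50 | 0x3, i.e. 0x53: major 5, minor 3.                      */
    /* MAJOR_BAD(0x50 | 0x3)  -> ((0x50 | 0x3 >> 4) & 0xf) == 0   wrong  */
    /* MAJOR_GOOD(0x50 | 0x3) -> (((0x50 | 0x3) >> 4) & 0xf) == 5 right  */
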
@@ -245,7 +246,7 @@ static DEFINE_SPINLOCK(gpmc_mem_lock);
static unsigned int gpmc_cs_num = GPMC_CS_NUM;
static unsigned int gpmc_nr_waitpins;
static resource_size_t phys_base, mem_size;
-static unsigned gpmc_capability;
+static unsigned int gpmc_capability;
static void __iomem *gpmc_base;
static struct clk *gpmc_l3_clk;
@@ -291,15 +292,14 @@ static unsigned long gpmc_get_fclk_period(void)
/**
* gpmc_get_clk_period - get period of selected clock domain in ps
- * @cs Chip Select Region.
- * @cd Clock Domain.
+ * @cs: Chip Select Region.
+ * @cd: Clock Domain.
*
* GPMC_CS_CONFIG1 GPMCFCLKDIVIDER for cs has to be setup
* prior to calling this function with GPMC_CD_CLK.
*/
static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)
{
-
unsigned long tick_ps = gpmc_get_fclk_period();
u32 l;
int div;
@@ -319,7 +319,6 @@ static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)
}
return tick_ps;
-
}
static unsigned int gpmc_ns_to_clk_ticks(unsigned int time_ns, int cs,
@@ -411,7 +410,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
* @reg: GPMC_CS_CONFIGn register offset.
* @st_bit: Start Bit
* @end_bit: End Bit. Must be >= @st_bit.
- * @ma:x Maximum parameter value (before optional @shift).
+ * @max: Maximum parameter value (before optional @shift).
* If 0, maximum is as high as @st_bit and @end_bit allow.
* @name: DTS node name, w/o "gpmc,"
* @cd: Clock Domain of timing parameter.
@@ -511,7 +510,7 @@ static void gpmc_cs_show_timings(int cs, const char *desc)
GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity");
GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data");
GPMC_GET_RAW_SHIFT_MAX(GPMC_CS_CONFIG1, 12, 13, 1,
- GPMC_CONFIG1_DEVICESIZE_MAX, "device-width");
+ GPMC_CONFIG1_DEVICESIZE_MAX, "device-width");
GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin");
GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write");
GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 22, 22, "wait-on-read");
@@ -625,9 +624,8 @@ static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int max
l = gpmc_cs_read_reg(cs, reg);
#ifdef CONFIG_OMAP_GPMC_DEBUG
- pr_info(
- "GPMC CS%d: %-17s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
- cs, name, ticks, gpmc_get_clk_period(cs, cd) * ticks / 1000,
+ pr_info("GPMC CS%d: %-17s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
+ cs, name, ticks, gpmc_get_clk_period(cs, cd) * ticks / 1000,
(l >> st_bit) & mask, time);
#endif
l &= ~(mask << st_bit);
@@ -662,7 +660,6 @@ static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int max
*/
static int gpmc_calc_waitmonitoring_divider(unsigned int wait_monitoring)
{
-
int div = gpmc_ns_to_ticks(wait_monitoring);
div += GPMC_CONFIG1_WAITMONITORINGTIME_MAX - 1;
@@ -674,7 +671,6 @@ static int gpmc_calc_waitmonitoring_divider(unsigned int wait_monitoring)
div = 1;
return div;
-
}
/**
@@ -728,7 +724,6 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
if (!s->sync_read && !s->sync_write &&
(s->wait_on_read || s->wait_on_write)
) {
-
div = gpmc_calc_waitmonitoring_divider(t->wait_monitoring);
if (div < 0) {
pr_err("%s: waitmonitoringtime %3d ns too large for greatest gpmcfclkdivider.\n",
@@ -958,7 +953,7 @@ static int gpmc_cs_remap(int cs, u32 base)
* Make sure we ignore any device offsets from the GPMC partition
 * allocated for the chip select and that the new base conforms
* to the GPMC 16MB minimum granularity.
- */
+ */
base &= ~(SZ_16M - 1);
gpmc_cs_get_memconf(cs, &old_base, &size);
@@ -1087,7 +1082,7 @@ static struct gpmc_nand_ops nand_ops = {
/**
* gpmc_omap_get_nand_ops - Get the GPMC NAND interface
- * @regs: the GPMC NAND register map exclusive for NAND use.
+ * @reg: the GPMC NAND register map exclusive for NAND use.
* @cs: GPMC chip select number on which the NAND sits. The
* register map returned will be specific to this chip select.
*
@@ -1242,7 +1237,7 @@ int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
}
EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
-int gpmc_get_client_irq(unsigned irq_config)
+int gpmc_get_client_irq(unsigned int irq_config)
{
if (!gpmc_irq_domain) {
pr_warn("%s called before GPMC IRQ domain available\n",
@@ -1465,7 +1460,6 @@ static void gpmc_mem_exit(void)
continue;
gpmc_cs_delete_mem(cs);
}
-
}
static void gpmc_mem_init(void)
@@ -1634,17 +1628,14 @@ static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
/* oe_on */
temp = dev_t->t_oeasu;
if (mux)
- temp = max_t(u32, temp,
- gpmc_t->adv_rd_off + dev_t->t_aavdh);
+ temp = max_t(u32, temp, gpmc_t->adv_rd_off + dev_t->t_aavdh);
gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
/* access */
temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
- gpmc_t->oe_on + dev_t->t_oe);
- temp = max_t(u32, temp,
- gpmc_t->cs_on + dev_t->t_ce);
- temp = max_t(u32, temp,
- gpmc_t->adv_on + dev_t->t_aa);
+ gpmc_t->oe_on + dev_t->t_oe);
+ temp = max_t(u32, temp, gpmc_t->cs_on + dev_t->t_ce);
+ temp = max_t(u32, temp, gpmc_t->adv_on + dev_t->t_aa);
gpmc_t->access = gpmc_round_ps_to_ticks(temp);
gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
@@ -1753,10 +1744,11 @@ static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
return 0;
}
-/* TODO: remove this function once all peripherals are confirmed to
+/*
+ * TODO: remove this function once all peripherals are confirmed to
* work with generic timing. Simultaneously gpmc_cs_set_timings()
* has to be modified to handle timings in ps instead of ns
-*/
+ */
static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
{
t->cs_on /= 1000;
@@ -2089,7 +2081,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
gpmc_cs_disable_mem(cs);
/*
- * FIXME: gpmc_cs_request() will map the CS to an arbitary
+ * FIXME: gpmc_cs_request() will map the CS to an arbitrary
* location in the gpmc address space. When booting with
* device-tree we want the NOR flash to be mapped to the
* location specified in the device-tree blob. So remap the
diff --git a/drivers/memory/pl172.c b/drivers/memory/pl172.c
index ff57195b4e37..575fadbffa30 100644
--- a/drivers/memory/pl172.c
+++ b/drivers/memory/pl172.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Memory controller driver for ARM PrimeCell PL172
* PrimeCell MultiPort Memory Controller (PL172)
@@ -6,10 +7,6 @@
*
* Based on:
* TI AEMIF driver, Copyright (C) 2010 - 2013 Texas Instruments Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/amba/bus.h>
@@ -24,7 +21,7 @@
#include <linux/of_platform.h>
#include <linux/time.h>
-#define MPMC_STATIC_CFG(n) (0x200 + 0x20 * n)
+#define MPMC_STATIC_CFG(n) (0x200 + 0x20 * (n))
#define MPMC_STATIC_CFG_MW_8BIT 0x0
#define MPMC_STATIC_CFG_MW_16BIT 0x1
#define MPMC_STATIC_CFG_MW_32BIT 0x2
@@ -34,17 +31,17 @@
#define MPMC_STATIC_CFG_EW BIT(8)
#define MPMC_STATIC_CFG_B BIT(19)
#define MPMC_STATIC_CFG_P BIT(20)
-#define MPMC_STATIC_WAIT_WEN(n) (0x204 + 0x20 * n)
+#define MPMC_STATIC_WAIT_WEN(n) (0x204 + 0x20 * (n))
#define MPMC_STATIC_WAIT_WEN_MAX 0x0f
-#define MPMC_STATIC_WAIT_OEN(n) (0x208 + 0x20 * n)
+#define MPMC_STATIC_WAIT_OEN(n) (0x208 + 0x20 * (n))
#define MPMC_STATIC_WAIT_OEN_MAX 0x0f
-#define MPMC_STATIC_WAIT_RD(n) (0x20c + 0x20 * n)
+#define MPMC_STATIC_WAIT_RD(n) (0x20c + 0x20 * (n))
#define MPMC_STATIC_WAIT_RD_MAX 0x1f
-#define MPMC_STATIC_WAIT_PAGE(n) (0x210 + 0x20 * n)
+#define MPMC_STATIC_WAIT_PAGE(n) (0x210 + 0x20 * (n))
#define MPMC_STATIC_WAIT_PAGE_MAX 0x1f
-#define MPMC_STATIC_WAIT_WR(n) (0x214 + 0x20 * n)
+#define MPMC_STATIC_WAIT_WR(n) (0x214 + 0x20 * (n))
#define MPMC_STATIC_WAIT_WR_MAX 0x1f
-#define MPMC_STATIC_WAIT_TURN(n) (0x218 + 0x20 * n)
+#define MPMC_STATIC_WAIT_TURN(n) (0x218 + 0x20 * (n))
#define MPMC_STATIC_WAIT_TURN_MAX 0x0f
/* Maximum number of static chip selects */
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
new file mode 100644
index 000000000000..88f51ec8f1d1
--- /dev/null
+++ b/drivers/memory/renesas-rpc-if.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RPC-IF core driver
+ *
+ * Copyright (C) 2018-2019 Renesas Solutions Corp.
+ * Copyright (C) 2019 Macronix International Co., Ltd.
+ * Copyright (C) 2019-2020 Cogent Embedded, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <memory/renesas-rpc-if.h>
+
+#define RPCIF_CMNCR 0x0000 /* R/W */
+#define RPCIF_CMNCR_MD BIT(31)
+#define RPCIF_CMNCR_SFDE BIT(24) /* undocumented but must be set */
+#define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22)
+#define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20)
+#define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18)
+#define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16)
+#define RPCIF_CMNCR_MOIIO_HIZ (RPCIF_CMNCR_MOIIO0(3) | \
+ RPCIF_CMNCR_MOIIO1(3) | \
+ RPCIF_CMNCR_MOIIO2(3) | RPCIF_CMNCR_MOIIO3(3))
+#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* undocumented */
+#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* undocumented */
+#define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8)
+#define RPCIF_CMNCR_IOFV_HIZ (RPCIF_CMNCR_IO0FV(3) | RPCIF_CMNCR_IO2FV(3) | \
+ RPCIF_CMNCR_IO3FV(3))
+#define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0)
+
+#define RPCIF_SSLDR 0x0004 /* R/W */
+#define RPCIF_SSLDR_SPNDL(d) (((d) & 0x7) << 16)
+#define RPCIF_SSLDR_SLNDL(d) (((d) & 0x7) << 8)
+#define RPCIF_SSLDR_SCKDL(d) (((d) & 0x7) << 0)
+
+#define RPCIF_DRCR 0x000C /* R/W */
+#define RPCIF_DRCR_SSLN BIT(24)
+#define RPCIF_DRCR_RBURST(v) ((((v) - 1) & 0x1F) << 16)
+#define RPCIF_DRCR_RCF BIT(9)
+#define RPCIF_DRCR_RBE BIT(8)
+#define RPCIF_DRCR_SSLE BIT(0)
+
+#define RPCIF_DRCMR 0x0010 /* R/W */
+#define RPCIF_DRCMR_CMD(c) (((c) & 0xFF) << 16)
+#define RPCIF_DRCMR_OCMD(c) (((c) & 0xFF) << 0)
+
+#define RPCIF_DREAR 0x0014 /* R/W */
+#define RPCIF_DREAR_EAV(c) (((c) & 0xF) << 16)
+#define RPCIF_DREAR_EAC(c) (((c) & 0x7) << 0)
+
+#define RPCIF_DROPR 0x0018 /* R/W */
+
+#define RPCIF_DRENR 0x001C /* R/W */
+#define RPCIF_DRENR_CDB(o) (u32)((((o) & 0x3) << 30))
+#define RPCIF_DRENR_OCDB(o) (((o) & 0x3) << 28)
+#define RPCIF_DRENR_ADB(o) (((o) & 0x3) << 24)
+#define RPCIF_DRENR_OPDB(o) (((o) & 0x3) << 20)
+#define RPCIF_DRENR_DRDB(o) (((o) & 0x3) << 16)
+#define RPCIF_DRENR_DME BIT(15)
+#define RPCIF_DRENR_CDE BIT(14)
+#define RPCIF_DRENR_OCDE BIT(12)
+#define RPCIF_DRENR_ADE(v) (((v) & 0xF) << 8)
+#define RPCIF_DRENR_OPDE(v) (((v) & 0xF) << 4)
+
+#define RPCIF_SMCR 0x0020 /* R/W */
+#define RPCIF_SMCR_SSLKP BIT(8)
+#define RPCIF_SMCR_SPIRE BIT(2)
+#define RPCIF_SMCR_SPIWE BIT(1)
+#define RPCIF_SMCR_SPIE BIT(0)
+
+#define RPCIF_SMCMR 0x0024 /* R/W */
+#define RPCIF_SMCMR_CMD(c) (((c) & 0xFF) << 16)
+#define RPCIF_SMCMR_OCMD(c) (((c) & 0xFF) << 0)
+
+#define RPCIF_SMADR 0x0028 /* R/W */
+
+#define RPCIF_SMOPR 0x002C /* R/W */
+#define RPCIF_SMOPR_OPD3(o) (((o) & 0xFF) << 24)
+#define RPCIF_SMOPR_OPD2(o) (((o) & 0xFF) << 16)
+#define RPCIF_SMOPR_OPD1(o) (((o) & 0xFF) << 8)
+#define RPCIF_SMOPR_OPD0(o) (((o) & 0xFF) << 0)
+
+#define RPCIF_SMENR 0x0030 /* R/W */
+#define RPCIF_SMENR_CDB(o) (((o) & 0x3) << 30)
+#define RPCIF_SMENR_OCDB(o) (((o) & 0x3) << 28)
+#define RPCIF_SMENR_ADB(o) (((o) & 0x3) << 24)
+#define RPCIF_SMENR_OPDB(o) (((o) & 0x3) << 20)
+#define RPCIF_SMENR_SPIDB(o) (((o) & 0x3) << 16)
+#define RPCIF_SMENR_DME BIT(15)
+#define RPCIF_SMENR_CDE BIT(14)
+#define RPCIF_SMENR_OCDE BIT(12)
+#define RPCIF_SMENR_ADE(v) (((v) & 0xF) << 8)
+#define RPCIF_SMENR_OPDE(v) (((v) & 0xF) << 4)
+#define RPCIF_SMENR_SPIDE(v) (((v) & 0xF) << 0)
+
+#define RPCIF_SMRDR0 0x0038 /* R */
+#define RPCIF_SMRDR1 0x003C /* R */
+#define RPCIF_SMWDR0 0x0040 /* W */
+#define RPCIF_SMWDR1 0x0044 /* W */
+
+#define RPCIF_CMNSR 0x0048 /* R */
+#define RPCIF_CMNSR_SSLF BIT(1)
+#define RPCIF_CMNSR_TEND BIT(0)
+
+#define RPCIF_DRDMCR 0x0058 /* R/W */
+#define RPCIF_DMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0)
+
+#define RPCIF_DRDRENR 0x005C /* R/W */
+#define RPCIF_DRDRENR_HYPE(v) (((v) & 0x7) << 12)
+#define RPCIF_DRDRENR_ADDRE BIT(8)
+#define RPCIF_DRDRENR_OPDRE BIT(4)
+#define RPCIF_DRDRENR_DRDRE BIT(0)
+
+#define RPCIF_SMDMCR 0x0060 /* R/W */
+#define RPCIF_SMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0)
+
+#define RPCIF_SMDRENR 0x0064 /* R/W */
+#define RPCIF_SMDRENR_HYPE(v) (((v) & 0x7) << 12)
+#define RPCIF_SMDRENR_ADDRE BIT(8)
+#define RPCIF_SMDRENR_OPDRE BIT(4)
+#define RPCIF_SMDRENR_SPIDRE BIT(0)
+
+#define RPCIF_PHYCNT 0x007C /* R/W */
+#define RPCIF_PHYCNT_CAL BIT(31)
+#define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22)
+#define RPCIF_PHYCNT_EXDS BIT(21)
+#define RPCIF_PHYCNT_OCT BIT(20)
+#define RPCIF_PHYCNT_DDRCAL BIT(19)
+#define RPCIF_PHYCNT_HS BIT(18)
+#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15)
+#define RPCIF_PHYCNT_WBUF2 BIT(4)
+#define RPCIF_PHYCNT_WBUF BIT(2)
+#define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0)
+
+#define RPCIF_PHYOFFSET1 0x0080 /* R/W */
+#define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28)
+
+#define RPCIF_PHYOFFSET2 0x0084 /* R/W */
+#define RPCIF_PHYOFFSET2_OCTTMG(v) (((v) & 0x7) << 8)
+
+#define RPCIF_PHYINT 0x0088 /* R/W */
+#define RPCIF_PHYINT_WPVAL BIT(1)
+
+#define RPCIF_DIRMAP_SIZE 0x4000000
+
+static const struct regmap_range rpcif_volatile_ranges[] = {
+ regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1),
+ regmap_reg_range(RPCIF_SMWDR0, RPCIF_SMWDR1),
+ regmap_reg_range(RPCIF_CMNSR, RPCIF_CMNSR),
+};
+
+static const struct regmap_access_table rpcif_volatile_table = {
+ .yes_ranges = rpcif_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges),
+};
+
+static const struct regmap_config rpcif_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+ .max_register = RPCIF_PHYINT,
+ .volatile_table = &rpcif_volatile_table,
+};
+
+int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+ void __iomem *base;
+
+ rpc->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ rpc->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &rpcif_regmap_config);
+ if (IS_ERR(rpc->regmap)) {
+ dev_err(&pdev->dev,
+ "failed to init regmap for rpcif, error %ld\n",
+ PTR_ERR(rpc->regmap));
+ return PTR_ERR(rpc->regmap);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
+	rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rpc->dirmap))
+		rpc->dirmap = NULL;
+	else
+		rpc->size = resource_size(res);
+
+ rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(rpc->rstc))
+ return PTR_ERR(rpc->rstc);
+
+ return 0;
+}
+EXPORT_SYMBOL(rpcif_sw_init);
+
+void rpcif_enable_rpm(struct rpcif *rpc)
+{
+ pm_runtime_enable(rpc->dev);
+}
+EXPORT_SYMBOL(rpcif_enable_rpm);
+
+void rpcif_disable_rpm(struct rpcif *rpc)
+{
+	pm_runtime_disable(rpc->dev);
+}
+EXPORT_SYMBOL(rpcif_disable_rpm);
+
+void rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
+{
+ u32 dummy;
+
+ pm_runtime_get_sync(rpc->dev);
+
+ /*
+	 * NOTE: The 0x260 bits are undocumented, but they must be set.
+	 * RPCIF_PHYCNT_STRTIM holds the strobe timing adjustment:
+	 * 0x0 gives the largest delay,
+	 * 0x1 the second largest, and so on.
+	 * On H3 ES1.x the value should be 0, while on all other
+	 * SoCs it should be 7.
+ */
+ regmap_write(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_STRTIM(7) |
+ RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0) | 0x260);
+
+ /*
+	 * NOTE: The 0x1511144 bits are undocumented, but they must be set
+	 * for RPCIF_PHYOFFSET1.
+	 * The 0x31 bits are undocumented, but they must be set
+	 * for RPCIF_PHYOFFSET2.
+ */
+ regmap_write(rpc->regmap, RPCIF_PHYOFFSET1, 0x1511144 |
+ RPCIF_PHYOFFSET1_DDRTMG(3));
+ regmap_write(rpc->regmap, RPCIF_PHYOFFSET2, 0x31 |
+ RPCIF_PHYOFFSET2_OCTTMG(4));
+
+ if (hyperflash)
+ regmap_update_bits(rpc->regmap, RPCIF_PHYINT,
+ RPCIF_PHYINT_WPVAL, 0);
+
+ regmap_write(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_SFDE |
+ RPCIF_CMNCR_MOIIO_HIZ | RPCIF_CMNCR_IOFV_HIZ |
+ RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
+ /* Set RCF after BSZ update */
+ regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF);
+ /* Dummy read according to spec */
+ regmap_read(rpc->regmap, RPCIF_DRCR, &dummy);
+ regmap_write(rpc->regmap, RPCIF_SSLDR, RPCIF_SSLDR_SPNDL(7) |
+ RPCIF_SSLDR_SLNDL(7) | RPCIF_SSLDR_SCKDL(7));
+
+ pm_runtime_put(rpc->dev);
+
+ rpc->bus_size = hyperflash ? 2 : 1;
+}
+EXPORT_SYMBOL(rpcif_hw_init);
+
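The split between rpcif_sw_init(), rpcif_enable_rpm() and rpcif_hw_init() above implies a fixed bring-up order in the child drivers. A minimal probe sketch under those assumptions (example_probe and the devm_kzalloc allocation are illustrative only; the real children keep struct rpcif inside their own private data):

	/* Sketch only: assumes <linux/platform_device.h> and the rpc-if header. */
	static int example_probe(struct platform_device *pdev)
	{
		struct rpcif *rpc;
		int ret;

		rpc = devm_kzalloc(&pdev->dev, sizeof(*rpc), GFP_KERNEL);
		if (!rpc)
			return -ENOMEM;

		/* The register resources live on the parent "rpc-if" device. */
		ret = rpcif_sw_init(rpc, pdev->dev.parent);
		if (ret)
			return ret;

		/* Runtime PM must be enabled before rpcif_hw_init() uses it. */
		rpcif_enable_rpm(rpc);
		rpcif_hw_init(rpc, false);	/* false: SPI, true: HyperFlash */
		return 0;
	}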
+static int wait_msg_xfer_end(struct rpcif *rpc)
+{
+ u32 sts;
+
+ return regmap_read_poll_timeout(rpc->regmap, RPCIF_CMNSR, sts,
+ sts & RPCIF_CMNSR_TEND, 0,
+ USEC_PER_SEC);
+}
+
+static u8 rpcif_bits_set(struct rpcif *rpc, u32 nbytes)
+{
+ if (rpc->bus_size == 2)
+ nbytes /= 2;
+ nbytes = clamp(nbytes, 1U, 4U);
+ return GENMASK(3, 4 - nbytes);
+}
+
+static u8 rpcif_bit_size(u8 buswidth)
+{
+ return buswidth > 4 ? 2 : ilog2(buswidth);
+}
+
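rpcif_bits_set() turns a transfer length into the SPIDE field, a byte-lane enable mask over the 4-byte data window (halving the count first on a 16-bit bus). A standalone userspace re-computation of the mapping, with GENMASK redefined locally for the example:

	#include <stdio.h>

	#define GENMASK(h, l)	(((1U << ((h) - (l) + 1)) - 1) << (l))

	int main(void)
	{
		/* GENMASK(3, 4 - n) enables the top n byte lanes. */
		for (unsigned int n = 1; n <= 4; n++)
			printf("%u byte(s) -> SPIDE 0x%X\n", n, GENMASK(3, 4 - n));

		return 0;	/* prints 0x8, 0xC, 0xE and 0xF */
	}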
+void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
+ size_t *len)
+{
+ rpc->smcr = 0;
+ rpc->smadr = 0;
+ rpc->enable = 0;
+ rpc->command = 0;
+ rpc->option = 0;
+ rpc->dummy = 0;
+ rpc->ddr = 0;
+ rpc->xferlen = 0;
+
+ if (op->cmd.buswidth) {
+ rpc->enable = RPCIF_SMENR_CDE |
+ RPCIF_SMENR_CDB(rpcif_bit_size(op->cmd.buswidth));
+ rpc->command = RPCIF_SMCMR_CMD(op->cmd.opcode);
+ if (op->cmd.ddr)
+ rpc->ddr = RPCIF_SMDRENR_HYPE(0x5);
+ }
+ if (op->ocmd.buswidth) {
+ rpc->enable |= RPCIF_SMENR_OCDE |
+ RPCIF_SMENR_OCDB(rpcif_bit_size(op->ocmd.buswidth));
+ rpc->command |= RPCIF_SMCMR_OCMD(op->ocmd.opcode);
+ }
+
+ if (op->addr.buswidth) {
+ rpc->enable |=
+ RPCIF_SMENR_ADB(rpcif_bit_size(op->addr.buswidth));
+ if (op->addr.nbytes == 4)
+ rpc->enable |= RPCIF_SMENR_ADE(0xF);
+ else
+ rpc->enable |= RPCIF_SMENR_ADE(GENMASK(
+ 2, 3 - op->addr.nbytes));
+ if (op->addr.ddr)
+ rpc->ddr |= RPCIF_SMDRENR_ADDRE;
+
+ if (offs && len)
+ rpc->smadr = *offs;
+ else
+ rpc->smadr = op->addr.val;
+ }
+
+ if (op->dummy.buswidth) {
+ rpc->enable |= RPCIF_SMENR_DME;
+ rpc->dummy = RPCIF_SMDMCR_DMCYC(op->dummy.ncycles /
+ op->dummy.buswidth);
+ }
+
+ if (op->option.buswidth) {
+ rpc->enable |= RPCIF_SMENR_OPDE(
+ rpcif_bits_set(rpc, op->option.nbytes)) |
+ RPCIF_SMENR_OPDB(rpcif_bit_size(op->option.buswidth));
+ if (op->option.ddr)
+ rpc->ddr |= RPCIF_SMDRENR_OPDRE;
+ rpc->option = op->option.val;
+ }
+
+ rpc->dir = op->data.dir;
+ if (op->data.buswidth) {
+ u32 nbytes;
+
+ rpc->buffer = op->data.buf.in;
+ switch (op->data.dir) {
+ case RPCIF_DATA_IN:
+ rpc->smcr = RPCIF_SMCR_SPIRE;
+ break;
+ case RPCIF_DATA_OUT:
+ rpc->smcr = RPCIF_SMCR_SPIWE;
+ break;
+ default:
+ break;
+ }
+ if (op->data.ddr)
+ rpc->ddr |= RPCIF_SMDRENR_SPIDRE;
+
+ if (offs && len)
+ nbytes = *len;
+ else
+ nbytes = op->data.nbytes;
+ rpc->xferlen = nbytes;
+
+ rpc->enable |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes)) |
+ RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth));
+ }
+}
+EXPORT_SYMBOL(rpcif_prepare);
+
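rpcif_prepare() only latches the decoded operation into the rpcif fields; nothing touches the hardware until rpcif_manual_xfer() below. A hedged sketch of how a front end would read a JEDEC ID with the two calls (example_read_id is hypothetical; the rpcif_op field names are the ones consumed above):

	/* Sketch only: assumes the rpc-if header for struct rpcif_op. */
	static int example_read_id(struct rpcif *rpc, u8 id[3])
	{
		struct rpcif_op op = { };

		op.cmd.buswidth = 1;
		op.cmd.opcode = 0x9f;		/* JEDEC Read ID */
		op.data.dir = RPCIF_DATA_IN;
		op.data.buswidth = 1;
		op.data.nbytes = 3;
		op.data.buf.in = id;

		rpcif_prepare(rpc, &op, NULL, NULL);
		return rpcif_manual_xfer(rpc);
	}

Since RDID has no address phase, this particular operation would take the external address space read fallback inside rpcif_manual_xfer().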
+int rpcif_manual_xfer(struct rpcif *rpc)
+{
+ u32 smenr, smcr, pos = 0, max = 4;
+ int ret = 0;
+
+ if (rpc->bus_size == 2)
+ max = 8;
+
+ pm_runtime_get_sync(rpc->dev);
+
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
+ RPCIF_PHYCNT_CAL, RPCIF_PHYCNT_CAL);
+ regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
+ RPCIF_CMNCR_MD, RPCIF_CMNCR_MD);
+ regmap_write(rpc->regmap, RPCIF_SMCMR, rpc->command);
+ regmap_write(rpc->regmap, RPCIF_SMOPR, rpc->option);
+ regmap_write(rpc->regmap, RPCIF_SMDMCR, rpc->dummy);
+ regmap_write(rpc->regmap, RPCIF_SMDRENR, rpc->ddr);
+ smenr = rpc->enable;
+
+ switch (rpc->dir) {
+ case RPCIF_DATA_OUT:
+ while (pos < rpc->xferlen) {
+ u32 nbytes = rpc->xferlen - pos;
+ u32 data[2];
+
+ smcr = rpc->smcr | RPCIF_SMCR_SPIE;
+ if (nbytes > max) {
+ nbytes = max;
+ smcr |= RPCIF_SMCR_SSLKP;
+ }
+
+ memcpy(data, rpc->buffer + pos, nbytes);
+ if (nbytes > 4) {
+ regmap_write(rpc->regmap, RPCIF_SMWDR1,
+ data[0]);
+ regmap_write(rpc->regmap, RPCIF_SMWDR0,
+ data[1]);
+ } else if (nbytes > 2) {
+ regmap_write(rpc->regmap, RPCIF_SMWDR0,
+ data[0]);
+ } else {
+ regmap_write(rpc->regmap, RPCIF_SMWDR0,
+ data[0] << 16);
+ }
+
+ regmap_write(rpc->regmap, RPCIF_SMADR,
+ rpc->smadr + pos);
+ regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
+ regmap_write(rpc->regmap, RPCIF_SMCR, smcr);
+ ret = wait_msg_xfer_end(rpc);
+ if (ret)
+ goto err_out;
+
+ pos += nbytes;
+ smenr = rpc->enable &
+ ~RPCIF_SMENR_CDE & ~RPCIF_SMENR_ADE(0xF);
+ }
+ break;
+ case RPCIF_DATA_IN:
+ /*
+		 * RPC-IF spoils the data for commands without an address
+		 * phase (like RDID) in manual mode, so we have to work
+		 * around this issue by using the external address space
+		 * read mode instead.
+ */
+ if (!(smenr & RPCIF_SMENR_ADE(0xF)) && rpc->dirmap) {
+ u32 dummy;
+
+ regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
+ RPCIF_CMNCR_MD, 0);
+ regmap_write(rpc->regmap, RPCIF_DRCR,
+ RPCIF_DRCR_RBURST(32) | RPCIF_DRCR_RBE);
+ regmap_write(rpc->regmap, RPCIF_DRCMR, rpc->command);
+ regmap_write(rpc->regmap, RPCIF_DREAR,
+ RPCIF_DREAR_EAC(1));
+ regmap_write(rpc->regmap, RPCIF_DROPR, rpc->option);
+ regmap_write(rpc->regmap, RPCIF_DRENR,
+ smenr & ~RPCIF_SMENR_SPIDE(0xF));
+ regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy);
+ regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr);
+ memcpy_fromio(rpc->buffer, rpc->dirmap, rpc->xferlen);
+ regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF);
+ /* Dummy read according to spec */
+ regmap_read(rpc->regmap, RPCIF_DRCR, &dummy);
+ break;
+ }
+ while (pos < rpc->xferlen) {
+ u32 nbytes = rpc->xferlen - pos;
+ u32 data[2];
+
+ if (nbytes > max)
+ nbytes = max;
+
+ regmap_write(rpc->regmap, RPCIF_SMADR,
+ rpc->smadr + pos);
+ regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
+ regmap_write(rpc->regmap, RPCIF_SMCR,
+ rpc->smcr | RPCIF_SMCR_SPIE);
+ ret = wait_msg_xfer_end(rpc);
+ if (ret)
+ goto err_out;
+
+ if (nbytes > 4) {
+ regmap_read(rpc->regmap, RPCIF_SMRDR1,
+ &data[0]);
+ regmap_read(rpc->regmap, RPCIF_SMRDR0,
+ &data[1]);
+ } else if (nbytes > 2) {
+ regmap_read(rpc->regmap, RPCIF_SMRDR0,
+ &data[0]);
+ } else {
+ regmap_read(rpc->regmap, RPCIF_SMRDR0,
+ &data[0]);
+ data[0] >>= 16;
+ }
+ memcpy(rpc->buffer + pos, data, nbytes);
+
+ pos += nbytes;
+ }
+ break;
+ default:
+ regmap_write(rpc->regmap, RPCIF_SMENR, rpc->enable);
+ regmap_write(rpc->regmap, RPCIF_SMCR,
+ rpc->smcr | RPCIF_SMCR_SPIE);
+ ret = wait_msg_xfer_end(rpc);
+ if (ret)
+ goto err_out;
+ }
+
+exit:
+ pm_runtime_put(rpc->dev);
+ return ret;
+
+err_out:
+ ret = reset_control_reset(rpc->rstc);
+ rpcif_hw_init(rpc, rpc->bus_size == 2);
+ goto exit;
+}
+EXPORT_SYMBOL(rpcif_manual_xfer);
+
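The SMWDR0/SMWDR1 accesses in the loops above encode a particular data placement. A condensed sketch of the write-side packing, as inferred from the shifts (pack_smwdr is a hypothetical helper, not part of the driver):

	#include <stdint.h>
	#include <string.h>

	static void pack_smwdr(const uint8_t *buf, uint32_t nbytes,
			       uint32_t *smwdr0, uint32_t *smwdr1)
	{
		uint32_t data[2] = { 0, 0 };

		memcpy(data, buf, nbytes);
		if (nbytes > 4) {		/* 16-bit bus, up to 8 bytes */
			*smwdr1 = data[0];
			*smwdr0 = data[1];
		} else if (nbytes > 2) {	/* 3 or 4 bytes, as-is */
			*smwdr0 = data[0];
		} else {			/* 1 or 2 bytes, upper half */
			*smwdr0 = data[0] << 16;
		}
	}

The read side mirrors this: short reads come back in the upper 16 bits of SMRDR0, hence the data[0] >>= 16 above.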
+ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
+{
+ loff_t from = offs & (RPCIF_DIRMAP_SIZE - 1);
+ size_t size = RPCIF_DIRMAP_SIZE - from;
+
+ if (len > size)
+ len = size;
+
+ pm_runtime_get_sync(rpc->dev);
+
+ regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MD, 0);
+ regmap_write(rpc->regmap, RPCIF_DRCR, 0);
+ regmap_write(rpc->regmap, RPCIF_DRCMR, rpc->command);
+ regmap_write(rpc->regmap, RPCIF_DREAR,
+ RPCIF_DREAR_EAV(offs >> 25) | RPCIF_DREAR_EAC(1));
+ regmap_write(rpc->regmap, RPCIF_DROPR, rpc->option);
+ regmap_write(rpc->regmap, RPCIF_DRENR,
+ rpc->enable & ~RPCIF_SMENR_SPIDE(0xF));
+ regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy);
+ regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr);
+
+ memcpy_fromio(buf, rpc->dirmap + from, len);
+
+ pm_runtime_put(rpc->dev);
+
+ return len;
+}
+EXPORT_SYMBOL(rpcif_dirmap_read);
+
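The first two lines of rpcif_dirmap_read() wrap the offset into the 64 MiB direct-mapping window and truncate the length at its end, so callers are expected to loop over larger reads. The clamp in isolation (dirmap_clamp is a hypothetical helper):

	#include <stddef.h>
	#include <stdint.h>

	#define RPCIF_DIRMAP_SIZE	0x4000000	/* 64 MiB window */

	static size_t dirmap_clamp(uint64_t offs, size_t len)
	{
		uint64_t from = offs & (RPCIF_DIRMAP_SIZE - 1);
		size_t room = RPCIF_DIRMAP_SIZE - from;

		return len < room ? len : room;	/* bytes readable this pass */
	}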
+static int rpcif_probe(struct platform_device *pdev)
+{
+ struct platform_device *vdev;
+ struct device_node *flash;
+ const char *name;
+
+ flash = of_get_next_child(pdev->dev.of_node, NULL);
+ if (!flash) {
+ dev_warn(&pdev->dev, "no flash node found\n");
+ return -ENODEV;
+ }
+
+ if (of_device_is_compatible(flash, "jedec,spi-nor")) {
+ name = "rpc-if-spi";
+ } else if (of_device_is_compatible(flash, "cfi-flash")) {
+ name = "rpc-if-hyperflash";
+ } else {
+ dev_warn(&pdev->dev, "unknown flash type\n");
+ return -ENODEV;
+ }
+
+ vdev = platform_device_alloc(name, pdev->id);
+ if (!vdev)
+ return -ENOMEM;
+ vdev->dev.parent = &pdev->dev;
+ platform_set_drvdata(pdev, vdev);
+ return platform_device_add(vdev);
+}
+
+static int rpcif_remove(struct platform_device *pdev)
+{
+ struct platform_device *vdev = platform_get_drvdata(pdev);
+
+ platform_device_unregister(vdev);
+
+ return 0;
+}
+
+static const struct of_device_id rpcif_of_match[] = {
+ { .compatible = "renesas,rcar-gen3-rpc-if", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rpcif_of_match);
+
+static struct platform_driver rpcif_driver = {
+ .probe = rpcif_probe,
+ .remove = rpcif_remove,
+ .driver = {
+ .name = "rpc-if",
+ .of_match_table = rpcif_of_match,
+ },
+};
+module_platform_driver(rpcif_driver);
+
+MODULE_DESCRIPTION("Renesas RPC-IF core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
index 20a8406ce786..8e240f078afc 100644
--- a/drivers/memory/samsung/Kconfig
+++ b/drivers/memory/samsung/Kconfig
@@ -23,5 +23,12 @@ config EXYNOS5422_DMC
config EXYNOS_SROM
bool "Exynos SROM controller driver" if COMPILE_TEST
depends on (ARM && ARCH_EXYNOS) || (COMPILE_TEST && HAS_IOMEM)
+ help
+	  This adds a driver for the Samsung Exynos SoC SROM controller. In
+	  its basic operation mode the driver only saves and restores SROM
+	  registers during suspend. If, however, an appropriate device tree
+	  configuration is provided, the driver enables support for external
+	  memory or external devices.
+	  If unsure, say Y on devices with Samsung Exynos SoCs.
endif
diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
index 6510d7bab217..e73dd330af47 100644
--- a/drivers/memory/samsung/exynos-srom.c
+++ b/drivers/memory/samsung/exynos-srom.c
@@ -47,9 +47,9 @@ struct exynos_srom {
struct exynos_srom_reg_dump *reg_offset;
};
-static struct exynos_srom_reg_dump *exynos_srom_alloc_reg_dump(
- const unsigned long *rdump,
- unsigned long nr_rdump)
+static struct exynos_srom_reg_dump *
+exynos_srom_alloc_reg_dump(const unsigned long *rdump,
+ unsigned long nr_rdump)
{
struct exynos_srom_reg_dump *rd;
unsigned int i;
@@ -116,7 +116,7 @@ static int exynos_srom_probe(struct platform_device *pdev)
}
srom = devm_kzalloc(&pdev->dev,
- sizeof(struct exynos_srom), GFP_KERNEL);
+ sizeof(struct exynos_srom), GFP_KERNEL);
if (!srom)
return -ENOMEM;
@@ -130,7 +130,7 @@ static int exynos_srom_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, srom);
srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
- ARRAY_SIZE(exynos_srom_offsets));
+ ARRAY_SIZE(exynos_srom_offsets));
if (!srom->reg_offset) {
iounmap(srom->reg_base);
return -ENOMEM;
@@ -157,16 +157,16 @@ static int exynos_srom_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static void exynos_srom_save(void __iomem *base,
- struct exynos_srom_reg_dump *rd,
- unsigned int num_regs)
+ struct exynos_srom_reg_dump *rd,
+ unsigned int num_regs)
{
for (; num_regs > 0; --num_regs, ++rd)
rd->value = readl(base + rd->offset);
}
static void exynos_srom_restore(void __iomem *base,
- const struct exynos_srom_reg_dump *rd,
- unsigned int num_regs)
+ const struct exynos_srom_reg_dump *rd,
+ unsigned int num_regs)
{
for (; num_regs > 0; --num_regs, ++rd)
writel(rd->value, base + rd->offset);
@@ -177,7 +177,7 @@ static int exynos_srom_suspend(struct device *dev)
struct exynos_srom *srom = dev_get_drvdata(dev);
exynos_srom_save(srom->reg_base, srom->reg_offset,
- ARRAY_SIZE(exynos_srom_offsets));
+ ARRAY_SIZE(exynos_srom_offsets));
return 0;
}
@@ -186,7 +186,7 @@ static int exynos_srom_resume(struct device *dev)
struct exynos_srom *srom = dev_get_drvdata(dev);
exynos_srom_restore(srom->reg_base, srom->reg_offset,
- ARRAY_SIZE(exynos_srom_offsets));
+ ARRAY_SIZE(exynos_srom_offsets));
return 0;
}
#endif
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 25196d6268e2..b9c7956e5031 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
@@ -21,6 +22,10 @@
#include "../jedec_ddr.h"
#include "../of_memory.h"
+static int irqmode;
+module_param(irqmode, int, 0644);
+MODULE_PARM_DESC(irqmode, "Enable IRQ mode (0=off [default], 1=on)");
+
#define EXYNOS5_DREXI_TIMINGAREF (0x0030)
#define EXYNOS5_DREXI_TIMINGROW0 (0x0034)
#define EXYNOS5_DREXI_TIMINGDATA0 (0x0038)
@@ -270,12 +275,14 @@ static int find_target_freq_idx(struct exynos5_dmc *dmc,
* This function switches between these banks according to the
* currently used clock source.
*/
-static void exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
+static int exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
{
unsigned int reg;
int ret;
ret = regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, &reg);
+ if (ret)
+ return ret;
if (set)
reg |= EXYNOS5_TIMING_SET_SWI;
@@ -283,6 +290,8 @@ static void exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
reg &= ~EXYNOS5_TIMING_SET_SWI;
regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, reg);
+
+ return 0;
}
/**
@@ -516,7 +525,7 @@ exynos5_dmc_switch_to_bypass_configuration(struct exynos5_dmc *dmc,
/*
* Delays are long enough, so use them for the new coming clock.
*/
- exynos5_switch_timing_regs(dmc, USE_MX_MSPLL_TIMINGS);
+ ret = exynos5_switch_timing_regs(dmc, USE_MX_MSPLL_TIMINGS);
return ret;
}
@@ -577,7 +586,9 @@ exynos5_dmc_change_freq_and_volt(struct exynos5_dmc *dmc,
clk_set_rate(dmc->fout_bpll, target_rate);
- exynos5_switch_timing_regs(dmc, USE_BPLL_TIMINGS);
+ ret = exynos5_switch_timing_regs(dmc, USE_BPLL_TIMINGS);
+ if (ret)
+ goto disable_clocks;
ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_bpll);
if (ret)
@@ -945,6 +956,7 @@ static int exynos5_dmc_get_cur_freq(struct device *dev, unsigned long *freq)
* It provides to the devfreq framework needed functions and polling period.
*/
static struct devfreq_dev_profile exynos5_dmc_df_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
.target = exynos5_dmc_target,
.get_dev_status = exynos5_dmc_get_status,
.get_cur_freq = exynos5_dmc_get_cur_freq,
@@ -1392,7 +1404,7 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
return PTR_ERR(dmc->base_drexi1);
dmc->clk_regmap = syscon_regmap_lookup_by_phandle(np,
- "samsung,syscon-clk");
+ "samsung,syscon-clk");
if (IS_ERR(dmc->clk_regmap))
return PTR_ERR(dmc->clk_regmap);
@@ -1427,7 +1439,7 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
/* There are two modes in which the driver works: polling or IRQ */
irq[0] = platform_get_irq_byname(pdev, "drex_0");
irq[1] = platform_get_irq_byname(pdev, "drex_1");
- if (irq[0] > 0 && irq[1] > 0) {
+ if (irq[0] > 0 && irq[1] > 0 && irqmode) {
ret = devm_request_threaded_irq(dev, irq[0], NULL,
dmc_irq_thread, IRQF_ONESHOT,
dev_name(dev), dmc);
@@ -1465,13 +1477,12 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
* Setup default thresholds for the devfreq governor.
* The values are chosen based on experiments.
*/
- dmc->gov_data.upthreshold = 30;
+ dmc->gov_data.upthreshold = 10;
dmc->gov_data.downdifferential = 5;
- exynos5_dmc_df_profile.polling_ms = 500;
+ exynos5_dmc_df_profile.polling_ms = 100;
}
-
dmc->df = devm_devfreq_add_device(dev, &exynos5_dmc_df_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
&dmc->gov_data);
@@ -1484,7 +1495,7 @@ static int exynos5_dmc_probe(struct platform_device *pdev)
if (dmc->in_irq_mode)
exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);
- dev_info(dev, "DMC initialized\n");
+ dev_info(dev, "DMC initialized, in irq mode: %d\n", dmc->in_irq_mode);
return 0;
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index fbfbaada61a2..9f0a96bf9ccc 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -36,3 +36,17 @@ config TEGRA124_EMC
Tegra124 chips. The EMC controls the external DRAM on the board.
This driver is required to change memory timings / clock rate for
external memory.
+
+config TEGRA210_EMC_TABLE
+ bool
+ depends on ARCH_TEGRA_210_SOC
+
+config TEGRA210_EMC
+ tristate "NVIDIA Tegra210 External Memory Controller driver"
+ depends on TEGRA_MC && ARCH_TEGRA_210_SOC
+ select TEGRA210_EMC_TABLE
+ help
+ This driver is for the External Memory Controller (EMC) found on
+ Tegra210 chips. The EMC controls the external DRAM on the board.
+ This driver is required to change memory timings / clock rate for
+ external memory.
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
index 529d10bc5650..6c1a2ecc6628 100644
--- a/drivers/memory/tegra/Makefile
+++ b/drivers/memory/tegra/Makefile
@@ -13,5 +13,9 @@ obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
obj-$(CONFIG_TEGRA20_EMC) += tegra20-emc.o
obj-$(CONFIG_TEGRA30_EMC) += tegra30-emc.o
obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o
+obj-$(CONFIG_TEGRA210_EMC_TABLE) += tegra210-emc-table.o
+obj-$(CONFIG_TEGRA210_EMC) += tegra210-emc.o
obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o tegra186-emc.o
obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186.o tegra186-emc.o
+
+tegra210-emc-y := tegra210-emc-core.o tegra210-emc-cc-r21021.o
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index 957c6eb74ff9..afa3ba45c9e6 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -34,6 +34,7 @@
#define MC_EMEM_ARB_TIMING_W2W 0xbc
#define MC_EMEM_ARB_TIMING_R2W 0xc0
#define MC_EMEM_ARB_TIMING_W2R 0xc4
+#define MC_EMEM_ARB_MISC2 0xc8
#define MC_EMEM_ARB_DA_TURNS 0xd0
#define MC_EMEM_ARB_DA_COVERS 0xd4
#define MC_EMEM_ARB_MISC0 0xd8
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 33b8216bac30..ba5cb1f4dfc2 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -984,6 +984,7 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
static const struct of_device_id tegra_emc_of_match[] = {
{ .compatible = "nvidia,tegra124-emc" },
+ { .compatible = "nvidia,tegra132-emc" },
{}
};
@@ -1178,11 +1179,11 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
return;
}
- debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root, emc,
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
&tegra_emc_debug_available_rates_fops);
- debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_min_rate_fops);
- debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_max_rate_fops);
}
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index 97f26bc77ad4..8478f59db432 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -185,7 +185,7 @@ static int tegra186_emc_probe(struct platform_device *pdev)
if (IS_ERR(emc->clk)) {
err = PTR_ERR(emc->clk);
dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
- return err;
+ goto put_bpmp;
}
platform_set_drvdata(pdev, emc);
@@ -201,7 +201,7 @@ static int tegra186_emc_probe(struct platform_device *pdev)
err = tegra_bpmp_transfer(emc->bpmp, &msg);
if (err < 0) {
dev_err(&pdev->dev, "failed to EMC DVFS pairs: %d\n", err);
- return err;
+ goto put_bpmp;
}
emc->debugfs.min_rate = ULONG_MAX;
@@ -211,8 +211,10 @@ static int tegra186_emc_probe(struct platform_device *pdev)
emc->dvfs = devm_kmalloc_array(&pdev->dev, emc->num_dvfs,
sizeof(*emc->dvfs), GFP_KERNEL);
- if (!emc->dvfs)
- return -ENOMEM;
+ if (!emc->dvfs) {
+ err = -ENOMEM;
+ goto put_bpmp;
+ }
dev_dbg(&pdev->dev, "%u DVFS pairs:\n", emc->num_dvfs);
@@ -237,15 +239,10 @@ static int tegra186_emc_probe(struct platform_device *pdev)
"failed to set rate range [%lu-%lu] for %pC\n",
emc->debugfs.min_rate, emc->debugfs.max_rate,
emc->clk);
- return err;
+ goto put_bpmp;
}
emc->debugfs.root = debugfs_create_dir("emc", NULL);
- if (!emc->debugfs.root) {
- dev_err(&pdev->dev, "failed to create debugfs directory\n");
- return 0;
- }
-
debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
emc, &tegra186_emc_debug_available_rates_fops);
debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
@@ -254,6 +251,10 @@ static int tegra186_emc_probe(struct platform_device *pdev)
emc, &tegra186_emc_debug_max_rate_fops);
return 0;
+
+put_bpmp:
+ tegra_bpmp_put(emc->bpmp);
+ return err;
}
static int tegra186_emc_remove(struct platform_device *pdev)
@@ -267,10 +268,10 @@ static int tegra186_emc_remove(struct platform_device *pdev)
}
static const struct of_device_id tegra186_emc_of_match[] = {
-#if defined(CONFIG_ARCH_TEGRA186_SOC)
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
{ .compatible = "nvidia,tegra186-emc" },
#endif
-#if defined(CONFIG_ARCH_TEGRA194_SOC)
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
{ .compatible = "nvidia,tegra194-emc" },
#endif
{ /* sentinel */ }
diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
index 5d53f11ca7b6..e25c954dde2e 100644
--- a/drivers/memory/tegra/tegra186.c
+++ b/drivers/memory/tegra/tegra186.c
@@ -1570,12 +1570,12 @@ static const struct of_device_id tegra186_mc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra186_mc_of_match);
-static int tegra186_mc_suspend(struct device *dev)
+static int __maybe_unused tegra186_mc_suspend(struct device *dev)
{
return 0;
}
-static int tegra186_mc_resume(struct device *dev)
+static int __maybe_unused tegra186_mc_resume(struct device *dev)
{
struct tegra186_mc *mc = dev_get_drvdata(dev);
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index b16715e9515d..027f46287dbf 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -7,11 +7,11 @@
#include <linux/clk.h>
#include <linux/clk/tegra.h>
-#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -144,7 +144,6 @@ struct emc_timing {
struct tegra_emc {
struct device *dev;
- struct completion clk_handshake_complete;
struct notifier_block clk_nb;
struct clk *clk;
void __iomem *regs;
@@ -162,17 +161,13 @@ struct tegra_emc {
static irqreturn_t tegra_emc_isr(int irq, void *data)
{
struct tegra_emc *emc = data;
- u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
u32 status;
status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
if (!status)
return IRQ_NONE;
- /* notify about EMC-CAR handshake completion */
- if (status & EMC_CLKCHANGE_COMPLETE_INT)
- complete(&emc->clk_handshake_complete);
-
/* notify about HW problem */
if (status & EMC_REFRESH_OVERFLOW_INT)
dev_err_ratelimited(emc->dev,
@@ -224,14 +219,13 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
/* wait until programming has settled */
readl_relaxed(emc->regs + emc_timing_registers[i - 1]);
- reinit_completion(&emc->clk_handshake_complete);
-
return 0;
}
static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
{
- unsigned long timeout;
+ int err;
+ u32 v;
dev_dbg(emc->dev, "%s: flush %d\n", __func__, flush);
@@ -242,11 +236,12 @@ static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
return 0;
}
- timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
- msecs_to_jiffies(100));
- if (timeout == 0) {
- dev_err(emc->dev, "EMC-CAR handshake failed\n");
- return -EIO;
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
+ v & EMC_CLKCHANGE_COMPLETE_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
+ return err;
}
return 0;
@@ -412,7 +407,7 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
static int emc_setup_hw(struct tegra_emc *emc)
{
- u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
u32 emc_cfg, emc_dbg;
emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
@@ -647,11 +642,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
return;
}
- debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root,
emc, &tegra_emc_debug_available_rates_fops);
- debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_min_rate_fops);
- debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_max_rate_fops);
}
@@ -686,7 +681,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- init_completion(&emc->clk_handshake_complete);
emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
emc->dev = &pdev->dev;
diff --git a/drivers/memory/tegra/tegra210-emc-cc-r21021.c b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
new file mode 100644
index 000000000000..ff55a17896fa
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
@@ -0,0 +1,1775 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+
+#include <soc/tegra/mc.h>
+
+#include "tegra210-emc.h"
+#include "tegra210-mc.h"
+
+/*
+ * Enable flags for specifying verbosity.
+ */
+#define INFO (1 << 0)
+#define STEPS (1 << 1)
+#define SUB_STEPS (1 << 2)
+#define PRELOCK (1 << 3)
+#define PRELOCK_STEPS (1 << 4)
+#define ACTIVE_EN (1 << 5)
+#define PRAMP_UP (1 << 6)
+#define PRAMP_DN (1 << 7)
+#define EMA_WRITES (1 << 10)
+#define EMA_UPDATES (1 << 11)
+#define PER_TRAIN (1 << 16)
+#define CC_PRINT (1 << 17)
+#define CCFIFO (1 << 29)
+#define REGS (1 << 30)
+#define REG_LISTS (1 << 31)
+
+#define emc_dbg(emc, flags, ...) dev_dbg(emc->dev, __VA_ARGS__)
+
+#define DVFS_CLOCK_CHANGE_VERSION 21021
+#define EMC_PRELOCK_VERSION 2101
+
+enum {
+ DVFS_SEQUENCE = 1,
+ WRITE_TRAINING_SEQUENCE = 2,
+ PERIODIC_TRAINING_SEQUENCE = 3,
+ DVFS_PT1 = 10,
+ DVFS_UPDATE = 11,
+ TRAINING_PT1 = 12,
+ TRAINING_UPDATE = 13,
+ PERIODIC_TRAINING_UPDATE = 14
+};
+
+/*
+ * PTFV defines - basically just indexes into the per table PTFV array.
+ */
+#define PTFV_DQSOSC_MOVAVG_C0D0U0_INDEX 0
+#define PTFV_DQSOSC_MOVAVG_C0D0U1_INDEX 1
+#define PTFV_DQSOSC_MOVAVG_C0D1U0_INDEX 2
+#define PTFV_DQSOSC_MOVAVG_C0D1U1_INDEX 3
+#define PTFV_DQSOSC_MOVAVG_C1D0U0_INDEX 4
+#define PTFV_DQSOSC_MOVAVG_C1D0U1_INDEX 5
+#define PTFV_DQSOSC_MOVAVG_C1D1U0_INDEX 6
+#define PTFV_DQSOSC_MOVAVG_C1D1U1_INDEX 7
+#define PTFV_DVFS_SAMPLES_INDEX 9
+#define PTFV_MOVAVG_WEIGHT_INDEX 10
+#define PTFV_CONFIG_CTRL_INDEX 11
+
+#define PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA (1 << 0)
+
+/*
+ * Do arithmetic in fixed point.
+ */
+#define MOVAVG_PRECISION_FACTOR 100
+
+/*
+ * The division portion of the average operation.
+ */
+#define __AVERAGE_PTFV(dev) \
+ ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] = \
+ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \
+ next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
+
+/*
+ * Convert val to fixed point and add it to the temporary average.
+ */
+#define __INCREMENT_PTFV(dev, val) \
+ ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] += \
+ ((val) * MOVAVG_PRECISION_FACTOR); })
+
+/*
+ * Convert a moving average back to integral form and return the value.
+ */
+#define __MOVAVG_AC(timing, dev) \
+ ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \
+ MOVAVG_PRECISION_FACTOR)
+
+/* Weighted update. */
+#define __WEIGHTED_UPDATE_PTFV(dev, nval) \
+ do { \
+ int w = PTFV_MOVAVG_WEIGHT_INDEX; \
+ int dqs = PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX; \
+ \
+ next->ptfv_list[dqs] = \
+ ((nval * MOVAVG_PRECISION_FACTOR) + \
+ (next->ptfv_list[dqs] * \
+ next->ptfv_list[w])) / \
+ (next->ptfv_list[w] + 1); \
+ \
+ emc_dbg(emc, EMA_UPDATES, "%s: (s=%lu) EMA: %u\n", \
+ __stringify(dev), nval, next->ptfv_list[dqs]); \
+ } while (0)
+
+/* Access a particular average. */
+#define __MOVAVG(timing, dev) \
+ ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX])
+
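The PTFV macros above implement a fixed-point average: samples are accumulated scaled by MOVAVG_PRECISION_FACTOR, divided by the sample count for the plain DVFS average, and blended with a weight for the periodic (EMA-style) update. The weighted step in isolation, as a standalone sketch (ema_update is hypothetical):

	#include <stdio.h>

	#define F	100	/* MOVAVG_PRECISION_FACTOR */

	/* Mirrors __WEIGHTED_UPDATE_PTFV: avg is stored scaled by F. */
	static unsigned int ema_update(unsigned int avg, unsigned int nval,
				       unsigned int w)
	{
		return (nval * F + avg * w) / (w + 1);
	}

	int main(void)
	{
		unsigned int avg = 50 * F;		/* previous average: 50.00 */

		avg = ema_update(avg, 54, 4);		/* (54*100 + 5000*4) / 5 */
		printf("%u.%02u\n", avg / F, avg % F);	/* 50.80 */
		return 0;
	}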
+static u32 update_clock_tree_delay(struct tegra210_emc *emc, int type)
+{
+ bool periodic_training_update = type == PERIODIC_TRAINING_UPDATE;
+ struct tegra210_emc_timing *last = emc->last;
+ struct tegra210_emc_timing *next = emc->next;
+ u32 last_timing_rate_mhz = last->rate / 1000;
+ u32 next_timing_rate_mhz = next->rate / 1000;
+ bool dvfs_update = type == DVFS_UPDATE;
+ s32 tdel = 0, tmdel = 0, adel = 0;
+ bool dvfs_pt1 = type == DVFS_PT1;
+ unsigned long cval = 0;
+ u32 temp[2][2], value;
+ unsigned int i;
+
+ /*
+ * Dev0 MSB.
+ */
+ if (dvfs_pt1 || periodic_training_update) {
+ value = tegra210_emc_mrr_read(emc, 2, 19);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] = (value & 0x00ff) << 8;
+ temp[i][1] = (value & 0xff00) << 0;
+ value >>= 16;
+ }
+
+ /*
+ * Dev0 LSB.
+ */
+ value = tegra210_emc_mrr_read(emc, 2, 18);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] |= (value & 0x00ff) >> 0;
+ temp[i][1] |= (value & 0xff00) >> 8;
+ value >>= 16;
+ }
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D0U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D0U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D0U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D0U0] -
+ __MOVAVG_AC(next, C0D0U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D0U0] =
+ __MOVAVG_AC(next, C0D0U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D0U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D0U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D0U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D0U1] -
+ __MOVAVG_AC(next, C0D0U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D0U1] =
+ __MOVAVG_AC(next, C0D0U1);
+ }
+
+ if (emc->num_channels > 1) {
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D0U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D0U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D0U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D0U0] -
+ __MOVAVG_AC(next, C1D0U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D0U0] =
+ __MOVAVG_AC(next, C1D0U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D0U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D0U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D0U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D0U1] -
+ __MOVAVG_AC(next, C1D0U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D0U1] =
+ __MOVAVG_AC(next, C1D0U1);
+ }
+ }
+
+ if (emc->num_devices < 2)
+ goto done;
+
+ /*
+ * Dev1 MSB.
+ */
+ if (dvfs_pt1 || periodic_training_update) {
+ value = tegra210_emc_mrr_read(emc, 1, 19);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] = (value & 0x00ff) << 8;
+ temp[i][1] = (value & 0xff00) << 0;
+ value >>= 16;
+ }
+
+ /*
+ * Dev1 LSB.
+ */
+		value = tegra210_emc_mrr_read(emc, 1, 18);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] |= (value & 0x00ff) >> 0;
+ temp[i][1] |= (value & 0xff00) >> 8;
+ value >>= 16;
+ }
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D1U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D1U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D1U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D1U0] -
+ __MOVAVG_AC(next, C0D1U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D1U0] =
+ __MOVAVG_AC(next, C0D1U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D1U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D1U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D1U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D1U1] -
+ __MOVAVG_AC(next, C0D1U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D1U1] =
+ __MOVAVG_AC(next, C0D1U1);
+ }
+
+ if (emc->num_channels > 1) {
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D1U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D1U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D1U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D1U0] -
+ __MOVAVG_AC(next, C1D1U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D1U0] =
+ __MOVAVG_AC(next, C1D1U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D1U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D1U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D1U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D1U1] -
+ __MOVAVG_AC(next, C1D1U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D1U1] =
+ __MOVAVG_AC(next, C1D1U1);
+ }
+ }
+
+done:
+ return adel;
+}
+
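The eight channel/device/slice blocks in update_clock_tree_delay() all repeat one pattern: compute the drift between the tracked clock tree delay and the new moving average, and adopt the average once the drift, scaled by 128 * f(MHz) / 10^6 into the units of the table's tree_margin, exceeds that margin. One instance of the pattern, condensed into a hypothetical helper (track_delay; the caller keeps the running maximum in adel):

	#include <stdint.h>

	static int32_t track_delay(uint32_t *tracked, uint32_t movavg_ac,
				   uint32_t rate_mhz, uint32_t margin)
	{
		int32_t tdel = *tracked - movavg_ac;
		int32_t tmdel = tdel < 0 ? -tdel : tdel;

		if ((uint32_t)tmdel * 128 * rate_mhz / 1000000 > margin)
			*tracked = movavg_ac;	/* re-center on the average */

		return tmdel;	/* absolute drift, for the adel maximum */
	}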
+static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
+ struct tegra210_emc_timing *last,
+ struct tegra210_emc_timing *next)
+{
+#define __COPY_EMA(nt, lt, dev) \
+ ({ __MOVAVG(nt, dev) = __MOVAVG(lt, dev) * \
+ (nt)->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
+
+ u32 i, adel = 0, samples = next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX];
+ u32 delay;
+
+ delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ delay *= 1000;
+ delay = 2 + (delay / last->rate);
+
+ if (!next->periodic_training)
+ return 0;
+
+ if (type == DVFS_SEQUENCE) {
+ if (last->periodic_training &&
+ (next->ptfv_list[PTFV_CONFIG_CTRL_INDEX] &
+ PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA)) {
+ /*
+ * If the previous frequency was using periodic
+ * calibration then we can reuse the previous
+			 * frequency's EMA data.
+ */
+ __COPY_EMA(next, last, C0D0U0);
+ __COPY_EMA(next, last, C0D0U1);
+ __COPY_EMA(next, last, C1D0U0);
+ __COPY_EMA(next, last, C1D0U1);
+ __COPY_EMA(next, last, C0D1U0);
+ __COPY_EMA(next, last, C0D1U1);
+ __COPY_EMA(next, last, C1D1U0);
+ __COPY_EMA(next, last, C1D1U1);
+ } else {
+ /* Reset the EMA.*/
+ __MOVAVG(next, C0D0U0) = 0;
+ __MOVAVG(next, C0D0U1) = 0;
+ __MOVAVG(next, C1D0U0) = 0;
+ __MOVAVG(next, C1D0U1) = 0;
+ __MOVAVG(next, C0D1U0) = 0;
+ __MOVAVG(next, C0D1U1) = 0;
+ __MOVAVG(next, C1D1U0) = 0;
+ __MOVAVG(next, C1D1U1) = 0;
+
+ for (i = 0; i < samples; i++) {
+ tegra210_emc_start_periodic_compensation(emc);
+ udelay(delay);
+
+ /*
+ * Generate next sample of data.
+ */
+ adel = update_clock_tree_delay(emc, DVFS_PT1);
+ }
+ }
+
+ /*
+		 * Seems like this should be part of the
+		 * 'if (last_timing->periodic_training)' conditional,
+		 * since it is already done for the else clause.
+ */
+ adel = update_clock_tree_delay(emc, DVFS_UPDATE);
+ }
+
+ if (type == PERIODIC_TRAINING_SEQUENCE) {
+ tegra210_emc_start_periodic_compensation(emc);
+ udelay(delay);
+
+ adel = update_clock_tree_delay(emc, PERIODIC_TRAINING_UPDATE);
+ }
+
+ return adel;
+}
+
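The udelay() interval in periodic_compensation_handler() is derived from the oscillator run length. Assuming rates are in kHz (as the rate / 1000 to MHz conversions elsewhere suggest), clocks * 1000 / rate is the run length in microseconds, padded with 2 us of slack; as a one-line sketch (osc_settle_us is hypothetical):

	static unsigned long osc_settle_us(unsigned long osc_clocks,
					   unsigned long rate_khz)
	{
		return 2 + (osc_clocks * 1000) / rate_khz;
	}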
+static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
+{
+ u32 emc_cfg, emc_cfg_o, emc_cfg_update, del, value;
+ u32 list[] = {
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3,
+ EMC_DATA_BRLSHFT_0,
+ EMC_DATA_BRLSHFT_1
+ };
+ struct tegra210_emc_timing *last = emc->last;
+ unsigned int items = ARRAY_SIZE(list), i;
+ unsigned long delay;
+
+ if (last->periodic_training) {
+ emc_dbg(emc, PER_TRAIN, "Periodic training starting\n");
+
+ value = emc_readl(emc, EMC_DBG);
+ emc_cfg_o = emc_readl(emc, EMC_CFG);
+ emc_cfg = emc_cfg_o & ~(EMC_CFG_DYN_SELF_REF |
+ EMC_CFG_DRAM_ACPD |
+					EMC_CFG_DRAM_CLKSTOP_SR |
+ EMC_CFG_DRAM_CLKSTOP_PD);
+
+ /*
+ * 1. Power optimizations should be off.
+ */
+ emc_writel(emc, emc_cfg, EMC_CFG);
+
+ /* Does emc_timing_update() for above changes. */
+ tegra210_emc_dll_disable(emc);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
+ 0);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
+ 0);
+
+ emc_cfg_update = value = emc_readl(emc, EMC_CFG_UPDATE);
+ value &= ~EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_MASK;
+ value |= (2 << EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT);
+ emc_writel(emc, value, EMC_CFG_UPDATE);
+
+ /*
+ * 2. osc kick off - this assumes training and dvfs have set
+ * correct MR23.
+ */
+ tegra210_emc_start_periodic_compensation(emc);
+
+ /*
+ * 3. Let dram capture its clock tree delays.
+ */
+ delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ delay *= 1000;
+ delay /= last->rate + 1;
+ udelay(delay);
+
+ /*
+ * 4. Check delta wrt previous values (save value if margin
+ * exceeds what is set in table).
+ */
+ del = periodic_compensation_handler(emc,
+ PERIODIC_TRAINING_SEQUENCE,
+ last, last);
+
+ /*
+ * 5. Apply compensation w.r.t. trained values (if clock tree
+ * has drifted more than the set margin).
+ */
+ if (last->tree_margin < ((del * 128 * (last->rate / 1000)) / 1000000)) {
+ for (i = 0; i < items; i++) {
+ value = tegra210_emc_compensate(last, list[i]);
+ emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
+ list[i], value);
+ emc_writel(emc, value, list[i]);
+ }
+ }
+
+ emc_writel(emc, emc_cfg_o, EMC_CFG);
+
+ /*
+		 * 6. Timing update actually applies the new trimmers.
+ */
+ tegra210_emc_timing_update(emc);
+
+ /* 6.1. Restore the UPDATE_DLL_IN_UPDATE field. */
+ emc_writel(emc, emc_cfg_update, EMC_CFG_UPDATE);
+
+ /* 6.2. Restore the DLL. */
+ tegra210_emc_dll_enable(emc);
+ }
+
+ return 0;
+}
+
+/*
+ * Do the clock change sequence.
+ */
+static void tegra210_emc_r21021_set_clock(struct tegra210_emc *emc, u32 clksrc)
+{
+ /* state variables */
+ static bool fsp_for_next_freq;
+ /* constant configuration parameters */
+ const bool save_restore_clkstop_pd = true;
+ const u32 zqcal_before_cc_cutoff = 2400;
+ const bool cya_allow_ref_cc = false;
+ const bool cya_issue_pc_ref = false;
+ const bool opt_cc_short_zcal = true;
+ const bool ref_b4_sref_en = false;
+ const u32 tZQCAL_lpddr4 = 1000000;
+ const bool opt_short_zcal = true;
+ const bool opt_do_sw_qrst = true;
+ const u32 opt_dvfs_mode = MAN_SR;
+ /*
+ * This is the timing table for the source frequency. It does _not_
+ * necessarily correspond to the actual timing values in the EMC at the
+ * moment. If the boot BCT differs from the table then this can happen.
+ * However, we need it for accessing the dram_timings (which are not
+ * really registers) array for the current frequency.
+ */
+ struct tegra210_emc_timing *fake, *last = emc->last, *next = emc->next;
+ u32 tRTM, RP_war, R2P_war, TRPab_war, deltaTWATM, W2P_war, tRPST;
+ u32 mr13_flip_fspwr, mr13_flip_fspop, ramp_up_wait, ramp_down_wait;
+ u32 zq_wait_long, zq_latch_dvfs_wait_time, tZQCAL_lpddr4_fc_adj;
+ u32 emc_auto_cal_config, auto_cal_en, emc_cfg, emc_sel_dpd_ctrl;
+ u32 tFC_lpddr4 = 1000 * next->dram_timings[T_FC_LPDDR4];
+ u32 bg_reg_mode_change, enable_bglp_reg, enable_bg_reg;
+ bool opt_zcal_en_cc = false, is_lpddr3 = false;
+ bool compensate_trimmer_applicable = false;
+ u32 emc_dbg, emc_cfg_pipe_clk, emc_pin;
+ u32 src_clk_period, dst_clk_period; /* in picoseconds */
+ bool shared_zq_resistor = false;
+ u32 value, dram_type;
+ u32 opt_dll_mode = 0;
+ unsigned long delay;
+ unsigned int i;
+
+ emc_dbg(emc, INFO, "Running clock change.\n");
+
+ /* XXX fake == last */
+ fake = tegra210_emc_find_timing(emc, last->rate * 1000UL);
+ fsp_for_next_freq = !fsp_for_next_freq;
+
+ value = emc_readl(emc, EMC_FBIO_CFG5) & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+ dram_type = value >> EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;
+
+ if (last->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX] & BIT(31))
+ shared_zq_resistor = true;
+
+ if ((next->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0 &&
+ last->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0) ||
+ dram_type == DRAM_TYPE_LPDDR4)
+ opt_zcal_en_cc = true;
+
+ if (dram_type == DRAM_TYPE_DDR3)
+ opt_dll_mode = tegra210_emc_get_dll_state(next);
+
+ if ((next->burst_regs[EMC_FBIO_CFG5_INDEX] & BIT(25)) &&
+ (dram_type == DRAM_TYPE_LPDDR2))
+ is_lpddr3 = true;
+
+ emc_readl(emc, EMC_CFG);
+ emc_readl(emc, EMC_AUTO_CAL_CONFIG);
+
+ src_clk_period = 1000000000 / last->rate;
+ dst_clk_period = 1000000000 / next->rate;
+
+ if (dst_clk_period <= zqcal_before_cc_cutoff)
+ tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4 - tFC_lpddr4;
+ else
+ tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4;
+
+ tZQCAL_lpddr4_fc_adj /= dst_clk_period;
+
+ emc_dbg = emc_readl(emc, EMC_DBG);
+ emc_pin = emc_readl(emc, EMC_PIN);
+ emc_cfg_pipe_clk = emc_readl(emc, EMC_CFG_PIPE_CLK);
+
+ emc_cfg = next->burst_regs[EMC_CFG_INDEX];
+ emc_cfg &= ~(EMC_CFG_DYN_SELF_REF | EMC_CFG_DRAM_ACPD |
+ EMC_CFG_DRAM_CLKSTOP_SR | EMC_CFG_DRAM_CLKSTOP_PD);
+ emc_sel_dpd_ctrl = next->emc_sel_dpd_ctrl;
+ emc_sel_dpd_ctrl &= ~(EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN);
+
+ emc_dbg(emc, INFO, "Clock change version: %d\n",
+ DVFS_CLOCK_CHANGE_VERSION);
+ emc_dbg(emc, INFO, "DRAM type = %d\n", dram_type);
+ emc_dbg(emc, INFO, "DRAM dev #: %u\n", emc->num_devices);
+ emc_dbg(emc, INFO, "Next EMC clksrc: 0x%08x\n", clksrc);
+ emc_dbg(emc, INFO, "DLL clksrc: 0x%08x\n", next->dll_clk_src);
+ emc_dbg(emc, INFO, "last rate: %u, next rate %u\n", last->rate,
+ next->rate);
+ emc_dbg(emc, INFO, "last period: %u, next period: %u\n",
+ src_clk_period, dst_clk_period);
+ emc_dbg(emc, INFO, " shared_zq_resistor: %d\n", !!shared_zq_resistor);
+ emc_dbg(emc, INFO, " num_channels: %u\n", emc->num_channels);
+ emc_dbg(emc, INFO, " opt_dll_mode: %d\n", opt_dll_mode);
+
+ /*
+ * Step 1:
+ * Pre DVFS SW sequence.
+ */
+ emc_dbg(emc, STEPS, "Step 1\n");
+ emc_dbg(emc, STEPS, "Step 1.1: Disable DLL temporarily.\n");
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+ tegra210_emc_timing_update(emc);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_CFG_DLL_EN, 0);
+
+ emc_dbg(emc, STEPS, "Step 1.2: Disable AUTOCAL temporarily.\n");
+
+ emc_auto_cal_config = next->emc_auto_cal_config;
+ auto_cal_en = emc_auto_cal_config & EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
+ emc_auto_cal_config &= ~EMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
+ emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL;
+ emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL;
+ emc_auto_cal_config |= auto_cal_en;
+ emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+ emc_readl(emc, EMC_AUTO_CAL_CONFIG); /* Flush write. */
+
+ emc_dbg(emc, STEPS, "Step 1.3: Disable other power features.\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, emc_cfg, EMC_CFG);
+ emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ if (next->periodic_training) {
+ tegra210_emc_reset_dram_clktree_values(next);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
+ 0);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
+ 0);
+
+ tegra210_emc_start_periodic_compensation(emc);
+
+ delay = 1000 * tegra210_emc_actual_osc_clocks(last->run_clocks);
+ udelay((delay / last->rate) + 2);
+
+ value = periodic_compensation_handler(emc, DVFS_SEQUENCE, fake,
+ next);
+ value = (value * 128 * next->rate / 1000) / 1000000;
+
+ if (next->periodic_training && value > next->tree_margin)
+ compensate_trimmer_applicable = true;
+ }
+
+ emc_writel(emc, EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, emc_cfg, EMC_CFG);
+ emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+ emc_writel(emc, emc_cfg_pipe_clk | EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON,
+ EMC_CFG_PIPE_CLK);
+ emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp &
+ ~EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE,
+ EMC_FDPD_CTRL_CMD_NO_RAMP);
+
+ bg_reg_mode_change =
+ ((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) ^
+ (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD)) ||
+ ((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) ^
+ (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD));
+ enable_bglp_reg =
+ (next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) == 0;
+ enable_bg_reg =
+ (next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) == 0;
+
+ if (bg_reg_mode_change) {
+ if (enable_bg_reg)
+ emc_writel(emc, last->burst_regs
+ [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+
+ if (enable_bglp_reg)
+ emc_writel(emc, last->burst_regs
+ [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+ }
+
+ /* Check if we need to turn on VREF generator. */
+ if ((((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 0) &&
+ ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 1)) ||
+ (((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) == 0) &&
+ ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) != 0))) {
+ u32 pad_tx_ctrl =
+ next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ u32 last_pad_tx_ctrl =
+ last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ u32 next_dq_e_ivref, next_dqs_e_ivref;
+
+ next_dqs_e_ivref = pad_tx_ctrl &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF;
+ next_dq_e_ivref = pad_tx_ctrl &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF;
+ value = (last_pad_tx_ctrl &
+ ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF &
+ ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) |
+ next_dq_e_ivref | next_dqs_e_ivref;
+ emc_writel(emc, value, EMC_PMACRO_DATA_PAD_TX_CTRL);
+ udelay(1);
+ } else if (bg_reg_mode_change) {
+ udelay(1);
+ }
+
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ /*
+ * Step 2:
+ * Prelock the DLL.
+ */
+ emc_dbg(emc, STEPS, "Step 2\n");
+
+ if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] &
+ EMC_CFG_DIG_DLL_CFG_DLL_EN) {
+ emc_dbg(emc, INFO, "Prelock enabled for target frequency.\n");
+ value = tegra210_emc_dll_prelock(emc, clksrc);
+ emc_dbg(emc, INFO, "DLL out: 0x%03x\n", value);
+ } else {
+ emc_dbg(emc, INFO, "Disabling DLL for target frequency.\n");
+ tegra210_emc_dll_disable(emc);
+ }
+
+ /*
+ * Step 3:
+ * Prepare autocal for the clock change.
+ */
+ emc_dbg(emc, STEPS, "Step 3\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, next->emc_auto_cal_config2, EMC_AUTO_CAL_CONFIG2);
+ emc_writel(emc, next->emc_auto_cal_config3, EMC_AUTO_CAL_CONFIG3);
+ emc_writel(emc, next->emc_auto_cal_config4, EMC_AUTO_CAL_CONFIG4);
+ emc_writel(emc, next->emc_auto_cal_config5, EMC_AUTO_CAL_CONFIG5);
+ emc_writel(emc, next->emc_auto_cal_config6, EMC_AUTO_CAL_CONFIG6);
+ emc_writel(emc, next->emc_auto_cal_config7, EMC_AUTO_CAL_CONFIG7);
+ emc_writel(emc, next->emc_auto_cal_config8, EMC_AUTO_CAL_CONFIG8);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ emc_auto_cal_config |= (EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START |
+ auto_cal_en);
+ emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+
+ /*
+ * Step 4:
+ * Update EMC_CFG. (??)
+ */
+ emc_dbg(emc, STEPS, "Step 4\n");
+
+ if (src_clk_period > 50000 && dram_type == DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 1, EMC_SELF_REF, 0);
+ else
+ emc_writel(emc, next->emc_cfg_2, EMC_CFG_2);
+
+ /*
+ * Step 5:
+ * Prepare reference variables for ZQCAL regs.
+ */
+ emc_dbg(emc, STEPS, "Step 5\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR4)
+ zq_wait_long = max((u32)1, div_o3(1000000, dst_clk_period));
+ else if (dram_type == DRAM_TYPE_LPDDR2 || is_lpddr3)
+ zq_wait_long = max(next->min_mrs_wait,
+ div_o3(360000, dst_clk_period)) + 4;
+ else if (dram_type == DRAM_TYPE_DDR3)
+ zq_wait_long = max((u32)256,
+ div_o3(320000, dst_clk_period) + 2);
+ else
+ zq_wait_long = 0;
+
+ /*
+ * Step 6:
+ * Training code - removed.
+ */
+ emc_dbg(emc, STEPS, "Step 6\n");
+
+ /*
+ * Step 7:
+ * Program FSP reference registers and send MRWs to new FSPWR.
+ */
+ emc_dbg(emc, STEPS, "Step 7\n");
+	emc_dbg(emc, SUB_STEPS, "Step 7.1: Bug 200024907 - Patch RP R2P\n");
+
+ /* WAR 200024907 */
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ u32 nRTP = 16;
+
+ if (src_clk_period >= 1000000 / 1866) /* 535.91 ps */
+ nRTP = 14;
+
+ if (src_clk_period >= 1000000 / 1600) /* 625.00 ps */
+ nRTP = 12;
+
+ if (src_clk_period >= 1000000 / 1333) /* 750.19 ps */
+ nRTP = 10;
+
+ if (src_clk_period >= 1000000 / 1066) /* 938.09 ps */
+ nRTP = 8;
+
+ deltaTWATM = max_t(u32, div_o3(7500, src_clk_period), 8);
+
+ /*
+ * Originally there was a + .5 in the tRPST calculation.
+		 * However, since we can't do FP in the kernel and the tRTM
+		 * computation was in a floating point ceiling function, adding
+		 * one to tRTM should be OK. There is no other source of
+		 * non-integer values, so the result was always going to be
+		 * something of the form: f_ceil(N + .5) = N + 1;
+ */
+ tRPST = (last->emc_mrw & 0x80) >> 7;
+ tRTM = fake->dram_timings[RL] + div_o3(3600, src_clk_period) +
+ max_t(u32, div_o3(7500, src_clk_period), 8) + tRPST +
+ 1 + nRTP;
+
+ emc_dbg(emc, INFO, "tRTM = %u, EMC_RP = %u\n", tRTM,
+ next->burst_regs[EMC_RP_INDEX]);
+
+ if (last->burst_regs[EMC_RP_INDEX] < tRTM) {
+ if (tRTM > (last->burst_regs[EMC_R2P_INDEX] +
+ last->burst_regs[EMC_RP_INDEX])) {
+ R2P_war = tRTM - last->burst_regs[EMC_RP_INDEX];
+ RP_war = last->burst_regs[EMC_RP_INDEX];
+ TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
+
+ if (R2P_war > 63) {
+ RP_war = R2P_war +
+ last->burst_regs[EMC_RP_INDEX] - 63;
+
+ if (TRPab_war < RP_war)
+ TRPab_war = RP_war;
+
+ R2P_war = 63;
+ }
+ } else {
+ R2P_war = last->burst_regs[EMC_R2P_INDEX];
+ RP_war = last->burst_regs[EMC_RP_INDEX];
+ TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
+ }
+
+ if (RP_war < deltaTWATM) {
+ W2P_war = last->burst_regs[EMC_W2P_INDEX]
+ + deltaTWATM - RP_war;
+ if (W2P_war > 63) {
+ RP_war = RP_war + W2P_war - 63;
+ if (TRPab_war < RP_war)
+ TRPab_war = RP_war;
+ W2P_war = 63;
+ }
+ } else {
+ W2P_war = last->burst_regs[
+ EMC_W2P_INDEX];
+ }
+
+ if ((last->burst_regs[EMC_W2P_INDEX] ^ W2P_war) ||
+ (last->burst_regs[EMC_R2P_INDEX] ^ R2P_war) ||
+ (last->burst_regs[EMC_RP_INDEX] ^ RP_war) ||
+ (last->burst_regs[EMC_TRPAB_INDEX] ^ TRPab_war)) {
+ emc_writel(emc, RP_war, EMC_RP);
+ emc_writel(emc, R2P_war, EMC_R2P);
+ emc_writel(emc, W2P_war, EMC_W2P);
+ emc_writel(emc, TRPab_war, EMC_TRPAB);
+ }
+
+ tegra210_emc_timing_update(emc);
+ } else {
+ emc_dbg(emc, INFO, "Skipped WAR\n");
+ }
+ }
+
+ if (!fsp_for_next_freq) {
+ mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x80;
+ mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0x00;
+ } else {
+ mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x40;
+ mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0xc0;
+ }
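+
+	/*
+	 * MR13 OP[6] is FSP-WR and OP[7] is FSP-OP in LPDDR4, so masking
+	 * with 0xffffff3f clears both fields and the 0x40/0x80/0xc0
+	 * constants pick which frequency set point subsequent mode
+	 * register writes target and which one the DRAM operates from.
+	 */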
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ emc_writel(emc, mr13_flip_fspwr, EMC_MRW3);
+ emc_writel(emc, next->emc_mrw, EMC_MRW);
+ emc_writel(emc, next->emc_mrw2, EMC_MRW2);
+ }
+
+ /*
+ * Step 8:
+ * Program the shadow registers.
+ */
+ emc_dbg(emc, STEPS, "Step 8\n");
+ emc_dbg(emc, SUB_STEPS, "Writing burst_regs\n");
+
+ for (i = 0; i < next->num_burst; i++) {
+ const u16 *offsets = emc->offsets->burst;
+ u16 offset;
+
+ if (!offsets[i])
+ continue;
+
+ value = next->burst_regs[i];
+ offset = offsets[i];
+
+ if (dram_type != DRAM_TYPE_LPDDR4 &&
+ (offset == EMC_MRW6 || offset == EMC_MRW7 ||
+ offset == EMC_MRW8 || offset == EMC_MRW9 ||
+ offset == EMC_MRW10 || offset == EMC_MRW11 ||
+ offset == EMC_MRW12 || offset == EMC_MRW13 ||
+ offset == EMC_MRW14 || offset == EMC_MRW15 ||
+ offset == EMC_TRAINING_CTRL))
+ continue;
+
+ /* Pain... And suffering. */
+ if (offset == EMC_CFG) {
+ value &= ~EMC_CFG_DRAM_ACPD;
+ value &= ~EMC_CFG_DYN_SELF_REF;
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value &= ~EMC_CFG_DRAM_CLKSTOP_SR;
+ value &= ~EMC_CFG_DRAM_CLKSTOP_PD;
+ }
+ } else if (offset == EMC_MRS_WAIT_CNT &&
+ dram_type == DRAM_TYPE_LPDDR2 &&
+ opt_zcal_en_cc && !opt_cc_short_zcal &&
+ opt_short_zcal) {
+ value = (value & ~(EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK <<
+ EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)) |
+ ((zq_wait_long & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) <<
+ EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);
+ } else if (offset == EMC_ZCAL_WAIT_CNT &&
+ dram_type == DRAM_TYPE_DDR3 && opt_zcal_en_cc &&
+ !opt_cc_short_zcal && opt_short_zcal) {
+ value = (value & ~(EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK <<
+ EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT)) |
+ ((zq_wait_long & EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK) <<
+				 EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT);
+ } else if (offset == EMC_ZCAL_INTERVAL && opt_zcal_en_cc) {
+ value = 0; /* EMC_ZCAL_INTERVAL reset value. */
+ } else if (offset == EMC_PMACRO_AUTOCAL_CFG_COMMON) {
+ value |= EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS;
+ } else if (offset == EMC_PMACRO_DATA_PAD_TX_CTRL) {
+ value &= ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+ } else if (offset == EMC_PMACRO_CMD_PAD_TX_CTRL) {
+ value |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+ value &= ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+ } else if (offset == EMC_PMACRO_BRICK_CTRL_RFU1) {
+ value &= 0xf800f800;
+ } else if (offset == EMC_PMACRO_COMMON_PAD_TX_CTRL) {
+ value &= 0xfffffff0;
+ }
+
+ emc_writel(emc, value, offset);
+ }
+
+ /* SW addition: do EMC refresh adjustment here. */
+ tegra210_emc_adjust_timing(emc, next);
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value = (23 << EMC_MRW_MRW_MA_SHIFT) |
+ (next->run_clocks & EMC_MRW_MRW_OP_MASK);
+ emc_writel(emc, value, EMC_MRW);
+ }
+
+ /* Per channel burst registers. */
+ emc_dbg(emc, SUB_STEPS, "Writing burst_regs_per_ch\n");
+
+ for (i = 0; i < next->num_burst_per_ch; i++) {
+ const struct tegra210_emc_per_channel_regs *burst =
+ emc->offsets->burst_per_channel;
+
+ if (!burst[i].offset)
+ continue;
+
+ if (dram_type != DRAM_TYPE_LPDDR4 &&
+ (burst[i].offset == EMC_MRW6 ||
+ burst[i].offset == EMC_MRW7 ||
+ burst[i].offset == EMC_MRW8 ||
+ burst[i].offset == EMC_MRW9 ||
+ burst[i].offset == EMC_MRW10 ||
+ burst[i].offset == EMC_MRW11 ||
+ burst[i].offset == EMC_MRW12 ||
+ burst[i].offset == EMC_MRW13 ||
+ burst[i].offset == EMC_MRW14 ||
+ burst[i].offset == EMC_MRW15))
+ continue;
+
+ /* Filter out second channel if not in DUAL_CHANNEL mode. */
+ if (emc->num_channels < 2 && burst[i].bank >= 1)
+ continue;
+
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->burst_reg_per_ch[i], burst[i].offset);
+ emc_channel_writel(emc, burst[i].bank,
+ next->burst_reg_per_ch[i],
+ burst[i].offset);
+ }
+
+ /* Vref regs. */
+ emc_dbg(emc, SUB_STEPS, "Writing vref_regs\n");
+
+ for (i = 0; i < next->vref_num; i++) {
+ const struct tegra210_emc_per_channel_regs *vref =
+ emc->offsets->vref_per_channel;
+
+ if (!vref[i].offset)
+ continue;
+
+ if (emc->num_channels < 2 && vref[i].bank >= 1)
+ continue;
+
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->vref_perch_regs[i], vref[i].offset);
+ emc_channel_writel(emc, vref[i].bank, next->vref_perch_regs[i],
+ vref[i].offset);
+ }
+
+ /* Trimmers. */
+ emc_dbg(emc, SUB_STEPS, "Writing trim_regs\n");
+
+ for (i = 0; i < next->num_trim; i++) {
+ const u16 *offsets = emc->offsets->trim;
+
+ if (!offsets[i])
+ continue;
+
+ if (compensate_trimmer_applicable &&
+ (offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
+ offsets[i] == EMC_DATA_BRLSHFT_0 ||
+ offsets[i] == EMC_DATA_BRLSHFT_1)) {
+ value = tegra210_emc_compensate(next, offsets[i]);
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ value, offsets[i]);
+ emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
+ (u32)(u64)offsets[i], value);
+ emc_writel(emc, value, offsets[i]);
+ } else {
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->trim_regs[i], offsets[i]);
+ emc_writel(emc, next->trim_regs[i], offsets[i]);
+ }
+ }
+
+ /* Per channel trimmers. */
+ emc_dbg(emc, SUB_STEPS, "Writing trim_regs_per_ch\n");
+
+ for (i = 0; i < next->num_trim_per_ch; i++) {
+ const struct tegra210_emc_per_channel_regs *trim =
+ &emc->offsets->trim_per_channel[0];
+ unsigned int offset;
+
+ if (!trim[i].offset)
+ continue;
+
+ if (emc->num_channels < 2 && trim[i].bank >= 1)
+ continue;
+
+ offset = trim[i].offset;
+
+ if (compensate_trimmer_applicable &&
+ (offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
+ offset == EMC_DATA_BRLSHFT_0 ||
+ offset == EMC_DATA_BRLSHFT_1)) {
+ value = tegra210_emc_compensate(next, offset);
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ value, offset);
+ emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n", offset,
+ value);
+ emc_channel_writel(emc, trim[i].bank, value, offset);
+ } else {
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->trim_perch_regs[i], offset);
+ emc_channel_writel(emc, trim[i].bank,
+ next->trim_perch_regs[i], offset);
+ }
+ }
+
+ emc_dbg(emc, SUB_STEPS, "Writing burst_mc_regs\n");
+
+ for (i = 0; i < next->num_mc_regs; i++) {
+ const u16 *offsets = emc->offsets->burst_mc;
+ u32 *values = next->burst_mc_regs;
+
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ values[i], offsets[i]);
+ mc_writel(emc->mc, values[i], offsets[i]);
+ }
+
+ /* Registers to be programmed on the faster clock. */
+ if (next->rate < last->rate) {
+ const u16 *la = emc->offsets->la_scale;
+
+ emc_dbg(emc, SUB_STEPS, "Writing la_scale_regs\n");
+
+ for (i = 0; i < next->num_up_down; i++) {
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->la_scale_regs[i], la[i]);
+ mc_writel(emc->mc, next->la_scale_regs[i], la[i]);
+ }
+ }
+
+ /* Flush all the burst register writes. */
+ mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+
+ /*
+ * Step 9:
+ * LPDDR4 section A.
+ */
+ emc_dbg(emc, STEPS, "Step 9\n");
+
+ value = next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX];
+ value &= ~EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK;
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
+ emc_writel(emc, value, EMC_ZCAL_WAIT_CNT);
+
+ value = emc_dbg | (EMC_DBG_WRITE_MUX_ACTIVE |
+ EMC_DBG_WRITE_ACTIVE_ONLY);
+
+ emc_writel(emc, value, EMC_DBG);
+ emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
+ emc_writel(emc, emc_dbg, EMC_DBG);
+ }
+
+ /*
+ * Step 10:
+ * LPDDR4 and DDR3 common section.
+ */
+ emc_dbg(emc, STEPS, "Step 10\n");
+
+ if (opt_dvfs_mode == MAN_SR || dram_type == DRAM_TYPE_LPDDR4) {
+ if (dram_type == DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 0x101, EMC_SELF_REF, 0);
+ else
+ ccfifo_writel(emc, 0x1, EMC_SELF_REF, 0);
+
+ if (dram_type == DRAM_TYPE_LPDDR4 &&
+ dst_clk_period <= zqcal_before_cc_cutoff) {
+ ccfifo_writel(emc, mr13_flip_fspwr ^ 0x40, EMC_MRW3, 0);
+ ccfifo_writel(emc, (next->burst_regs[EMC_MRW6_INDEX] &
+					    0xffff3f3f) |
+ (last->burst_regs[EMC_MRW6_INDEX] &
+					    0x0000c0c0), EMC_MRW6, 0);
+ ccfifo_writel(emc, (next->burst_regs[EMC_MRW14_INDEX] &
+					    0xffff0707) |
+ (last->burst_regs[EMC_MRW14_INDEX] &
+ 0x00003838), EMC_MRW14, 0);
+
+ if (emc->num_devices > 1) {
+ ccfifo_writel(emc,
+ (next->burst_regs[EMC_MRW7_INDEX] &
+					       0xffff3f3f) |
+ (last->burst_regs[EMC_MRW7_INDEX] &
+					       0x0000c0c0), EMC_MRW7, 0);
+ ccfifo_writel(emc,
+ (next->burst_regs[EMC_MRW15_INDEX] &
+					       0xffff0707) |
+ (last->burst_regs[EMC_MRW15_INDEX] &
+ 0x00003838), EMC_MRW15, 0);
+ }
+
+ if (opt_zcal_en_cc) {
+ if (emc->num_devices < 2)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT
+ | EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, 0);
+ else if (shared_zq_resistor)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT
+ | EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, 0);
+ else
+ ccfifo_writel(emc,
+ EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, 0);
+ }
+ }
+ }
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value = (1000 * fake->dram_timings[T_RP]) / src_clk_period;
+ ccfifo_writel(emc, mr13_flip_fspop | 0x8, EMC_MRW3, value);
+ ccfifo_writel(emc, 0, 0, tFC_lpddr4 / src_clk_period);
+ }
+
+ if (dram_type == DRAM_TYPE_LPDDR4 || opt_dvfs_mode != MAN_SR) {
+ delay = 30;
+
+ if (cya_allow_ref_cc) {
+ delay += (1000 * fake->dram_timings[T_RP]) /
+ src_clk_period;
+ delay += 4000 * fake->dram_timings[T_RFC];
+ }
+
+ ccfifo_writel(emc, emc_pin & ~(EMC_PIN_PIN_CKE_PER_DEV |
+ EMC_PIN_PIN_CKEB |
+ EMC_PIN_PIN_CKE),
+ EMC_PIN, delay);
+ }
+
+ /* calculate reference delay multiplier */
+ value = 1;
+
+ if (ref_b4_sref_en)
+ value++;
+
+ if (cya_allow_ref_cc)
+ value++;
+
+ if (cya_issue_pc_ref)
+ value++;
+
+ if (dram_type != DRAM_TYPE_LPDDR4) {
+ delay = ((1000 * fake->dram_timings[T_RP] / src_clk_period) +
+ (1000 * fake->dram_timings[T_RFC] / src_clk_period));
+ delay = value * delay + 20;
+ } else {
+ delay = 0;
+ }
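+
+	/*
+	 * For the non-LPDDR4 case the upcoming ramp down is thus held off
+	 * for roughly (tRP + tRFC) in cycles, scaled by the reference
+	 * delay multiplier, plus a 20 cycle margin; the 1000x factor
+	 * assumes dram_timings[] stores these values in nanoseconds.
+	 */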
+
+ /*
+ * Step 11:
+ * Ramp down.
+ */
+ emc_dbg(emc, STEPS, "Step 11\n");
+
+ ccfifo_writel(emc, 0x0, EMC_CFG_SYNC, delay);
+
+ value = emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE | EMC_DBG_WRITE_ACTIVE_ONLY;
+ ccfifo_writel(emc, value, EMC_DBG, 0);
+
+ ramp_down_wait = tegra210_emc_dvfs_power_ramp_down(emc, src_clk_period,
+ 0);
+
+ /*
+ * Step 12:
+ * And finally - trigger the clock change.
+ */
+ emc_dbg(emc, STEPS, "Step 12\n");
+
+ ccfifo_writel(emc, 1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE, 0);
+ value &= ~EMC_DBG_WRITE_ACTIVE_ONLY;
+ ccfifo_writel(emc, value, EMC_DBG, 0);
+
+ /*
+ * Step 13:
+ * Ramp up.
+ */
+ emc_dbg(emc, STEPS, "Step 13\n");
+
+ ramp_up_wait = tegra210_emc_dvfs_power_ramp_up(emc, dst_clk_period, 0);
+ ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
+
+ /*
+ * Step 14:
+	 * Bring up CKE pins.
+ */
+ emc_dbg(emc, STEPS, "Step 14\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value = emc_pin | EMC_PIN_PIN_CKE;
+
+ if (emc->num_devices <= 1)
+ value &= ~(EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV);
+ else
+ value |= EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV;
+
+ ccfifo_writel(emc, value, EMC_PIN, 0);
+ }
+
+ /*
+ * Step 15: (two step 15s ??)
+ * Calculate zqlatch wait time; has dependency on ramping times.
+ */
+ emc_dbg(emc, STEPS, "Step 15\n");
+
+ if (dst_clk_period <= zqcal_before_cc_cutoff) {
+ s32 t = (s32)(ramp_up_wait + ramp_down_wait) /
+ (s32)dst_clk_period;
+ zq_latch_dvfs_wait_time = (s32)tZQCAL_lpddr4_fc_adj - t;
+ } else {
+ zq_latch_dvfs_wait_time = tZQCAL_lpddr4_fc_adj -
+ div_o3(1000 * next->dram_timings[T_PDEX],
+ dst_clk_period);
+ }
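+
+	/*
+	 * In the first case the ZQ calibration was started before the
+	 * clock change, so only the part of tZQCAL not already consumed
+	 * by the power ramps remains; in the second the calibration
+	 * starts after the change and the wait is shortened by the
+	 * power-down exit time tPDEX instead.
+	 */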
+
+ emc_dbg(emc, INFO, "tZQCAL_lpddr4_fc_adj = %u\n", tZQCAL_lpddr4_fc_adj);
+ emc_dbg(emc, INFO, "dst_clk_period = %u\n",
+ dst_clk_period);
+ emc_dbg(emc, INFO, "next->dram_timings[T_PDEX] = %u\n",
+ next->dram_timings[T_PDEX]);
+ emc_dbg(emc, INFO, "zq_latch_dvfs_wait_time = %d\n",
+ max_t(s32, 0, zq_latch_dvfs_wait_time));
+
+ if (dram_type == DRAM_TYPE_LPDDR4 && opt_zcal_en_cc) {
+ delay = div_o3(1000 * next->dram_timings[T_PDEX],
+ dst_clk_period);
+
+ if (emc->num_devices < 2) {
+ if (dst_clk_period > zqcal_before_cc_cutoff)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+ delay);
+
+ value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+ ccfifo_writel(emc, value, EMC_MRW3, delay);
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+ ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD,
+ EMC_ZQ_CAL,
+ max_t(s32, 0, zq_latch_dvfs_wait_time));
+ } else if (shared_zq_resistor) {
+ if (dst_clk_period > zqcal_before_cc_cutoff)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+ delay);
+
+ ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+ max_t(s32, 0, zq_latch_dvfs_wait_time) +
+ delay);
+ ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD,
+ EMC_ZQ_CAL, 0);
+
+ value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+ ccfifo_writel(emc, value, EMC_MRW3, 0);
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+
+ ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+ tZQCAL_lpddr4 / dst_clk_period);
+ } else {
+ if (dst_clk_period > zqcal_before_cc_cutoff)
+ ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, delay);
+
+ value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+ ccfifo_writel(emc, value, EMC_MRW3, delay);
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+
+ ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+ max_t(s32, 0, zq_latch_dvfs_wait_time));
+ }
+ }
+
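+	/*
+	 * A CCFIFO write with a zero register offset is assumed to act as
+	 * a pure delay entry; both the tFC wait above and the zqlatch WAR
+	 * below rely on this.
+	 */
+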
+ /* WAR: delay for zqlatch */
+ ccfifo_writel(emc, 0, 0, 10);
+
+ /*
+ * Step 16:
+ * LPDDR4 Conditional Training Kickoff. Removed.
+ */
+
+ /*
+ * Step 17:
+ * MANSR exit self refresh.
+ */
+ emc_dbg(emc, STEPS, "Step 17\n");
+
+ if (opt_dvfs_mode == MAN_SR && dram_type != DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+
+ /*
+ * Step 18:
+ * Send MRWs to LPDDR3/DDR3.
+ */
+ emc_dbg(emc, STEPS, "Step 18\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ ccfifo_writel(emc, next->emc_mrw2, EMC_MRW2, 0);
+ ccfifo_writel(emc, next->emc_mrw, EMC_MRW, 0);
+ if (is_lpddr3)
+ ccfifo_writel(emc, next->emc_mrw4, EMC_MRW4, 0);
+ } else if (dram_type == DRAM_TYPE_DDR3) {
+ if (opt_dll_mode)
+ ccfifo_writel(emc, next->emc_emrs &
+ ~EMC_EMRS_USE_EMRS_LONG_CNT, EMC_EMRS, 0);
+ ccfifo_writel(emc, next->emc_emrs2 &
+ ~EMC_EMRS2_USE_EMRS2_LONG_CNT, EMC_EMRS2, 0);
+ ccfifo_writel(emc, next->emc_mrs |
+ EMC_EMRS_USE_EMRS_LONG_CNT, EMC_MRS, 0);
+ }
+
+ /*
+ * Step 19:
+ * ZQCAL for LPDDR3/DDR3
+ */
+ emc_dbg(emc, STEPS, "Step 19\n");
+
+ if (opt_zcal_en_cc) {
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ value = opt_cc_short_zcal ? 90000 : 360000;
+ value = div_o3(value, dst_clk_period);
+ value = value <<
+ EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT |
+ value <<
+ EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT;
+ ccfifo_writel(emc, value, EMC_MRS_WAIT_CNT2, 0);
+
+ value = opt_cc_short_zcal ? 0x56 : 0xab;
+ ccfifo_writel(emc, 2 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
+ EMC_MRW_USE_MRW_EXT_CNT |
+ 10 << EMC_MRW_MRW_MA_SHIFT |
+ value << EMC_MRW_MRW_OP_SHIFT,
+ EMC_MRW, 0);
+
+ if (emc->num_devices > 1) {
+ value = 1 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
+ EMC_MRW_USE_MRW_EXT_CNT |
+ 10 << EMC_MRW_MRW_MA_SHIFT |
+ value << EMC_MRW_MRW_OP_SHIFT;
+ ccfifo_writel(emc, value, EMC_MRW, 0);
+ }
+ } else if (dram_type == DRAM_TYPE_DDR3) {
+ value = opt_cc_short_zcal ? 0 : EMC_ZQ_CAL_LONG;
+
+ ccfifo_writel(emc, value |
+ 2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+ 0);
+
+ if (emc->num_devices > 1) {
+ value = value | 1 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD;
+ ccfifo_writel(emc, value, EMC_ZQ_CAL, 0);
+ }
+ }
+ }
+
+ if (bg_reg_mode_change) {
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+
+ if (ramp_up_wait <= 1250000)
+ delay = (1250000 - ramp_up_wait) / dst_clk_period;
+ else
+ delay = 0;
+
+ ccfifo_writel(emc,
+ next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX],
+ EMC_PMACRO_BG_BIAS_CTRL_0, delay);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ }
+
+ /*
+ * Step 20:
+ * Issue ref and optional QRST.
+ */
+ emc_dbg(emc, STEPS, "Step 20\n");
+
+ if (dram_type != DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+
+ if (opt_do_sw_qrst) {
+ ccfifo_writel(emc, 1, EMC_ISSUE_QRST, 0);
+ ccfifo_writel(emc, 0, EMC_ISSUE_QRST, 2);
+ }
+
+ /*
+ * Step 21:
+ * Restore ZCAL and ZCAL interval.
+ */
+ emc_dbg(emc, STEPS, "Step 21\n");
+
+ if (save_restore_clkstop_pd || opt_zcal_en_cc) {
+ ccfifo_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
+ EMC_DBG, 0);
+ if (opt_zcal_en_cc && dram_type != DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
+ EMC_ZCAL_INTERVAL, 0);
+
+ if (save_restore_clkstop_pd)
+ ccfifo_writel(emc, next->burst_regs[EMC_CFG_INDEX] &
+ ~EMC_CFG_DYN_SELF_REF,
+ EMC_CFG, 0);
+ ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
+ }
+
+ /*
+ * Step 22:
+ * Restore EMC_CFG_PIPE_CLK.
+ */
+ emc_dbg(emc, STEPS, "Step 22\n");
+
+ ccfifo_writel(emc, emc_cfg_pipe_clk, EMC_CFG_PIPE_CLK, 0);
+
+ if (bg_reg_mode_change) {
+ if (enable_bg_reg)
+ emc_writel(emc,
+ next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+ else
+ emc_writel(emc,
+ next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+ }
+
+	/*
+	 * Step 23:
+	 * Stall traffic, disable the digital DLL and trigger the clock
+	 * change.
+	 */
+ emc_dbg(emc, STEPS, "Step 23\n");
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
+ (2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+ tegra210_emc_do_clock_change(emc, clksrc);
+
+ /*
+ * Step 24:
+ * Save training results. Removed.
+ */
+
+ /*
+ * Step 25:
+ * Program MC updown registers.
+ */
+ emc_dbg(emc, STEPS, "Step 25\n");
+
+ if (next->rate > last->rate) {
+ for (i = 0; i < next->num_up_down; i++)
+ mc_writel(emc->mc, next->la_scale_regs[i],
+ emc->offsets->la_scale[i]);
+
+ tegra210_emc_timing_update(emc);
+ }
+
+ /*
+ * Step 26:
+ * Restore ZCAL registers.
+ */
+ emc_dbg(emc, STEPS, "Step 26\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
+ EMC_ZCAL_WAIT_CNT);
+ emc_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
+ EMC_ZCAL_INTERVAL);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ }
+
+ if (dram_type != DRAM_TYPE_LPDDR4 && opt_zcal_en_cc &&
+ !opt_short_zcal && opt_cc_short_zcal) {
+ udelay(2);
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ if (dram_type == DRAM_TYPE_LPDDR2)
+ emc_writel(emc, next->burst_regs[EMC_MRS_WAIT_CNT_INDEX],
+ EMC_MRS_WAIT_CNT);
+ else if (dram_type == DRAM_TYPE_DDR3)
+ emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
+ EMC_ZCAL_WAIT_CNT);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ }
+
+ /*
+ * Step 27:
+ * Restore EMC_CFG, FDPD registers.
+ */
+ emc_dbg(emc, STEPS, "Step 27\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, next->burst_regs[EMC_CFG_INDEX], EMC_CFG);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp,
+ EMC_FDPD_CTRL_CMD_NO_RAMP);
+ emc_writel(emc, next->emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+
+ /*
+ * Step 28:
+ * Training recover. Removed.
+ */
+ emc_dbg(emc, STEPS, "Step 28\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc,
+ next->burst_regs[EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX],
+ EMC_PMACRO_AUTOCAL_CFG_COMMON);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ /*
+ * Step 29:
+ * Power fix WAR.
+ */
+ emc_dbg(emc, STEPS, "Step 29\n");
+
+ emc_writel(emc, EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7,
+ EMC_PMACRO_CFG_PM_GLOBAL_0);
+ emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR,
+ EMC_PMACRO_TRAINING_CTRL_0);
+ emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR,
+ EMC_PMACRO_TRAINING_CTRL_1);
+ emc_writel(emc, 0, EMC_PMACRO_CFG_PM_GLOBAL_0);
+
+ /*
+ * Step 30:
+ * Re-enable autocal.
+ */
+ emc_dbg(emc, STEPS, "Step 30: Re-enable DLL and AUTOCAL\n");
+
+ if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] & EMC_CFG_DIG_DLL_CFG_DLL_EN) {
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+ value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
+ (2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+ tegra210_emc_timing_update(emc);
+ }
+
+ emc_writel(emc, next->emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+
+ /* Done! Yay. */
+}
+
+const struct tegra210_emc_sequence tegra210_emc_r21021 = {
+ .revision = 0x7,
+ .set_clock = tegra210_emc_r21021_set_clock,
+ .periodic_compensation = tegra210_emc_r21021_periodic_compensation,
+};
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
new file mode 100644
index 000000000000..cdd663ba4733
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -0,0 +1,2100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/mc.h>
+
+#include "tegra210-emc.h"
+#include "tegra210-mc.h"
+
+/* CLK_RST_CONTROLLER_CLK_SOURCE_EMC */
+#define EMC_CLK_EMC_2X_CLK_SRC_SHIFT 29
+#define EMC_CLK_EMC_2X_CLK_SRC_MASK \
+ (0x7 << EMC_CLK_EMC_2X_CLK_SRC_SHIFT)
+#define EMC_CLK_SOURCE_PLLM_LJ 0x4
+#define EMC_CLK_SOURCE_PLLMB_LJ 0x5
+#define EMC_CLK_FORCE_CC_TRIGGER BIT(27)
+#define EMC_CLK_MC_EMC_SAME_FREQ BIT(16)
+#define EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT 0
+#define EMC_CLK_EMC_2X_CLK_DIVISOR_MASK \
+ (0xff << EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT)
+
+/* CLK_RST_CONTROLLER_CLK_SOURCE_EMC_DLL */
+#define DLL_CLK_EMC_DLL_CLK_SRC_SHIFT 29
+#define DLL_CLK_EMC_DLL_CLK_SRC_MASK \
+ (0x7 << DLL_CLK_EMC_DLL_CLK_SRC_SHIFT)
+#define DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT 10
+#define DLL_CLK_EMC_DLL_DDLL_CLK_SEL_MASK \
+ (0x3 << DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT)
+#define PLLM_VCOA 0
+#define PLLM_VCOB 1
+#define EMC_DLL_SWITCH_OUT 2
+#define DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT 0
+#define DLL_CLK_EMC_DLL_CLK_DIVISOR_MASK \
+ (0xff << DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT)
+
+/* MC_EMEM_ARB_MISC0 */
+#define MC_EMEM_ARB_MISC0_EMC_SAME_FREQ BIT(27)
+
+/* EMC_DATA_BRLSHFT_X */
+#define EMC0_EMC_DATA_BRLSHFT_0_INDEX 2
+#define EMC1_EMC_DATA_BRLSHFT_0_INDEX 3
+#define EMC0_EMC_DATA_BRLSHFT_1_INDEX 4
+#define EMC1_EMC_DATA_BRLSHFT_1_INDEX 5
+
+#define TRIM_REG(chan, rank, reg, byte) \
+ (((EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte ## _MASK & \
+ next->trim_regs[EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## \
+ rank ## _ ## reg ## _INDEX]) >> \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte ## _SHIFT) \
+ + \
+ (((EMC_DATA_BRLSHFT_ ## rank ## _RANK ## rank ## _BYTE ## \
+ byte ## _DATA_BRLSHFT_MASK & \
+ next->trim_perch_regs[EMC ## chan ## \
+ _EMC_DATA_BRLSHFT_ ## rank ## _INDEX]) >> \
+ EMC_DATA_BRLSHFT_ ## rank ## _RANK ## rank ## _BYTE ## \
+ byte ## _DATA_BRLSHFT_SHIFT) * 64))
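+
+/*
+ * TRIM_REG() folds a byte lane's coarse barrel-shifter setting into its
+ * fine DDLL trim to produce one absolute tap count; each barrel-shift
+ * step is assumed to be worth 64 DDLL taps, which matches the / 64 and
+ * % 64 splits in tegra210_emc_compensate() below.
+ */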
+
+#define CALC_TEMP(rank, reg, byte1, byte2, n) \
+ (((new[n] << EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## \
+ reg ## _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte1 ## _SHIFT) & \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte1 ## _MASK) \
+ | \
+ ((new[n + 1] << EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ##\
+ reg ## _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte2 ## _SHIFT) & \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte2 ## _MASK))
+
+#define REFRESH_SPEEDUP(value, speedup) \
+ (((value) & 0xffff0000) | ((value) & 0xffff) * (speedup))
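+
+/*
+ * REFRESH_SPEEDUP() scales only the low 16-bit field of a refresh
+ * register by the speedup factor and leaves the upper half untouched,
+ * e.g. (0x00050320, 2) becomes 0x00050640; callers are assumed to pick
+ * speedups that keep the product within 16 bits, as the multiply is
+ * not clamped.
+ */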
+
+#define LPDDR2_MR4_SRR GENMASK(2, 0)
+
+static const struct tegra210_emc_sequence *tegra210_emc_sequences[] = {
+ &tegra210_emc_r21021,
+};
+
+static const struct tegra210_emc_table_register_offsets
+tegra210_emc_table_register_offsets = {
+ .burst = {
+ EMC_RC,
+ EMC_RFC,
+ EMC_RFCPB,
+ EMC_REFCTRL2,
+ EMC_RFC_SLR,
+ EMC_RAS,
+ EMC_RP,
+ EMC_R2W,
+ EMC_W2R,
+ EMC_R2P,
+ EMC_W2P,
+ EMC_R2R,
+ EMC_TPPD,
+ EMC_CCDMW,
+ EMC_RD_RCD,
+ EMC_WR_RCD,
+ EMC_RRD,
+ EMC_REXT,
+ EMC_WEXT,
+ EMC_WDV_CHK,
+ EMC_WDV,
+ EMC_WSV,
+ EMC_WEV,
+ EMC_WDV_MASK,
+ EMC_WS_DURATION,
+ EMC_WE_DURATION,
+ EMC_QUSE,
+ EMC_QUSE_WIDTH,
+ EMC_IBDLY,
+ EMC_OBDLY,
+ EMC_EINPUT,
+ EMC_MRW6,
+ EMC_EINPUT_DURATION,
+ EMC_PUTERM_EXTRA,
+ EMC_PUTERM_WIDTH,
+ EMC_QRST,
+ EMC_QSAFE,
+ EMC_RDV,
+ EMC_RDV_MASK,
+ EMC_RDV_EARLY,
+ EMC_RDV_EARLY_MASK,
+ EMC_REFRESH,
+ EMC_BURST_REFRESH_NUM,
+ EMC_PRE_REFRESH_REQ_CNT,
+ EMC_PDEX2WR,
+ EMC_PDEX2RD,
+ EMC_PCHG2PDEN,
+ EMC_ACT2PDEN,
+ EMC_AR2PDEN,
+ EMC_RW2PDEN,
+ EMC_CKE2PDEN,
+ EMC_PDEX2CKE,
+ EMC_PDEX2MRR,
+ EMC_TXSR,
+ EMC_TXSRDLL,
+ EMC_TCKE,
+ EMC_TCKESR,
+ EMC_TPD,
+ EMC_TFAW,
+ EMC_TRPAB,
+ EMC_TCLKSTABLE,
+ EMC_TCLKSTOP,
+ EMC_MRW7,
+ EMC_TREFBW,
+ EMC_ODT_WRITE,
+ EMC_FBIO_CFG5,
+ EMC_FBIO_CFG7,
+ EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_PERIOD,
+ EMC_PMACRO_IB_RXRT,
+ EMC_CFG_PIPE_1,
+ EMC_CFG_PIPE_2,
+ EMC_PMACRO_QUSE_DDLL_RANK0_4,
+ EMC_PMACRO_QUSE_DDLL_RANK0_5,
+ EMC_PMACRO_QUSE_DDLL_RANK1_4,
+ EMC_PMACRO_QUSE_DDLL_RANK1_5,
+ EMC_MRW8,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_5,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_5,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_5,
+ EMC_PMACRO_DDLL_LONG_CMD_0,
+ EMC_PMACRO_DDLL_LONG_CMD_1,
+ EMC_PMACRO_DDLL_LONG_CMD_2,
+ EMC_PMACRO_DDLL_LONG_CMD_3,
+ EMC_PMACRO_DDLL_LONG_CMD_4,
+ EMC_PMACRO_DDLL_SHORT_CMD_0,
+ EMC_PMACRO_DDLL_SHORT_CMD_1,
+ EMC_PMACRO_DDLL_SHORT_CMD_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_3,
+ EMC_TXDSRVTTGEN,
+ EMC_FDPD_CTRL_DQ,
+ EMC_FDPD_CTRL_CMD,
+ EMC_FBIO_SPARE,
+ EMC_ZCAL_INTERVAL,
+ EMC_ZCAL_WAIT_CNT,
+ EMC_MRS_WAIT_CNT,
+ EMC_MRS_WAIT_CNT2,
+ EMC_AUTO_CAL_CHANNEL,
+ EMC_DLL_CFG_0,
+ EMC_DLL_CFG_1,
+ EMC_PMACRO_AUTOCAL_CFG_COMMON,
+ EMC_PMACRO_ZCTRL,
+ EMC_CFG,
+ EMC_CFG_PIPE,
+ EMC_DYN_SELF_REF_CONTROL,
+ EMC_QPOP,
+ EMC_DQS_BRLSHFT_0,
+ EMC_DQS_BRLSHFT_1,
+ EMC_CMD_BRLSHFT_2,
+ EMC_CMD_BRLSHFT_3,
+ EMC_PMACRO_PAD_CFG_CTRL,
+ EMC_PMACRO_DATA_PAD_RX_CTRL,
+ EMC_PMACRO_CMD_PAD_RX_CTRL,
+ EMC_PMACRO_DATA_RX_TERM_MODE,
+ EMC_PMACRO_CMD_RX_TERM_MODE,
+ EMC_PMACRO_CMD_PAD_TX_CTRL,
+ EMC_PMACRO_DATA_PAD_TX_CTRL,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL,
+ EMC_PMACRO_VTTGEN_CTRL_0,
+ EMC_PMACRO_VTTGEN_CTRL_1,
+ EMC_PMACRO_VTTGEN_CTRL_2,
+ EMC_PMACRO_BRICK_CTRL_RFU1,
+ EMC_PMACRO_CMD_BRICK_CTRL_FDPD,
+ EMC_PMACRO_BRICK_CTRL_RFU2,
+ EMC_PMACRO_DATA_BRICK_CTRL_FDPD,
+ EMC_PMACRO_BG_BIAS_CTRL_0,
+ EMC_CFG_3,
+ EMC_PMACRO_TX_PWRD_0,
+ EMC_PMACRO_TX_PWRD_1,
+ EMC_PMACRO_TX_PWRD_2,
+ EMC_PMACRO_TX_PWRD_3,
+ EMC_PMACRO_TX_PWRD_4,
+ EMC_PMACRO_TX_PWRD_5,
+ EMC_CONFIG_SAMPLE_DELAY,
+ EMC_PMACRO_TX_SEL_CLK_SRC_0,
+ EMC_PMACRO_TX_SEL_CLK_SRC_1,
+ EMC_PMACRO_TX_SEL_CLK_SRC_2,
+ EMC_PMACRO_TX_SEL_CLK_SRC_3,
+ EMC_PMACRO_TX_SEL_CLK_SRC_4,
+ EMC_PMACRO_TX_SEL_CLK_SRC_5,
+ EMC_PMACRO_DDLL_BYPASS,
+ EMC_PMACRO_DDLL_PWRD_0,
+ EMC_PMACRO_DDLL_PWRD_1,
+ EMC_PMACRO_DDLL_PWRD_2,
+ EMC_PMACRO_CMD_CTRL_0,
+ EMC_PMACRO_CMD_CTRL_1,
+ EMC_PMACRO_CMD_CTRL_2,
+ EMC_TR_TIMING_0,
+ EMC_TR_DVFS,
+ EMC_TR_CTRL_1,
+ EMC_TR_RDV,
+ EMC_TR_QPOP,
+ EMC_TR_RDV_MASK,
+ EMC_MRW14,
+ EMC_TR_QSAFE,
+ EMC_TR_QRST,
+ EMC_TRAINING_CTRL,
+ EMC_TRAINING_SETTLE,
+ EMC_TRAINING_VREF_SETTLE,
+ EMC_TRAINING_CA_FINE_CTRL,
+ EMC_TRAINING_CA_CTRL_MISC,
+ EMC_TRAINING_CA_CTRL_MISC1,
+ EMC_TRAINING_CA_VREF_CTRL,
+ EMC_TRAINING_QUSE_CORS_CTRL,
+ EMC_TRAINING_QUSE_FINE_CTRL,
+ EMC_TRAINING_QUSE_CTRL_MISC,
+ EMC_TRAINING_QUSE_VREF_CTRL,
+ EMC_TRAINING_READ_FINE_CTRL,
+ EMC_TRAINING_READ_CTRL_MISC,
+ EMC_TRAINING_READ_VREF_CTRL,
+ EMC_TRAINING_WRITE_FINE_CTRL,
+ EMC_TRAINING_WRITE_CTRL_MISC,
+ EMC_TRAINING_WRITE_VREF_CTRL,
+ EMC_TRAINING_MPC,
+ EMC_MRW15,
+ },
+ .trim = {
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_0,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_1,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_2,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_3,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_0,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_1,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_2,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_3,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_2,
+ EMC_PMACRO_IB_VREF_DQS_0,
+ EMC_PMACRO_IB_VREF_DQS_1,
+ EMC_PMACRO_IB_VREF_DQ_0,
+ EMC_PMACRO_IB_VREF_DQ_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_2,
+ EMC_PMACRO_QUSE_DDLL_RANK0_0,
+ EMC_PMACRO_QUSE_DDLL_RANK0_1,
+ EMC_PMACRO_QUSE_DDLL_RANK0_2,
+ EMC_PMACRO_QUSE_DDLL_RANK0_3,
+ EMC_PMACRO_QUSE_DDLL_RANK1_0,
+ EMC_PMACRO_QUSE_DDLL_RANK1_1,
+ EMC_PMACRO_QUSE_DDLL_RANK1_2,
+ EMC_PMACRO_QUSE_DDLL_RANK1_3
+ },
+ .burst_mc = {
+ MC_EMEM_ARB_CFG,
+ MC_EMEM_ARB_OUTSTANDING_REQ,
+ MC_EMEM_ARB_REFPB_HP_CTRL,
+ MC_EMEM_ARB_REFPB_BANK_CTRL,
+ MC_EMEM_ARB_TIMING_RCD,
+ MC_EMEM_ARB_TIMING_RP,
+ MC_EMEM_ARB_TIMING_RC,
+ MC_EMEM_ARB_TIMING_RAS,
+ MC_EMEM_ARB_TIMING_FAW,
+ MC_EMEM_ARB_TIMING_RRD,
+ MC_EMEM_ARB_TIMING_RAP2PRE,
+ MC_EMEM_ARB_TIMING_WAP2PRE,
+ MC_EMEM_ARB_TIMING_R2R,
+ MC_EMEM_ARB_TIMING_W2W,
+ MC_EMEM_ARB_TIMING_R2W,
+ MC_EMEM_ARB_TIMING_CCDMW,
+ MC_EMEM_ARB_TIMING_W2R,
+ MC_EMEM_ARB_TIMING_RFCPB,
+ MC_EMEM_ARB_DA_TURNS,
+ MC_EMEM_ARB_DA_COVERS,
+ MC_EMEM_ARB_MISC0,
+ MC_EMEM_ARB_MISC1,
+ MC_EMEM_ARB_MISC2,
+ MC_EMEM_ARB_RING1_THROTTLE,
+ MC_EMEM_ARB_DHYST_CTRL,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_0,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_1,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_2,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_3,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_4,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_5,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_6,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_7,
+ },
+ .la_scale = {
+ MC_MLL_MPCORER_PTSA_RATE,
+ MC_FTOP_PTSA_RATE,
+ MC_PTSA_GRANT_DECREMENT,
+ MC_LATENCY_ALLOWANCE_XUSB_0,
+ MC_LATENCY_ALLOWANCE_XUSB_1,
+ MC_LATENCY_ALLOWANCE_TSEC_0,
+ MC_LATENCY_ALLOWANCE_SDMMCA_0,
+ MC_LATENCY_ALLOWANCE_SDMMCAA_0,
+ MC_LATENCY_ALLOWANCE_SDMMC_0,
+ MC_LATENCY_ALLOWANCE_SDMMCAB_0,
+ MC_LATENCY_ALLOWANCE_PPCS_0,
+ MC_LATENCY_ALLOWANCE_PPCS_1,
+ MC_LATENCY_ALLOWANCE_MPCORE_0,
+ MC_LATENCY_ALLOWANCE_HC_0,
+ MC_LATENCY_ALLOWANCE_HC_1,
+ MC_LATENCY_ALLOWANCE_AVPC_0,
+ MC_LATENCY_ALLOWANCE_GPU_0,
+ MC_LATENCY_ALLOWANCE_GPU2_0,
+ MC_LATENCY_ALLOWANCE_NVENC_0,
+ MC_LATENCY_ALLOWANCE_NVDEC_0,
+ MC_LATENCY_ALLOWANCE_VIC_0,
+ MC_LATENCY_ALLOWANCE_VI2_0,
+ MC_LATENCY_ALLOWANCE_ISP2_0,
+ MC_LATENCY_ALLOWANCE_ISP2_1,
+ },
+ .burst_per_channel = {
+ { .bank = 0, .offset = EMC_MRW10, },
+ { .bank = 1, .offset = EMC_MRW10, },
+ { .bank = 0, .offset = EMC_MRW11, },
+ { .bank = 1, .offset = EMC_MRW11, },
+ { .bank = 0, .offset = EMC_MRW12, },
+ { .bank = 1, .offset = EMC_MRW12, },
+ { .bank = 0, .offset = EMC_MRW13, },
+ { .bank = 1, .offset = EMC_MRW13, },
+ },
+ .trim_per_channel = {
+ { .bank = 0, .offset = EMC_CMD_BRLSHFT_0, },
+ { .bank = 1, .offset = EMC_CMD_BRLSHFT_1, },
+ { .bank = 0, .offset = EMC_DATA_BRLSHFT_0, },
+ { .bank = 1, .offset = EMC_DATA_BRLSHFT_0, },
+ { .bank = 0, .offset = EMC_DATA_BRLSHFT_1, },
+ { .bank = 1, .offset = EMC_DATA_BRLSHFT_1, },
+ { .bank = 0, .offset = EMC_QUSE_BRLSHFT_0, },
+ { .bank = 1, .offset = EMC_QUSE_BRLSHFT_1, },
+ { .bank = 0, .offset = EMC_QUSE_BRLSHFT_2, },
+ { .bank = 1, .offset = EMC_QUSE_BRLSHFT_3, },
+ },
+ .vref_per_channel = {
+ {
+ .bank = 0,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK0,
+ }, {
+ .bank = 1,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK0,
+ }, {
+ .bank = 0,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK1,
+ }, {
+ .bank = 1,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK1,
+ },
+ },
+};
+
+static void tegra210_emc_train(struct timer_list *timer)
+{
+ struct tegra210_emc *emc = from_timer(emc, timer, training);
+ unsigned long flags;
+
+ if (!emc->last)
+ return;
+
+ spin_lock_irqsave(&emc->lock, flags);
+
+ if (emc->sequence->periodic_compensation)
+ emc->sequence->periodic_compensation(emc);
+
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ mod_timer(&emc->training,
+ jiffies + msecs_to_jiffies(emc->training_interval));
+}
+
+static void tegra210_emc_training_start(struct tegra210_emc *emc)
+{
+ mod_timer(&emc->training,
+ jiffies + msecs_to_jiffies(emc->training_interval));
+}
+
+static void tegra210_emc_training_stop(struct tegra210_emc *emc)
+{
+ del_timer(&emc->training);
+}
+
+static unsigned int tegra210_emc_get_temperature(struct tegra210_emc *emc)
+{
+ unsigned long flags;
+ u32 value, max = 0;
+ unsigned int i;
+
+ spin_lock_irqsave(&emc->lock, flags);
+
+ for (i = 0; i < emc->num_devices; i++) {
+ value = tegra210_emc_mrr_read(emc, i, 4);
+
+ if (value & BIT(7))
+ dev_dbg(emc->dev,
+ "sensor reading changed for device %u: %08x\n",
+ i, value);
+
+ value = FIELD_GET(LPDDR2_MR4_SRR, value);
+ if (value > max)
+ max = value;
+ }
+
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ return max;
+}
+
+static void tegra210_emc_poll_refresh(struct timer_list *timer)
+{
+ struct tegra210_emc *emc = from_timer(emc, timer, refresh_timer);
+ unsigned int temperature;
+
+ if (!emc->debugfs.temperature)
+ temperature = tegra210_emc_get_temperature(emc);
+ else
+ temperature = emc->debugfs.temperature;
+
+ if (temperature == emc->temperature)
+ goto reset;
+
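+	/*
+	 * The switch below mirrors the LPDDR MR4 refresh-rate codes: 0-3
+	 * need no derating, 4 requests 2x refresh, 5 requests 4x, and 6-7
+	 * mark the high temperature range where the derated (throttle)
+	 * timings are used.
+	 */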
+ switch (temperature) {
+ case 0 ... 3:
+ /* temperature is fine, using regular refresh */
+ dev_dbg(emc->dev, "switching to nominal refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_NOMINAL);
+ break;
+
+ case 4:
+ dev_dbg(emc->dev, "switching to 2x refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_2X);
+ break;
+
+ case 5:
+ dev_dbg(emc->dev, "switching to 4x refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_4X);
+ break;
+
+ case 6 ... 7:
+ dev_dbg(emc->dev, "switching to throttle refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_THROTTLE);
+ break;
+
+ default:
+ WARN(1, "invalid DRAM temperature state %u\n", temperature);
+ return;
+ }
+
+ emc->temperature = temperature;
+
+reset:
+ if (atomic_read(&emc->refresh_poll) > 0) {
+ unsigned int interval = emc->refresh_poll_interval;
+ unsigned int timeout = msecs_to_jiffies(interval);
+
+ mod_timer(&emc->refresh_timer, jiffies + timeout);
+ }
+}
+
+static void tegra210_emc_poll_refresh_stop(struct tegra210_emc *emc)
+{
+ atomic_set(&emc->refresh_poll, 0);
+ del_timer_sync(&emc->refresh_timer);
+}
+
+static void tegra210_emc_poll_refresh_start(struct tegra210_emc *emc)
+{
+ atomic_set(&emc->refresh_poll, 1);
+
+ mod_timer(&emc->refresh_timer,
+ jiffies + msecs_to_jiffies(emc->refresh_poll_interval));
+}
+
+static int tegra210_emc_cd_max_state(struct thermal_cooling_device *cd,
+ unsigned long *state)
+{
+ *state = 1;
+
+ return 0;
+}
+
+static int tegra210_emc_cd_get_state(struct thermal_cooling_device *cd,
+ unsigned long *state)
+{
+ struct tegra210_emc *emc = cd->devdata;
+
+ *state = atomic_read(&emc->refresh_poll);
+
+ return 0;
+}
+
+static int tegra210_emc_cd_set_state(struct thermal_cooling_device *cd,
+ unsigned long state)
+{
+ struct tegra210_emc *emc = cd->devdata;
+
+ if (state == atomic_read(&emc->refresh_poll))
+ return 0;
+
+ if (state)
+ tegra210_emc_poll_refresh_start(emc);
+ else
+ tegra210_emc_poll_refresh_stop(emc);
+
+ return 0;
+}
+
+static struct thermal_cooling_device_ops tegra210_emc_cd_ops = {
+ .get_max_state = tegra210_emc_cd_max_state,
+ .get_cur_state = tegra210_emc_cd_get_state,
+ .set_cur_state = tegra210_emc_cd_set_state,
+};
+
+static void tegra210_emc_set_clock(struct tegra210_emc *emc, u32 clksrc)
+{
+ emc->sequence->set_clock(emc, clksrc);
+
+ if (emc->next->periodic_training)
+ tegra210_emc_training_start(emc);
+ else
+ tegra210_emc_training_stop(emc);
+}
+
+static void tegra210_change_dll_src(struct tegra210_emc *emc,
+ u32 clksrc)
+{
+ u32 dll_setting = emc->next->dll_clk_src;
+ u32 emc_clk_src;
+ u32 emc_clk_div;
+
+ emc_clk_src = (clksrc & EMC_CLK_EMC_2X_CLK_SRC_MASK) >>
+ EMC_CLK_EMC_2X_CLK_SRC_SHIFT;
+ emc_clk_div = (clksrc & EMC_CLK_EMC_2X_CLK_DIVISOR_MASK) >>
+ EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT;
+
+ dll_setting &= ~(DLL_CLK_EMC_DLL_CLK_SRC_MASK |
+ DLL_CLK_EMC_DLL_CLK_DIVISOR_MASK);
+ dll_setting |= emc_clk_src << DLL_CLK_EMC_DLL_CLK_SRC_SHIFT;
+ dll_setting |= emc_clk_div << DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT;
+
+ dll_setting &= ~DLL_CLK_EMC_DLL_DDLL_CLK_SEL_MASK;
+ if (emc_clk_src == EMC_CLK_SOURCE_PLLMB_LJ)
+ dll_setting |= (PLLM_VCOB <<
+ DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+ else if (emc_clk_src == EMC_CLK_SOURCE_PLLM_LJ)
+ dll_setting |= (PLLM_VCOA <<
+ DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+ else
+ dll_setting |= (EMC_DLL_SWITCH_OUT <<
+ DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+
+ tegra210_clk_emc_dll_update_setting(dll_setting);
+
+ if (emc->next->clk_out_enb_x_0_clk_enb_emc_dll)
+ tegra210_clk_emc_dll_enable(true);
+ else
+ tegra210_clk_emc_dll_enable(false);
+}
+
+int tegra210_emc_set_refresh(struct tegra210_emc *emc,
+ enum tegra210_emc_refresh refresh)
+{
+ struct tegra210_emc_timing *timings;
+ unsigned long flags;
+
+ if ((emc->dram_type != DRAM_TYPE_LPDDR2 &&
+ emc->dram_type != DRAM_TYPE_LPDDR4) ||
+ !emc->last)
+ return -ENODEV;
+
+ if (refresh > TEGRA210_EMC_REFRESH_THROTTLE)
+ return -EINVAL;
+
+ if (refresh == emc->refresh)
+ return 0;
+
+ spin_lock_irqsave(&emc->lock, flags);
+
+ if (refresh == TEGRA210_EMC_REFRESH_THROTTLE && emc->derated)
+ timings = emc->derated;
+ else
+ timings = emc->nominal;
+
+ if (timings != emc->timings) {
+ unsigned int index = emc->last - emc->timings;
+ u32 clksrc;
+
+ clksrc = emc->provider.configs[index].value |
+ EMC_CLK_FORCE_CC_TRIGGER;
+
+ emc->next = &timings[index];
+ emc->timings = timings;
+
+ tegra210_emc_set_clock(emc, clksrc);
+ } else {
+ tegra210_emc_adjust_timing(emc, emc->last);
+ tegra210_emc_timing_update(emc);
+
+ if (refresh != TEGRA210_EMC_REFRESH_NOMINAL)
+ emc_writel(emc, EMC_REF_REF_CMD, EMC_REF);
+ }
+
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ return 0;
+}
+
+u32 tegra210_emc_mrr_read(struct tegra210_emc *emc, unsigned int chip,
+ unsigned int address)
+{
+ u32 value, ret = 0;
+ unsigned int i;
+
+ value = (chip & EMC_MRR_DEV_SEL_MASK) << EMC_MRR_DEV_SEL_SHIFT |
+ (address & EMC_MRR_MA_MASK) << EMC_MRR_MA_SHIFT;
+ emc_writel(emc, value, EMC_MRR);
+
+ for (i = 0; i < emc->num_channels; i++)
+ WARN(tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_MRR_DIVLD, 1),
+ "Timed out waiting for MRR %u (ch=%u)\n", address, i);
+
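+	/*
+	 * Each channel supplies 16 bits of MRR data; the per-channel
+	 * results are packed below with channel 0 ending up in the upper
+	 * half on a two channel configuration.
+	 */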
+ for (i = 0; i < emc->num_channels; i++) {
+ value = emc_channel_readl(emc, i, EMC_MRR);
+ value &= EMC_MRR_DATA_MASK;
+
+ ret = (ret << 16) | value;
+ }
+
+ return ret;
+}
+
+void tegra210_emc_do_clock_change(struct tegra210_emc *emc, u32 clksrc)
+{
+ int err;
+
+ mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+ emc_readl(emc, EMC_INTSTATUS);
+
+ tegra210_clk_emc_update_setting(clksrc);
+
+ err = tegra210_emc_wait_for_update(emc, 0, EMC_INTSTATUS,
+ EMC_INTSTATUS_CLKCHANGE_COMPLETE,
+ true);
+ if (err)
+ dev_warn(emc->dev, "clock change completion error: %d\n", err);
+}
+
+struct tegra210_emc_timing *tegra210_emc_find_timing(struct tegra210_emc *emc,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (emc->timings[i].rate * 1000UL == rate)
+ return &emc->timings[i];
+
+ return NULL;
+}
+
+int tegra210_emc_wait_for_update(struct tegra210_emc *emc, unsigned int channel,
+ unsigned int offset, u32 bit_mask, bool state)
+{
+ unsigned int i;
+ u32 value;
+
+ for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
+ value = emc_channel_readl(emc, channel, offset);
+ if (!!(value & bit_mask) == state)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+void tegra210_emc_set_shadow_bypass(struct tegra210_emc *emc, int set)
+{
+ u32 emc_dbg = emc_readl(emc, EMC_DBG);
+
+ if (set)
+ emc_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
+ else
+ emc_writel(emc, emc_dbg & ~EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
+}
+
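+/*
+ * Bit 0 of the EMRS value is assumed to be the DDR3 MR1 "DLL disable"
+ * bit (A0), so a timing with the bit set runs with the DLL off and
+ * this returns 0.
+ */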
+u32 tegra210_emc_get_dll_state(struct tegra210_emc_timing *next)
+{
+ if (next->emc_emrs & 0x1)
+ return 0;
+
+ return 1;
+}
+
+void tegra210_emc_timing_update(struct tegra210_emc *emc)
+{
+ unsigned int i;
+ int err = 0;
+
+ emc_writel(emc, 0x1, EMC_TIMING_CONTROL);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ err |= tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_TIMING_UPDATE_STALLED,
+ false);
+ }
+
+ if (err)
+ dev_warn(emc->dev, "timing update error: %d\n", err);
+}
+
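+/*
+ * The oscillator count register is assumed to encode its run length
+ * piecewise: values below 0x40 count in units of 16 clocks, while the
+ * 0x40-0x7f, 0x80-0xbf and 0xc0+ ranges select fixed runs of 2048,
+ * 4096 and 8192 clocks, which is exactly what is decoded below.
+ */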
+unsigned long tegra210_emc_actual_osc_clocks(u32 in)
+{
+ if (in < 0x40)
+ return in * 16;
+ else if (in < 0x80)
+ return 2048;
+ else if (in < 0xc0)
+ return 4096;
+ else
+ return 8192;
+}
+
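+/*
+ * 0x4b is assumed to be the MPC request that starts the DRAM DQS
+ * oscillator used for periodic compensation; the read-back of EMC_MPC
+ * is a dummy read that flushes the posted write.
+ */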
+void tegra210_emc_start_periodic_compensation(struct tegra210_emc *emc)
+{
+ u32 mpc_req = 0x4b;
+
+ emc_writel(emc, mpc_req, EMC_MPC);
+ mpc_req = emc_readl(emc, EMC_MPC);
+}
+
+u32 tegra210_emc_compensate(struct tegra210_emc_timing *next, u32 offset)
+{
+ u32 temp = 0, rate = next->rate / 1000;
+ s32 delta[4], delta_taps[4];
+ s32 new[] = {
+ TRIM_REG(0, 0, 0, 0),
+ TRIM_REG(0, 0, 0, 1),
+ TRIM_REG(0, 0, 1, 2),
+ TRIM_REG(0, 0, 1, 3),
+
+ TRIM_REG(1, 0, 2, 4),
+ TRIM_REG(1, 0, 2, 5),
+ TRIM_REG(1, 0, 3, 6),
+ TRIM_REG(1, 0, 3, 7),
+
+ TRIM_REG(0, 1, 0, 0),
+ TRIM_REG(0, 1, 0, 1),
+ TRIM_REG(0, 1, 1, 2),
+ TRIM_REG(0, 1, 1, 3),
+
+ TRIM_REG(1, 1, 2, 4),
+ TRIM_REG(1, 1, 2, 5),
+ TRIM_REG(1, 1, 3, 6),
+ TRIM_REG(1, 1, 3, 7)
+ };
+	unsigned int i;
+
+ switch (offset) {
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3:
+ case EMC_DATA_BRLSHFT_0:
+ delta[0] = 128 * (next->current_dram_clktree[C0D0U0] -
+ next->trained_dram_clktree[C0D0U0]);
+ delta[1] = 128 * (next->current_dram_clktree[C0D0U1] -
+ next->trained_dram_clktree[C0D0U1]);
+ delta[2] = 128 * (next->current_dram_clktree[C1D0U0] -
+ next->trained_dram_clktree[C1D0U0]);
+ delta[3] = 128 * (next->current_dram_clktree[C1D0U1] -
+ next->trained_dram_clktree[C1D0U1]);
+
+ delta_taps[0] = (delta[0] * (s32)rate) / 1000000;
+ delta_taps[1] = (delta[1] * (s32)rate) / 1000000;
+ delta_taps[2] = (delta[2] * (s32)rate) / 1000000;
+ delta_taps[3] = (delta[3] * (s32)rate) / 1000000;
+
+ for (i = 0; i < 4; i++) {
+ if ((delta_taps[i] > next->tree_margin) ||
+ (delta_taps[i] < (-1 * next->tree_margin))) {
+ new[i * 2] = new[i * 2] + delta_taps[i];
+ new[i * 2 + 1] = new[i * 2 + 1] +
+ delta_taps[i];
+ }
+ }
+
+ if (offset == EMC_DATA_BRLSHFT_0) {
+ for (i = 0; i < 8; i++)
+ new[i] = new[i] / 64;
+ } else {
+ for (i = 0; i < 8; i++)
+ new[i] = new[i] % 64;
+ }
+
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3:
+ case EMC_DATA_BRLSHFT_1:
+ delta[0] = 128 * (next->current_dram_clktree[C0D1U0] -
+ next->trained_dram_clktree[C0D1U0]);
+ delta[1] = 128 * (next->current_dram_clktree[C0D1U1] -
+ next->trained_dram_clktree[C0D1U1]);
+ delta[2] = 128 * (next->current_dram_clktree[C1D1U0] -
+ next->trained_dram_clktree[C1D1U0]);
+ delta[3] = 128 * (next->current_dram_clktree[C1D1U1] -
+ next->trained_dram_clktree[C1D1U1]);
+
+ delta_taps[0] = (delta[0] * (s32)rate) / 1000000;
+ delta_taps[1] = (delta[1] * (s32)rate) / 1000000;
+ delta_taps[2] = (delta[2] * (s32)rate) / 1000000;
+ delta_taps[3] = (delta[3] * (s32)rate) / 1000000;
+
+ for (i = 0; i < 4; i++) {
+ if ((delta_taps[i] > next->tree_margin) ||
+ (delta_taps[i] < (-1 * next->tree_margin))) {
+ new[8 + i * 2] = new[8 + i * 2] +
+ delta_taps[i];
+ new[8 + i * 2 + 1] = new[8 + i * 2 + 1] +
+ delta_taps[i];
+ }
+ }
+
+ if (offset == EMC_DATA_BRLSHFT_1) {
+ for (i = 0; i < 8; i++)
+ new[i + 8] = new[i + 8] / 64;
+ } else {
+ for (i = 0; i < 8; i++)
+ new[i + 8] = new[i + 8] % 64;
+ }
+
+ break;
+ }
+
+ switch (offset) {
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0:
+ temp = CALC_TEMP(0, 0, 0, 1, 0);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1:
+ temp = CALC_TEMP(0, 1, 2, 3, 2);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2:
+ temp = CALC_TEMP(0, 2, 4, 5, 4);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3:
+ temp = CALC_TEMP(0, 3, 6, 7, 6);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0:
+ temp = CALC_TEMP(1, 0, 0, 1, 8);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1:
+ temp = CALC_TEMP(1, 1, 2, 3, 10);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2:
+ temp = CALC_TEMP(1, 2, 4, 5, 12);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3:
+ temp = CALC_TEMP(1, 3, 6, 7, 14);
+ break;
+
+ case EMC_DATA_BRLSHFT_0:
+ temp = ((new[0] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_MASK) |
+ ((new[1] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_MASK) |
+ ((new[2] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_MASK) |
+ ((new[3] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_MASK) |
+ ((new[4] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_MASK) |
+ ((new[5] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_MASK) |
+ ((new[6] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_MASK) |
+ ((new[7] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_MASK);
+ break;
+
+ case EMC_DATA_BRLSHFT_1:
+ temp = ((new[8] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_MASK) |
+ ((new[9] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_MASK) |
+ ((new[10] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_MASK) |
+ ((new[11] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_MASK) |
+ ((new[12] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_MASK) |
+ ((new[13] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_MASK) |
+ ((new[14] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_MASK) |
+ ((new[15] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_MASK);
+ break;
+
+ default:
+ break;
+ }
+
+ return temp;
+}
+
+u32 tegra210_emc_dll_prelock(struct tegra210_emc *emc, u32 clksrc)
+{
+ unsigned int i;
+ u32 value;
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_MASK;
+ value |= (3 << EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK;
+ value |= (3 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+ emc_writel(emc, 1, EMC_TIMING_CONTROL);
+
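+	/* wait for the timing update to complete on all channels */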
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_TIMING_UPDATE_STALLED,
+ 0);
+
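+	/* wait until the DLL reports as disabled in each channel */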
+ for (i = 0; i < emc->num_channels; i++) {
+ while (true) {
+ value = emc_channel_readl(emc, i, EMC_CFG_DIG_DLL);
+ if ((value & EMC_CFG_DIG_DLL_CFG_DLL_EN) == 0)
+ break;
+ }
+ }
+
+ value = emc->next->burst_regs[EMC_DLL_CFG_0_INDEX];
+ emc_writel(emc, value, EMC_DLL_CFG_0);
+
+ value = emc_readl(emc, EMC_DLL_CFG_1);
+ value &= EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_MASK;
+
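+	/*
+	 * Seed the DDLL calibration with a rate-dependent start value; the
+	 * per-band constants below presumably come from characterization of
+	 * the pads.
+	 */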
+ if (emc->next->rate >= 400000 && emc->next->rate < 600000)
+ value |= 150;
+ else if (emc->next->rate >= 600000 && emc->next->rate < 800000)
+ value |= 100;
+ else if (emc->next->rate >= 800000 && emc->next->rate < 1000000)
+ value |= 70;
+ else if (emc->next->rate >= 1000000 && emc->next->rate < 1200000)
+ value |= 30;
+ else
+ value |= 20;
+
+ emc_writel(emc, value, EMC_DLL_CFG_1);
+
+ tegra210_change_dll_src(emc, clksrc);
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+ tegra210_emc_timing_update(emc);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ while (true) {
+			value = emc_channel_readl(emc, i, EMC_CFG_DIG_DLL);
+ if (value & EMC_CFG_DIG_DLL_CFG_DLL_EN)
+ break;
+ }
+ }
+
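+	/* wait for the DLL to report both an update and a lock */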
+ while (true) {
+ value = emc_readl(emc, EMC_DIG_DLL_STATUS);
+
+ if ((value & EMC_DIG_DLL_STATUS_DLL_PRIV_UPDATED) == 0)
+ continue;
+
+ if ((value & EMC_DIG_DLL_STATUS_DLL_LOCK) == 0)
+ continue;
+
+ break;
+ }
+
+ value = emc_readl(emc, EMC_DIG_DLL_STATUS);
+
+ return value & EMC_DIG_DLL_STATUS_DLL_OUT_MASK;
+}
+
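+/*
+ * Queue the pad power ramp-up sequence for a clock change into the CCFIFO.
+ * Here @clk is a clock period rather than a frequency: the (100000 / clk)
+ * expressions convert a fixed interval into a cycle count for the queued
+ * delays, and the returned ramp_up_wait accumulates the total ramp time in
+ * the same unit as @clk.
+ */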
+u32 tegra210_emc_dvfs_power_ramp_up(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward)
+{
+ u32 cmd_pad, dq_pad, rfu1, cfg5, common_tx, ramp_up_wait = 0;
+ const struct tegra210_emc_timing *timing;
+
+ if (flip_backward)
+ timing = emc->last;
+ else
+ timing = emc->next;
+
+ cmd_pad = timing->burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
+ dq_pad = timing->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ rfu1 = timing->burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
+ cfg5 = timing->burst_regs[EMC_FBIO_CFG5_INDEX];
+ common_tx = timing->burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
+
+ cmd_pad |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+
+ if (clk < 1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD) {
+ ccfifo_writel(emc, common_tx & 0xa,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, common_tx & 0xf,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+ } else {
+ ccfifo_writel(emc, common_tx | 0x8,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, 0);
+ }
+
+ if (clk < 1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD) {
+ if (clk < 1000000 / IOBRICK_DCC_THRESHOLD) {
+ cmd_pad |=
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC;
+ cmd_pad &=
+ ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+
+ dq_pad |=
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC;
+ dq_pad &=
+ ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1 & 0xfe40fe40,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1 & 0xfe40fe40,
+ EMC_PMACRO_BRICK_CTRL_RFU1,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+ }
+
+ ccfifo_writel(emc, rfu1 & 0xfeedfeed,
+ EMC_PMACRO_BRICK_CTRL_RFU1, (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+
+ if (clk < 1000000 / IOBRICK_DCC_THRESHOLD) {
+ cmd_pad |=
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC;
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+
+ dq_pad |=
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC;
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1,
+ EMC_PMACRO_BRICK_CTRL_RFU1,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+ }
+
+ ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, (100000 / clk) + 10);
+ ramp_up_wait += 100000 + (10 * clk);
+ } else if (clk < 1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD) {
+ ccfifo_writel(emc, rfu1 | 0x06000600,
+ EMC_PMACRO_BRICK_CTRL_RFU1, (100000 / clk) + 1);
+ ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, (100000 / clk) + 10);
+ ramp_up_wait += 100000 + 10 * clk;
+ } else {
+ ccfifo_writel(emc, rfu1 | 0x00000600,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, 12);
+ ramp_up_wait += 12 * clk;
+ }
+
+ cmd_pad &= ~EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+ ccfifo_writel(emc, cmd_pad, EMC_PMACRO_CMD_PAD_TX_CTRL, 5);
+
+ return ramp_up_wait;
+}
+
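+/*
+ * Queue the mirror-image pad power ramp-down sequence. As with the ramp-up
+ * path, @clk is a clock period and the return value is the accumulated
+ * wait time in the same unit.
+ */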
+u32 tegra210_emc_dvfs_power_ramp_down(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward)
+{
+ u32 ramp_down_wait = 0, cmd_pad, dq_pad, rfu1, cfg5, common_tx;
+ const struct tegra210_emc_timing *entry;
+ u32 seq_wait;
+
+ if (flip_backward)
+ entry = emc->next;
+ else
+ entry = emc->last;
+
+ cmd_pad = entry->burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
+ dq_pad = entry->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ rfu1 = entry->burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
+ cfg5 = entry->burst_regs[EMC_FBIO_CFG5_INDEX];
+ common_tx = entry->burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
+
+ cmd_pad |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+
+ ccfifo_writel(emc, cmd_pad, EMC_PMACRO_CMD_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, cfg5 | EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, 12);
+ ramp_down_wait = 12 * clk;
+
+ seq_wait = (100000 / clk) + 1;
+
+ if (clk < (1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD)) {
+ if (clk < (1000000 / IOBRICK_DCC_THRESHOLD)) {
+ cmd_pad &=
+ ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+ cmd_pad |=
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC;
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+
+ dq_pad &=
+ ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+ dq_pad |=
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC;
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1 & ~0x01120112,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1 & ~0x01120112,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+ ramp_down_wait += 100000;
+ }
+
+ ccfifo_writel(emc, rfu1 & ~0x01bf01bf,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+ ramp_down_wait += 100000;
+
+ if (clk < (1000000 / IOBRICK_DCC_THRESHOLD)) {
+ cmd_pad &=
+ ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC);
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+
+ dq_pad &=
+ ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC);
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1 & ~0x07ff07ff,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1 & ~0x07ff07ff,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+ ramp_down_wait += 100000;
+ }
+ } else {
+ ccfifo_writel(emc, rfu1 & ~0xffff07ff,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait + 19);
+ ramp_down_wait += 100000 + (20 * clk);
+ }
+
+ if (clk < (1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD)) {
+ ramp_down_wait += 100000;
+ ccfifo_writel(emc, common_tx & ~0x5,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+ ccfifo_writel(emc, common_tx & ~0xf,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
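+		/*
+		 * Dummy write (register offset 0, value 0), presumably queued
+		 * only to insert one more delay slot into the sequence.
+		 */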
+ ccfifo_writel(emc, 0, 0, seq_wait);
+ ramp_down_wait += 100000;
+ } else {
+ ccfifo_writel(emc, common_tx & ~0xf,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+ }
+
+ return ramp_down_wait;
+}
+
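+/*
+ * Copy the DRAM clock tree values captured during training back into the
+ * current state. Note that the C0D1Ux entries are not touched here.
+ */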
+void tegra210_emc_reset_dram_clktree_values(struct tegra210_emc_timing *timing)
+{
+ timing->current_dram_clktree[C0D0U0] =
+ timing->trained_dram_clktree[C0D0U0];
+ timing->current_dram_clktree[C0D0U1] =
+ timing->trained_dram_clktree[C0D0U1];
+ timing->current_dram_clktree[C1D0U0] =
+ timing->trained_dram_clktree[C1D0U0];
+ timing->current_dram_clktree[C1D0U1] =
+ timing->trained_dram_clktree[C1D0U1];
+ timing->current_dram_clktree[C1D1U0] =
+ timing->trained_dram_clktree[C1D1U0];
+ timing->current_dram_clktree[C1D1U1] =
+ timing->trained_dram_clktree[C1D1U1];
+}
+
+static void update_dll_control(struct tegra210_emc *emc, u32 value, bool state)
+{
+ unsigned int i;
+
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+ tegra210_emc_timing_update(emc);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_CFG_DLL_EN,
+ state);
+}
+
+void tegra210_emc_dll_disable(struct tegra210_emc *emc)
+{
+ u32 value;
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+
+ update_dll_control(emc, value, false);
+}
+
+void tegra210_emc_dll_enable(struct tegra210_emc *emc)
+{
+ u32 value;
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+
+ update_dll_control(emc, value, true);
+}
+
+void tegra210_emc_adjust_timing(struct tegra210_emc *emc,
+ struct tegra210_emc_timing *timing)
+{
+ u32 dsr_cntrl = timing->burst_regs[EMC_DYN_SELF_REF_CONTROL_INDEX];
+ u32 pre_ref = timing->burst_regs[EMC_PRE_REFRESH_REQ_CNT_INDEX];
+ u32 ref = timing->burst_regs[EMC_REFRESH_INDEX];
+
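+	/* scale the refresh-related settings by the selected speed-up factor */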
+ switch (emc->refresh) {
+ case TEGRA210_EMC_REFRESH_NOMINAL:
+ case TEGRA210_EMC_REFRESH_THROTTLE:
+ break;
+
+ case TEGRA210_EMC_REFRESH_2X:
+ ref = REFRESH_SPEEDUP(ref, 2);
+ pre_ref = REFRESH_SPEEDUP(pre_ref, 2);
+ dsr_cntrl = REFRESH_SPEEDUP(dsr_cntrl, 2);
+ break;
+
+ case TEGRA210_EMC_REFRESH_4X:
+ ref = REFRESH_SPEEDUP(ref, 4);
+ pre_ref = REFRESH_SPEEDUP(pre_ref, 4);
+ dsr_cntrl = REFRESH_SPEEDUP(dsr_cntrl, 4);
+ break;
+
+ default:
+		dev_warn(emc->dev, "unsupported refresh mode: %d\n",
+			 emc->refresh);
+ return;
+ }
+
+ emc_writel(emc, ref, emc->offsets->burst[EMC_REFRESH_INDEX]);
+ emc_writel(emc, pre_ref,
+ emc->offsets->burst[EMC_PRE_REFRESH_REQ_CNT_INDEX]);
+ emc_writel(emc, dsr_cntrl,
+ emc->offsets->burst[EMC_DYN_SELF_REF_CONTROL_INDEX]);
+}
+
+static int tegra210_emc_set_rate(struct device *dev,
+ const struct tegra210_clk_emc_config *config)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ struct tegra210_emc_timing *timing = NULL;
+ unsigned long rate = config->rate;
+ s64 last_change_delay;
+ unsigned long flags;
+ unsigned int i;
+
+ if (rate == emc->last->rate * 1000UL)
+ return 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate * 1000UL == rate) {
+ timing = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing)
+ return -EINVAL;
+
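+	/* rates above 204 MHz can only be used once training has completed */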
+ if (rate > 204000000 && !timing->trained)
+ return -EINVAL;
+
+ emc->next = timing;
+ last_change_delay = ktime_us_delta(ktime_get(), emc->clkchange_time);
+
+ /* XXX use non-busy-looping sleep? */
+ if ((last_change_delay >= 0) &&
+ (last_change_delay < emc->clkchange_delay))
+ udelay(emc->clkchange_delay - (int)last_change_delay);
+
+ spin_lock_irqsave(&emc->lock, flags);
+ tegra210_emc_set_clock(emc, config->value);
+ emc->clkchange_time = ktime_get();
+ emc->last = timing;
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ return 0;
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
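+ *
+ * - temperature: Writing a value between 1 and 7 to this file overrides
+ *   the DRAM temperature reported to the driver, while writing 0 reverts
+ *   to the value polled from the DRAM device. Reading returns the
+ *   override if one is set, or the polled value otherwise.
+ *
+ * As an example, the frequency floor can be raised to one of the rates
+ * listed in available_rates (408000000 below is purely illustrative):
+ *
+ *     echo 408000000 > /sys/kernel/debug/emc/min_rate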
+ */
+
+static bool tegra210_emc_validate_rate(struct tegra210_emc *emc,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate * 1000UL)
+ return true;
+
+ return false;
+}
+
+static int tegra210_emc_debug_available_rates_show(struct seq_file *s,
+ void *data)
+{
+ struct tegra210_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ seq_printf(s, "%s%u", prefix, emc->timings[i].rate * 1000);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int tegra210_emc_debug_available_rates_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, tegra210_emc_debug_available_rates_show,
+ inode->i_private);
+}
+
+static const struct file_operations tegra210_emc_debug_available_rates_fops = {
+ .open = tegra210_emc_debug_available_rates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int tegra210_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra210_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra210_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra210_emc *emc = data;
+ int err;
+
+ if (!tegra210_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_min_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_min_rate_fops,
+ tegra210_emc_debug_min_rate_get,
+ tegra210_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra210_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra210_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra210_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra210_emc *emc = data;
+ int err;
+
+ if (!tegra210_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_max_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_max_rate_fops,
+ tegra210_emc_debug_max_rate_get,
+ tegra210_emc_debug_max_rate_set, "%llu\n");
+
+static int tegra210_emc_debug_temperature_get(void *data, u64 *temperature)
+{
+ struct tegra210_emc *emc = data;
+ unsigned int value;
+
+ if (!emc->debugfs.temperature)
+ value = tegra210_emc_get_temperature(emc);
+ else
+ value = emc->debugfs.temperature;
+
+ *temperature = value;
+
+ return 0;
+}
+
+static int tegra210_emc_debug_temperature_set(void *data, u64 temperature)
+{
+ struct tegra210_emc *emc = data;
+
+ if (temperature > 7)
+ return -EINVAL;
+
+ emc->debugfs.temperature = temperature;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_temperature_fops,
+ tegra210_emc_debug_temperature_get,
+ tegra210_emc_debug_temperature_set, "%llu\n");
+
+static void tegra210_emc_debugfs_init(struct tegra210_emc *emc)
+{
+ struct device *dev = emc->dev;
+ unsigned int i;
+ int err;
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate * 1000UL < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate * 1000UL;
+
+ if (emc->timings[i].rate * 1000UL > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate * 1000UL;
+ }
+
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ return;
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+ if (!emc->debugfs.root) {
+ dev_err(dev, "failed to create debugfs directory\n");
+ return;
+ }
+
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
+ &tegra210_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root, emc,
+ &tegra210_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root, emc,
+ &tegra210_emc_debug_max_rate_fops);
+ debugfs_create_file("temperature", 0644, emc->debugfs.root, emc,
+ &tegra210_emc_debug_temperature_fops);
+}
+
+static void tegra210_emc_detect(struct tegra210_emc *emc)
+{
+ u32 value;
+
+ /* probe the number of connected DRAM devices */
+ value = mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+
+ if (value & MC_EMEM_ADR_CFG_EMEM_NUMDEV)
+ emc->num_devices = 2;
+ else
+ emc->num_devices = 1;
+
+ /* probe the type of DRAM */
+ value = emc_readl(emc, EMC_FBIO_CFG5);
+	emc->dram_type = value & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+
+ /* probe the number of channels */
+ value = emc_readl(emc, EMC_FBIO_CFG7);
+
+ if ((value & EMC_FBIO_CFG7_CH1_ENABLE) &&
+ (value & EMC_FBIO_CFG7_CH0_ENABLE))
+ emc->num_channels = 2;
+ else
+ emc->num_channels = 1;
+}
+
+static int tegra210_emc_validate_timings(struct tegra210_emc *emc,
+ struct tegra210_emc_timing *timings,
+ unsigned int num_timings)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_timings; i++) {
+ u32 min_volt = timings[i].min_volt;
+ u32 rate = timings[i].rate;
+
+ if (!rate)
+ return -EINVAL;
+
+ if ((i > 0) && ((rate <= timings[i - 1].rate) ||
+ (min_volt < timings[i - 1].min_volt)))
+ return -EINVAL;
+
+		/* all entries must share the revision of the first one */
+		if (timings[i].revision != timings[0].revision)
+			return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra210_emc_probe(struct platform_device *pdev)
+{
+ struct thermal_cooling_device *cd;
+ unsigned long current_rate;
+ struct platform_device *mc;
+ struct tegra210_emc *emc;
+ struct device_node *np;
+ unsigned int i;
+ int err;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ emc->clk = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc->clk))
+ return PTR_ERR(emc->clk);
+
+ platform_set_drvdata(pdev, emc);
+ spin_lock_init(&emc->lock);
+ emc->dev = &pdev->dev;
+
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "could not get memory controller\n");
+ return -ENOENT;
+ }
+
+ mc = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!mc)
+ return -ENOENT;
+
+ emc->mc = platform_get_drvdata(mc);
+ if (!emc->mc) {
+ put_device(&mc->dev);
+ return -EPROBE_DEFER;
+ }
+
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emc->regs)) {
+ err = PTR_ERR(emc->regs);
+ goto put_mc;
+ }
+
+ for (i = 0; i < 2; i++) {
+ emc->channel[i] = devm_platform_ioremap_resource(pdev, 1 + i);
+ if (IS_ERR(emc->channel[i])) {
+ err = PTR_ERR(emc->channel[i]);
+ goto put_mc;
+ }
+ }
+
+ tegra210_emc_detect(emc);
+ np = pdev->dev.of_node;
+
+ /* attach to the nominal and (optional) derated tables */
+ err = of_reserved_mem_device_init_by_name(emc->dev, np, "nominal");
+ if (err < 0) {
+ dev_err(emc->dev, "failed to get nominal EMC table: %d\n", err);
+ goto put_mc;
+ }
+
+ err = of_reserved_mem_device_init_by_name(emc->dev, np, "derated");
+ if (err < 0 && err != -ENODEV) {
+ dev_err(emc->dev, "failed to get derated EMC table: %d\n", err);
+ goto release;
+ }
+
+ /* validate the tables */
+ if (emc->nominal) {
+ err = tegra210_emc_validate_timings(emc, emc->nominal,
+ emc->num_timings);
+ if (err < 0)
+ goto release;
+ }
+
+ if (emc->derated) {
+ err = tegra210_emc_validate_timings(emc, emc->derated,
+ emc->num_timings);
+ if (err < 0)
+ goto release;
+ }
+
+ /* default to the nominal table */
+ emc->timings = emc->nominal;
+
+ /* pick the current timing based on the current EMC clock rate */
+ current_rate = clk_get_rate(emc->clk) / 1000;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate == current_rate) {
+ emc->last = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (i == emc->num_timings) {
+ dev_err(emc->dev, "no EMC table entry found for %lu kHz\n",
+ current_rate);
+ err = -ENOENT;
+ goto release;
+ }
+
+ /* pick a compatible clock change sequence for the EMC table */
+ for (i = 0; i < ARRAY_SIZE(tegra210_emc_sequences); i++) {
+ const struct tegra210_emc_sequence *sequence =
+ tegra210_emc_sequences[i];
+
+ if (emc->timings[0].revision == sequence->revision) {
+ emc->sequence = sequence;
+ break;
+ }
+ }
+
+ if (!emc->sequence) {
+ dev_err(&pdev->dev, "sequence %u not supported\n",
+ emc->timings[0].revision);
+ err = -ENOTSUPP;
+ goto release;
+ }
+
+ emc->offsets = &tegra210_emc_table_register_offsets;
+ emc->refresh = TEGRA210_EMC_REFRESH_NOMINAL;
+
+ emc->provider.owner = THIS_MODULE;
+ emc->provider.dev = &pdev->dev;
+ emc->provider.set_rate = tegra210_emc_set_rate;
+
+ emc->provider.configs = devm_kcalloc(&pdev->dev, emc->num_timings,
+ sizeof(*emc->provider.configs),
+ GFP_KERNEL);
+ if (!emc->provider.configs) {
+ err = -ENOMEM;
+ goto release;
+ }
+
+ emc->provider.num_configs = emc->num_timings;
+
+ for (i = 0; i < emc->provider.num_configs; i++) {
+ struct tegra210_emc_timing *timing = &emc->timings[i];
+ struct tegra210_clk_emc_config *config =
+ &emc->provider.configs[i];
+ u32 value;
+
+ config->rate = timing->rate * 1000UL;
+ config->value = timing->clk_src_emc;
+
+ value = timing->burst_mc_regs[MC_EMEM_ARB_MISC0_INDEX];
+
+ if ((value & MC_EMEM_ARB_MISC0_EMC_SAME_FREQ) == 0)
+ config->same_freq = false;
+ else
+ config->same_freq = true;
+ }
+
+ err = tegra210_clk_emc_attach(emc->clk, &emc->provider);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to attach to EMC clock: %d\n", err);
+ goto release;
+ }
+
+ emc->clkchange_delay = 100;
+ emc->training_interval = 100;
+ dev_set_drvdata(emc->dev, emc);
+
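+	/*
+	 * Set up a deferrable timer for polling the refresh state and a
+	 * second timer that drives the periodic retraining; the handlers
+	 * are defined elsewhere in this driver.
+	 */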
+ timer_setup(&emc->refresh_timer, tegra210_emc_poll_refresh,
+ TIMER_DEFERRABLE);
+ atomic_set(&emc->refresh_poll, 0);
+ emc->refresh_poll_interval = 1000;
+
+ timer_setup(&emc->training, tegra210_emc_train, 0);
+
+ tegra210_emc_debugfs_init(emc);
+
+ cd = devm_thermal_of_cooling_device_register(emc->dev, np, "emc", emc,
+ &tegra210_emc_cd_ops);
+ if (IS_ERR(cd)) {
+ err = PTR_ERR(cd);
+ dev_err(emc->dev, "failed to register cooling device: %d\n",
+ err);
+ goto detach;
+ }
+
+ return 0;
+
+detach:
+ debugfs_remove_recursive(emc->debugfs.root);
+ tegra210_clk_emc_detach(emc->clk);
+release:
+ of_reserved_mem_device_release(emc->dev);
+put_mc:
+ put_device(emc->mc->dev);
+ return err;
+}
+
+static int tegra210_emc_remove(struct platform_device *pdev)
+{
+ struct tegra210_emc *emc = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(emc->debugfs.root);
+ tegra210_clk_emc_detach(emc->clk);
+ of_reserved_mem_device_release(emc->dev);
+ put_device(emc->mc->dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra210_emc_suspend(struct device *dev)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_rate_exclusive_get(emc->clk);
+ if (err < 0) {
+ dev_err(emc->dev, "failed to acquire clock: %d\n", err);
+ return err;
+ }
+
+ emc->resume_rate = clk_get_rate(emc->clk);
+
+ clk_set_rate(emc->clk, 204000000);
+ tegra210_clk_emc_detach(emc->clk);
+
+ dev_dbg(dev, "suspending at %lu Hz\n", clk_get_rate(emc->clk));
+
+ return 0;
+}
+
+static int __maybe_unused tegra210_emc_resume(struct device *dev)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ int err;
+
+ err = tegra210_clk_emc_attach(emc->clk, &emc->provider);
+ if (err < 0) {
+ dev_err(dev, "failed to attach to EMC clock: %d\n", err);
+ return err;
+ }
+
+ clk_set_rate(emc->clk, emc->resume_rate);
+ clk_rate_exclusive_put(emc->clk);
+
+ dev_dbg(dev, "resuming at %lu Hz\n", clk_get_rate(emc->clk));
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra210_emc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra210_emc_suspend, tegra210_emc_resume)
+};
+
+static const struct of_device_id tegra210_emc_of_match[] = {
+ { .compatible = "nvidia,tegra210-emc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra210_emc_of_match);
+
+static struct platform_driver tegra210_emc_driver = {
+ .driver = {
+ .name = "tegra210-emc",
+ .of_match_table = tegra210_emc_of_match,
+ .pm = &tegra210_emc_pm_ops,
+ },
+ .probe = tegra210_emc_probe,
+ .remove = tegra210_emc_remove,
+};
+
+module_platform_driver(tegra210_emc_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_AUTHOR("Joseph Lo <josephl@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra210 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra210-emc-table.c b/drivers/memory/tegra/tegra210-emc-table.c
new file mode 100644
index 000000000000..3e0598363b87
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-table.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/of_reserved_mem.h>
+
+#include "tegra210-emc.h"
+
+#define TEGRA_EMC_MAX_FREQS 16
+
+static int tegra210_emc_table_device_init(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ struct tegra210_emc_timing *timings;
+ unsigned int i, count = 0;
+
+ timings = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+ if (!timings) {
+ dev_err(dev, "failed to map EMC table\n");
+ return -ENOMEM;
+ }
+
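+	/* a zero revision marks the first unused slot in the table */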
+ for (i = 0; i < TEGRA_EMC_MAX_FREQS; i++) {
+ if (timings[i].revision == 0)
+ break;
+
+ count++;
+ }
+
+ /* only the nominal and derated tables are expected */
+ if (emc->derated) {
+ dev_warn(dev, "excess EMC table '%s'\n", rmem->name);
+ goto out;
+ }
+
+ if (emc->nominal) {
+ if (count != emc->num_timings) {
+ dev_warn(dev, "%u derated vs. %u nominal entries\n",
+ count, emc->num_timings);
+ memunmap(timings);
+ return -EINVAL;
+ }
+
+ emc->derated = timings;
+ } else {
+ emc->num_timings = count;
+ emc->nominal = timings;
+ }
+
+out:
+ /* keep track of which table this is */
+ rmem->priv = timings;
+
+ return 0;
+}
+
+static void tegra210_emc_table_device_release(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ struct tegra210_emc_timing *timings = rmem->priv;
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+
+ if ((emc->nominal && timings != emc->nominal) &&
+ (emc->derated && timings != emc->derated))
+ dev_warn(dev, "trying to release unassigned EMC table '%s'\n",
+ rmem->name);
+
+ memunmap(timings);
+}
+
+static const struct reserved_mem_ops tegra210_emc_table_ops = {
+ .device_init = tegra210_emc_table_device_init,
+ .device_release = tegra210_emc_table_device_release,
+};
+
+static int tegra210_emc_table_init(struct reserved_mem *rmem)
+{
+ pr_debug("Tegra210 EMC table at %pa, size %lu bytes\n", &rmem->base,
+ (unsigned long)rmem->size);
+
+ rmem->ops = &tegra210_emc_table_ops;
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(tegra210_emc_table, "nvidia,tegra210-emc-table",
+ tegra210_emc_table_init);
diff --git a/drivers/memory/tegra/tegra210-emc.h b/drivers/memory/tegra/tegra210-emc.h
new file mode 100644
index 000000000000..8988bcf15290
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc.h
@@ -0,0 +1,1016 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef TEGRA210_EMC_H
+#define TEGRA210_EMC_H
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#define DVFS_FGCG_HIGH_SPEED_THRESHOLD 1000
+#define IOBRICK_DCC_THRESHOLD 2400
+#define DVFS_FGCG_MID_SPEED_THRESHOLD 600
+
+#define EMC_STATUS_UPDATE_TIMEOUT 1000
+
+/* register definitions */
+#define EMC_INTSTATUS 0x0
+#define EMC_INTSTATUS_CLKCHANGE_COMPLETE BIT(4)
+#define EMC_DBG 0x8
+#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
+#define EMC_DBG_WRITE_ACTIVE_ONLY BIT(30)
+#define EMC_CFG 0xc
+#define EMC_CFG_DRAM_CLKSTOP_PD BIT(31)
+#define EMC_CFG_DRAM_CLKSTOP_SR BIT(30)
+#define EMC_CFG_DRAM_ACPD BIT(29)
+#define EMC_CFG_DYN_SELF_REF BIT(28)
+#define EMC_PIN 0x24
+#define EMC_PIN_PIN_CKE BIT(0)
+#define EMC_PIN_PIN_CKEB BIT(1)
+#define EMC_PIN_PIN_CKE_PER_DEV BIT(2)
+#define EMC_TIMING_CONTROL 0x28
+#define EMC_RC 0x2c
+#define EMC_RFC 0x30
+#define EMC_RAS 0x34
+#define EMC_RP 0x38
+#define EMC_R2W 0x3c
+#define EMC_W2R 0x40
+#define EMC_R2P 0x44
+#define EMC_W2P 0x48
+#define EMC_RD_RCD 0x4c
+#define EMC_WR_RCD 0x50
+#define EMC_RRD 0x54
+#define EMC_REXT 0x58
+#define EMC_WDV 0x5c
+#define EMC_QUSE 0x60
+#define EMC_QRST 0x64
+#define EMC_QSAFE 0x68
+#define EMC_RDV 0x6c
+#define EMC_REFRESH 0x70
+#define EMC_BURST_REFRESH_NUM 0x74
+#define EMC_PDEX2WR 0x78
+#define EMC_PDEX2RD 0x7c
+#define EMC_PCHG2PDEN 0x80
+#define EMC_ACT2PDEN 0x84
+#define EMC_AR2PDEN 0x88
+#define EMC_RW2PDEN 0x8c
+#define EMC_TXSR 0x90
+#define EMC_TCKE 0x94
+#define EMC_TFAW 0x98
+#define EMC_TRPAB 0x9c
+#define EMC_TCLKSTABLE 0xa0
+#define EMC_TCLKSTOP 0xa4
+#define EMC_TREFBW 0xa8
+#define EMC_TPPD 0xac
+#define EMC_ODT_WRITE 0xb0
+#define EMC_PDEX2MRR 0xb4
+#define EMC_WEXT 0xb8
+#define EMC_RFC_SLR 0xc0
+#define EMC_MRS_WAIT_CNT2 0xc4
+#define EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT 16
+#define EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT 0
+#define EMC_MRS_WAIT_CNT 0xc8
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT 0
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK \
+ (0x3FF << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)
+
+#define EMC_MRS 0xcc
+#define EMC_EMRS 0xd0
+#define EMC_EMRS_USE_EMRS_LONG_CNT BIT(26)
+#define EMC_REF 0xd4
+#define EMC_REF_REF_CMD BIT(0)
+#define EMC_SELF_REF 0xe0
+#define EMC_MRW 0xe8
+#define EMC_MRW_MRW_OP_SHIFT 0
+#define EMC_MRW_MRW_OP_MASK \
+ (0xff << EMC_MRW_MRW_OP_SHIFT)
+#define EMC_MRW_MRW_MA_SHIFT 16
+#define EMC_MRW_USE_MRW_EXT_CNT 27
+#define EMC_MRW_MRW_DEV_SELECTN_SHIFT 30
+
+#define EMC_MRR 0xec
+#define EMC_MRR_DEV_SEL_SHIFT 30
+#define EMC_MRR_DEV_SEL_MASK 0x3
+#define EMC_MRR_MA_SHIFT 16
+#define EMC_MRR_MA_MASK 0xff
+#define EMC_MRR_DATA_SHIFT 0
+#define EMC_MRR_DATA_MASK 0xffff
+
+#define EMC_FBIO_SPARE 0x100
+#define EMC_FBIO_CFG5 0x104
+#define EMC_FBIO_CFG5_DRAM_TYPE_SHIFT 0
+#define EMC_FBIO_CFG5_DRAM_TYPE_MASK \
+ (0x3 << EMC_FBIO_CFG5_DRAM_TYPE_SHIFT)
+#define EMC_FBIO_CFG5_CMD_TX_DIS BIT(8)
+
+#define EMC_PDEX2CKE 0x118
+#define EMC_CKE2PDEN 0x11c
+#define EMC_MPC 0x128
+#define EMC_EMRS2 0x12c
+#define EMC_EMRS2_USE_EMRS2_LONG_CNT BIT(26)
+#define EMC_MRW2 0x134
+#define EMC_MRW3 0x138
+#define EMC_MRW4 0x13c
+#define EMC_R2R 0x144
+#define EMC_EINPUT 0x14c
+#define EMC_EINPUT_DURATION 0x150
+#define EMC_PUTERM_EXTRA 0x154
+#define EMC_TCKESR 0x158
+#define EMC_TPD 0x15c
+#define EMC_AUTO_CAL_CONFIG 0x2a4
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START BIT(0)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL BIT(9)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL BIT(10)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE BIT(29)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_START BIT(31)
+#define EMC_EMC_STATUS 0x2b4
+#define EMC_EMC_STATUS_MRR_DIVLD BIT(20)
+#define EMC_EMC_STATUS_TIMING_UPDATE_STALLED BIT(23)
+#define EMC_EMC_STATUS_DRAM_IN_POWERDOWN_SHIFT 4
+#define EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK \
+ (0x3 << EMC_EMC_STATUS_DRAM_IN_POWERDOWN_SHIFT)
+#define EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_SHIFT 8
+#define EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK \
+ (0x3 << EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_SHIFT)
+
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_CFG_DIG_DLL_CFG_DLL_EN BIT(0)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK BIT(1)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC BIT(3)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK BIT(4)
+#define EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT 6
+#define EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK \
+ (0x3 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT)
+#define EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT 8
+#define EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_MASK \
+ (0x7 << EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT)
+
+#define EMC_CFG_DIG_DLL_PERIOD 0x2c0
+#define EMC_DIG_DLL_STATUS 0x2c4
+#define EMC_DIG_DLL_STATUS_DLL_LOCK BIT(15)
+#define EMC_DIG_DLL_STATUS_DLL_PRIV_UPDATED BIT(17)
+#define EMC_DIG_DLL_STATUS_DLL_OUT_SHIFT 0
+#define EMC_DIG_DLL_STATUS_DLL_OUT_MASK \
+ (0x7ff << EMC_DIG_DLL_STATUS_DLL_OUT_SHIFT)
+
+#define EMC_CFG_DIG_DLL_1 0x2c8
+#define EMC_RDV_MASK 0x2cc
+#define EMC_WDV_MASK 0x2d0
+#define EMC_RDV_EARLY_MASK 0x2d4
+#define EMC_RDV_EARLY 0x2d8
+#define EMC_AUTO_CAL_CONFIG8 0x2dc
+#define EMC_ZCAL_INTERVAL 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+#define EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK 0x7ff
+#define EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT 0
+
+#define EMC_ZQ_CAL 0x2ec
+#define EMC_ZQ_CAL_DEV_SEL_SHIFT 30
+#define EMC_ZQ_CAL_LONG BIT(4)
+#define EMC_ZQ_CAL_ZQ_LATCH_CMD BIT(1)
+#define EMC_ZQ_CAL_ZQ_CAL_CMD BIT(0)
+#define EMC_FDPD_CTRL_DQ 0x310
+#define EMC_FDPD_CTRL_CMD 0x314
+#define EMC_PMACRO_CMD_BRICK_CTRL_FDPD 0x318
+#define EMC_PMACRO_DATA_BRICK_CTRL_FDPD 0x31c
+#define EMC_PMACRO_BRICK_CTRL_RFU1 0x330
+#define EMC_PMACRO_BRICK_CTRL_RFU2 0x334
+#define EMC_TR_TIMING_0 0x3b4
+#define EMC_TR_CTRL_1 0x3bc
+#define EMC_TR_RDV 0x3c4
+#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc
+#define EMC_SEL_DPD_CTRL 0x3d8
+#define EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN BIT(8)
+#define EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN BIT(5)
+#define EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN BIT(4)
+#define EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN BIT(3)
+#define EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN BIT(2)
+#define EMC_PRE_REFRESH_REQ_CNT 0x3dc
+#define EMC_DYN_SELF_REF_CONTROL 0x3e0
+#define EMC_TXSRDLL 0x3e4
+#define EMC_CCFIFO_ADDR 0x3e8
+#define EMC_CCFIFO_ADDR_STALL_BY_1			BIT(31)
+#define EMC_CCFIFO_ADDR_STALL(x) (((x) & 0x7fff) << 16)
+#define EMC_CCFIFO_ADDR_OFFSET(x) ((x) & 0xffff)
+#define EMC_CCFIFO_DATA 0x3ec
+#define EMC_TR_QPOP 0x3f4
+#define EMC_TR_RDV_MASK 0x3f8
+#define EMC_TR_QSAFE 0x3fc
+#define EMC_TR_QRST 0x400
+#define EMC_ISSUE_QRST 0x428
+#define EMC_AUTO_CAL_CONFIG2 0x458
+#define EMC_AUTO_CAL_CONFIG3 0x45c
+#define EMC_TR_DVFS 0x460
+#define EMC_AUTO_CAL_CHANNEL 0x464
+#define EMC_IBDLY 0x468
+#define EMC_OBDLY 0x46c
+#define EMC_TXDSRVTTGEN 0x480
+#define EMC_WE_DURATION 0x48c
+#define EMC_WS_DURATION 0x490
+#define EMC_WEV 0x494
+#define EMC_WSV 0x498
+#define EMC_CFG_3 0x49c
+#define EMC_MRW6 0x4a4
+#define EMC_MRW7 0x4a8
+#define EMC_MRW8 0x4ac
+#define EMC_MRW9 0x4b0
+#define EMC_MRW10 0x4b4
+#define EMC_MRW11 0x4b8
+#define EMC_MRW12 0x4bc
+#define EMC_MRW13 0x4c0
+#define EMC_MRW14 0x4c4
+#define EMC_MRW15 0x4d0
+#define EMC_CFG_SYNC 0x4d4
+#define EMC_FDPD_CTRL_CMD_NO_RAMP 0x4d8
+#define EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE BIT(0)
+#define EMC_WDV_CHK 0x4e0
+#define EMC_CFG_PIPE_2 0x554
+#define EMC_CFG_PIPE_CLK 0x558
+#define EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON BIT(0)
+#define EMC_CFG_PIPE_1 0x55c
+#define EMC_CFG_PIPE 0x560
+#define EMC_QPOP 0x564
+#define EMC_QUSE_WIDTH 0x568
+#define EMC_PUTERM_WIDTH 0x56c
+#define EMC_AUTO_CAL_CONFIG7 0x574
+#define EMC_REFCTRL2 0x580
+#define EMC_FBIO_CFG7 0x584
+#define EMC_FBIO_CFG7_CH0_ENABLE BIT(1)
+#define EMC_FBIO_CFG7_CH1_ENABLE BIT(2)
+#define EMC_DATA_BRLSHFT_0 0x588
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT 21
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT 18
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT 15
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT 12
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT 9
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT 6
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT 3
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT 0
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT)
+
+#define EMC_DATA_BRLSHFT_1 0x58c
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT 21
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT 18
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT 15
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT 12
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT 9
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT 6
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT 3
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT 0
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT)
+
+#define EMC_RFCPB 0x590
+#define EMC_DQS_BRLSHFT_0 0x594
+#define EMC_DQS_BRLSHFT_1 0x598
+#define EMC_CMD_BRLSHFT_0 0x59c
+#define EMC_CMD_BRLSHFT_1 0x5a0
+#define EMC_CMD_BRLSHFT_2 0x5a4
+#define EMC_CMD_BRLSHFT_3 0x5a8
+#define EMC_QUSE_BRLSHFT_0 0x5ac
+#define EMC_AUTO_CAL_CONFIG4 0x5b0
+#define EMC_AUTO_CAL_CONFIG5 0x5b4
+#define EMC_QUSE_BRLSHFT_1 0x5b8
+#define EMC_QUSE_BRLSHFT_2 0x5bc
+#define EMC_CCDMW 0x5c0
+#define EMC_QUSE_BRLSHFT_3 0x5c4
+#define EMC_AUTO_CAL_CONFIG6 0x5cc
+#define EMC_DLL_CFG_0 0x5e4
+#define EMC_DLL_CFG_1 0x5e8
+#define EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_SHIFT 10
+#define EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_MASK \
+ (0x7ff << EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_SHIFT)
+
+#define EMC_CONFIG_SAMPLE_DELAY 0x5f0
+#define EMC_CFG_UPDATE 0x5f4
+#define EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT 9
+#define EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_MASK \
+ (0x3 << EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT)
+
+#define EMC_PMACRO_QUSE_DDLL_RANK0_0 0x600
+#define EMC_PMACRO_QUSE_DDLL_RANK0_1 0x604
+#define EMC_PMACRO_QUSE_DDLL_RANK0_2 0x608
+#define EMC_PMACRO_QUSE_DDLL_RANK0_3 0x60c
+#define EMC_PMACRO_QUSE_DDLL_RANK0_4 0x610
+#define EMC_PMACRO_QUSE_DDLL_RANK0_5 0x614
+#define EMC_PMACRO_QUSE_DDLL_RANK1_0 0x620
+#define EMC_PMACRO_QUSE_DDLL_RANK1_1 0x624
+#define EMC_PMACRO_QUSE_DDLL_RANK1_2 0x628
+#define EMC_PMACRO_QUSE_DDLL_RANK1_3 0x62c
+#define EMC_PMACRO_QUSE_DDLL_RANK1_4 0x630
+#define EMC_PMACRO_QUSE_DDLL_RANK1_5 0x634
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 0x640
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 0x644
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 0x648
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 0x64c
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4 0x650
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5 0x654
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 0x660
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 0x664
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 0x668
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 0x66c
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_4 0x670
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_5 0x674
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_0 0x680
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_1 0x684
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_2 0x688
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_3 0x68c
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_4 0x690
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_5 0x694
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_0 0x6a0
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_1 0x6a4
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_2 0x6a8
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_3 0x6ac
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_4 0x6b0
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_5 0x6b4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_0 0x6c0
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_1 0x6c4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_2 0x6c8
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_3 0x6cc
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_0 0x6e0
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_1 0x6e4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_2 0x6e8
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_3 0x6ec
+#define EMC_PMACRO_TX_PWRD_0 0x720
+#define EMC_PMACRO_TX_PWRD_1 0x724
+#define EMC_PMACRO_TX_PWRD_2 0x728
+#define EMC_PMACRO_TX_PWRD_3 0x72c
+#define EMC_PMACRO_TX_PWRD_4 0x730
+#define EMC_PMACRO_TX_PWRD_5 0x734
+#define EMC_PMACRO_TX_SEL_CLK_SRC_0 0x740
+#define EMC_PMACRO_TX_SEL_CLK_SRC_1 0x744
+#define EMC_PMACRO_TX_SEL_CLK_SRC_2			0x748
+#define EMC_PMACRO_TX_SEL_CLK_SRC_3			0x74c
+#define EMC_PMACRO_TX_SEL_CLK_SRC_4 0x750
+#define EMC_PMACRO_TX_SEL_CLK_SRC_5 0x754
+#define EMC_PMACRO_DDLL_BYPASS 0x760
+#define EMC_PMACRO_DDLL_PWRD_0 0x770
+#define EMC_PMACRO_DDLL_PWRD_1 0x774
+#define EMC_PMACRO_DDLL_PWRD_2 0x778
+#define EMC_PMACRO_CMD_CTRL_0 0x780
+#define EMC_PMACRO_CMD_CTRL_1 0x784
+#define EMC_PMACRO_CMD_CTRL_2 0x788
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_0 0x800
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_1 0x804
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_2 0x808
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_3 0x80c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_0 0x810
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_1 0x814
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_2 0x818
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_3 0x81c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_0 0x820
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_1 0x824
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_2 0x828
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_3 0x82c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_0 0x830
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_1 0x834
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_2 0x838
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_3 0x83c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_0 0x840
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_1 0x844
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_2 0x848
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_3 0x84c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_0 0x850
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_1 0x854
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_2 0x858
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_3 0x85c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_0 0x860
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_1 0x864
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_2 0x868
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_3 0x86c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_0 0x870
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_1 0x874
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_2 0x878
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_3 0x87c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_0 0x880
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_1 0x884
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_2 0x888
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_3 0x88c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_0 0x890
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_1 0x894
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_2 0x898
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_3 0x89c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_0 0x8a0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_1 0x8a4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_2 0x8a8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_3 0x8ac
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_0 0x8b0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_1 0x8b4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_2 0x8b8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_3 0x8bc
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_0 0x900
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_1 0x904
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_2 0x908
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_3 0x90c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_0 0x910
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_1 0x914
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_2 0x918
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_3 0x91c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_0 0x920
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_1 0x924
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_2 0x928
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_3 0x92c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_0 0x930
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_1 0x934
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_2 0x938
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_3 0x93c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_0 0x940
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_1 0x944
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_2 0x948
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_3 0x94c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_0 0x950
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_1 0x954
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_2 0x958
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_3 0x95c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_0 0x960
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_1 0x964
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_2 0x968
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_3 0x96c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_0 0x970
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_1 0x974
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_2 0x978
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_3 0x97c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_0 0x980
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_1 0x984
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_2 0x988
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_3 0x98c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_0 0x990
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_1 0x994
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_2 0x998
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_3 0x99c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_0 0x9a0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_1 0x9a4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_2 0x9a8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_3 0x9ac
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_0 0x9b0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_1 0x9b4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_2 0x9b8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_3 0x9bc
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_0 0xa00
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_1 0xa04
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_2 0xa08
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_0 0xa10
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_1 0xa14
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_2 0xa18
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_0 0xa20
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_1 0xa24
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_2 0xa28
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_0 0xa30
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_1 0xa34
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_2 0xa38
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_0 0xa40
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_1 0xa44
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_2 0xa48
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_0 0xa50
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_1 0xa54
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_2 0xa58
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_0 0xa60
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_1 0xa64
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_2 0xa68
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_0 0xa70
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_1 0xa74
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_2 0xa78
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_0 0xb00
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_1 0xb04
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_2 0xb08
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_0 0xb10
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_1 0xb14
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_2 0xb18
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_0 0xb20
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_1 0xb24
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_2 0xb28
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_0 0xb30
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_1 0xb34
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_2 0xb38
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_0 0xb40
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_1 0xb44
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_2 0xb48
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_0 0xb50
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_1 0xb54
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_2 0xb58
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_0 0xb60
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_1 0xb64
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_2 0xb68
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_0 0xb70
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_1 0xb74
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_2 0xb78
+#define EMC_PMACRO_IB_VREF_DQ_0 0xbe0
+#define EMC_PMACRO_IB_VREF_DQ_1 0xbe4
+#define EMC_PMACRO_IB_VREF_DQS_0 0xbf0
+#define EMC_PMACRO_IB_VREF_DQS_1 0xbf4
+#define EMC_PMACRO_DDLL_LONG_CMD_0 0xc00
+#define EMC_PMACRO_DDLL_LONG_CMD_1 0xc04
+#define EMC_PMACRO_DDLL_LONG_CMD_2 0xc08
+#define EMC_PMACRO_DDLL_LONG_CMD_3 0xc0c
+#define EMC_PMACRO_DDLL_LONG_CMD_4 0xc10
+#define EMC_PMACRO_DDLL_LONG_CMD_5 0xc14
+#define EMC_PMACRO_DDLL_SHORT_CMD_0 0xc20
+#define EMC_PMACRO_DDLL_SHORT_CMD_1 0xc24
+#define EMC_PMACRO_DDLL_SHORT_CMD_2 0xc28
+#define EMC_PMACRO_CFG_PM_GLOBAL_0 0xc30
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 BIT(16)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 BIT(17)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 BIT(18)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 BIT(19)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 BIT(20)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 BIT(21)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 BIT(22)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7 BIT(23)
+#define EMC_PMACRO_VTTGEN_CTRL_0 0xc34
+#define EMC_PMACRO_VTTGEN_CTRL_1 0xc38
+#define EMC_PMACRO_BG_BIAS_CTRL_0 0xc3c
+#define EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD BIT(0)
+#define EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD BIT(2)
+#define EMC_PMACRO_PAD_CFG_CTRL 0xc40
+#define EMC_PMACRO_ZCTRL 0xc44
+#define EMC_PMACRO_CMD_PAD_RX_CTRL 0xc50
+#define EMC_PMACRO_DATA_PAD_RX_CTRL 0xc54
+#define EMC_PMACRO_CMD_RX_TERM_MODE 0xc58
+#define EMC_PMACRO_DATA_RX_TERM_MODE 0xc5c
+#define EMC_PMACRO_CMD_PAD_TX_CTRL 0xc60
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC BIT(1)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC BIT(9)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC BIT(16)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC BIT(24)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON BIT(26)
+
+#define EMC_PMACRO_DATA_PAD_TX_CTRL 0xc64
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF BIT(0)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC BIT(1)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF BIT(8)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC BIT(9)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC BIT(16)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC BIT(24)
+
+#define EMC_PMACRO_COMMON_PAD_TX_CTRL 0xc68
+#define EMC_PMACRO_AUTOCAL_CFG_COMMON 0xc78
+#define EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS BIT(16)
+#define EMC_PMACRO_VTTGEN_CTRL_2 0xcf0
+#define EMC_PMACRO_IB_RXRT 0xcf4
+#define EMC_PMACRO_TRAINING_CTRL_0 0xcf8
+#define EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR BIT(3)
+#define EMC_PMACRO_TRAINING_CTRL_1 0xcfc
+#define EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR BIT(3)
+#define EMC_TRAINING_CTRL 0xe04
+#define EMC_TRAINING_QUSE_CORS_CTRL 0xe0c
+#define EMC_TRAINING_QUSE_FINE_CTRL 0xe10
+#define EMC_TRAINING_QUSE_CTRL_MISC 0xe14
+#define EMC_TRAINING_WRITE_FINE_CTRL 0xe18
+#define EMC_TRAINING_WRITE_CTRL_MISC 0xe1c
+#define EMC_TRAINING_WRITE_VREF_CTRL 0xe20
+#define EMC_TRAINING_READ_FINE_CTRL 0xe24
+#define EMC_TRAINING_READ_CTRL_MISC 0xe28
+#define EMC_TRAINING_READ_VREF_CTRL 0xe2c
+#define EMC_TRAINING_CA_FINE_CTRL 0xe30
+#define EMC_TRAINING_CA_CTRL_MISC 0xe34
+#define EMC_TRAINING_CA_CTRL_MISC1 0xe38
+#define EMC_TRAINING_CA_VREF_CTRL 0xe3c
+#define EMC_TRAINING_SETTLE 0xe44
+#define EMC_TRAINING_MPC 0xe5c
+#define EMC_TRAINING_VREF_SETTLE 0xe6c
+#define EMC_TRAINING_QUSE_VREF_CTRL 0xed0
+#define EMC_TRAINING_OPT_DQS_IB_VREF_RANK0 0xed4
+#define EMC_TRAINING_OPT_DQS_IB_VREF_RANK1 0xed8
+
+#define EMC_COPY_TABLE_PARAM_PERIODIC_FIELDS BIT(0)
+#define EMC_COPY_TABLE_PARAM_TRIM_REGS BIT(1)
+
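+/*
+ * Indices into the burst_regs[] table for registers that need to be
+ * accessed individually; enumerators without an explicit value continue
+ * from the preceding index.
+ */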
+enum burst_regs_list {
+ EMC_RP_INDEX = 6,
+ EMC_R2P_INDEX = 9,
+ EMC_W2P_INDEX,
+ EMC_MRW6_INDEX = 31,
+ EMC_REFRESH_INDEX = 41,
+ EMC_PRE_REFRESH_REQ_CNT_INDEX = 43,
+ EMC_TRPAB_INDEX = 59,
+ EMC_MRW7_INDEX = 62,
+ EMC_FBIO_CFG5_INDEX = 65,
+ EMC_FBIO_CFG7_INDEX,
+ EMC_CFG_DIG_DLL_INDEX,
+ EMC_ZCAL_INTERVAL_INDEX = 139,
+ EMC_ZCAL_WAIT_CNT_INDEX,
+ EMC_MRS_WAIT_CNT_INDEX = 141,
+ EMC_DLL_CFG_0_INDEX = 144,
+ EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX = 146,
+ EMC_CFG_INDEX = 148,
+ EMC_DYN_SELF_REF_CONTROL_INDEX = 150,
+ EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX = 161,
+ EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX,
+ EMC_PMACRO_BRICK_CTRL_RFU1_INDEX = 167,
+ EMC_PMACRO_BG_BIAS_CTRL_0_INDEX = 171,
+ EMC_MRW14_INDEX = 199,
+ EMC_MRW15_INDEX = 220,
+};
+
+enum trim_regs_list {
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_INDEX = 60,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_INDEX,
+};
+
+enum burst_mc_regs_list {
+ MC_EMEM_ARB_MISC0_INDEX = 20,
+};
+
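+/* Indices into the dram_timings[] array (DRAM_TIMINGS_NUM entries). */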
+enum {
+ T_RP,
+ T_FC_LPDDR4,
+ T_RFC,
+ T_PDEX,
+ RL,
+};
+
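+/* Power-saving modes: automatic power-down or manual self-refresh. */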
+enum {
+ AUTO_PD = 0,
+ MAN_SR = 2,
+};
+
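+/* Destination register files for writes: shadow (assembly) or active. */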
+enum {
+ ASSEMBLY = 0,
+ ACTIVE,
+};
+
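+/*
+ * DRAM clock tree sample points, one per combination of channel (C),
+ * device (D) and sample (U).
+ */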
+enum {
+ C0D0U0,
+ C0D0U1,
+ C0D1U0,
+ C0D1U1,
+ C1D0U0,
+ C1D0U1,
+ C1D1U0,
+ C1D1U1,
+ DRAM_CLKTREE_NUM,
+};
+
+#define VREF_REGS_PER_CHANNEL_SIZE 4
+#define DRAM_TIMINGS_NUM 5
+#define BURST_REGS_PER_CHANNEL_SIZE 8
+#define TRIM_REGS_PER_CHANNEL_SIZE 10
+#define PTFV_ARRAY_SIZE 12
+#define SAVE_RESTORE_MOD_REGS_SIZE 12
+#define TRAINING_MOD_REGS_SIZE 20
+#define BURST_UP_DOWN_REGS_SIZE 24
+#define BURST_MC_REGS_SIZE 33
+#define TRIM_REGS_SIZE 138
+#define BURST_REGS_SIZE 221
+
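+/* Per-channel registers are addressed by a register bank plus an offset. */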
+struct tegra210_emc_per_channel_regs {
+ u16 bank;
+ u16 offset;
+};
+
+struct tegra210_emc_table_register_offsets {
+ u16 burst[BURST_REGS_SIZE];
+ u16 trim[TRIM_REGS_SIZE];
+ u16 burst_mc[BURST_MC_REGS_SIZE];
+ u16 la_scale[BURST_UP_DOWN_REGS_SIZE];
+ struct tegra210_emc_per_channel_regs burst_per_channel[BURST_REGS_PER_CHANNEL_SIZE];
+ struct tegra210_emc_per_channel_regs trim_per_channel[TRIM_REGS_PER_CHANNEL_SIZE];
+ struct tegra210_emc_per_channel_regs vref_per_channel[VREF_REGS_PER_CHANNEL_SIZE];
+};
+
+struct tegra210_emc_timing {
+ u32 revision;
+ const char dvfs_ver[60];
+ u32 rate;
+ u32 min_volt;
+ u32 gpu_min_volt;
+ const char clock_src[32];
+ u32 clk_src_emc;
+ u32 needs_training;
+ u32 training_pattern;
+ u32 trained;
+
+ u32 periodic_training;
+ u32 trained_dram_clktree[DRAM_CLKTREE_NUM];
+ u32 current_dram_clktree[DRAM_CLKTREE_NUM];
+ u32 run_clocks;
+ u32 tree_margin;
+
+ u32 num_burst;
+ u32 num_burst_per_ch;
+ u32 num_trim;
+ u32 num_trim_per_ch;
+ u32 num_mc_regs;
+ u32 num_up_down;
+ u32 vref_num;
+ u32 training_mod_num;
+ u32 dram_timing_num;
+
+ u32 ptfv_list[PTFV_ARRAY_SIZE];
+
+ u32 burst_regs[BURST_REGS_SIZE];
+ u32 burst_reg_per_ch[BURST_REGS_PER_CHANNEL_SIZE];
+ u32 shadow_regs_ca_train[BURST_REGS_SIZE];
+ u32 shadow_regs_quse_train[BURST_REGS_SIZE];
+ u32 shadow_regs_rdwr_train[BURST_REGS_SIZE];
+
+ u32 trim_regs[TRIM_REGS_SIZE];
+ u32 trim_perch_regs[TRIM_REGS_PER_CHANNEL_SIZE];
+
+ u32 vref_perch_regs[VREF_REGS_PER_CHANNEL_SIZE];
+
+ u32 dram_timings[DRAM_TIMINGS_NUM];
+ u32 training_mod_regs[TRAINING_MOD_REGS_SIZE];
+ u32 save_restore_mod_regs[SAVE_RESTORE_MOD_REGS_SIZE];
+ u32 burst_mc_regs[BURST_MC_REGS_SIZE];
+ u32 la_scale_regs[BURST_UP_DOWN_REGS_SIZE];
+
+ u32 min_mrs_wait;
+ u32 emc_mrw;
+ u32 emc_mrw2;
+ u32 emc_mrw3;
+ u32 emc_mrw4;
+ u32 emc_mrw9;
+ u32 emc_mrs;
+ u32 emc_emrs;
+ u32 emc_emrs2;
+ u32 emc_auto_cal_config;
+ u32 emc_auto_cal_config2;
+ u32 emc_auto_cal_config3;
+ u32 emc_auto_cal_config4;
+ u32 emc_auto_cal_config5;
+ u32 emc_auto_cal_config6;
+ u32 emc_auto_cal_config7;
+ u32 emc_auto_cal_config8;
+ u32 emc_cfg_2;
+ u32 emc_sel_dpd_ctrl;
+ u32 emc_fdpd_ctrl_cmd_no_ramp;
+ u32 dll_clk_src;
+ u32 clk_out_enb_x_0_clk_enb_emc_dll;
+ u32 latency;
+};
+
+enum tegra210_emc_refresh {
+ TEGRA210_EMC_REFRESH_NOMINAL = 0,
+ TEGRA210_EMC_REFRESH_2X,
+ TEGRA210_EMC_REFRESH_4X,
+ TEGRA210_EMC_REFRESH_THROTTLE, /* 4x Refresh + derating. */
+};
+
+#define DRAM_TYPE_DDR3 0
+#define DRAM_TYPE_LPDDR4 1
+#define DRAM_TYPE_LPDDR2 2
+#define DRAM_TYPE_DDR2 3
+
+struct tegra210_emc {
+ struct tegra_mc *mc;
+ struct device *dev;
+ struct clk *clk;
+
+ /* nominal EMC frequency table */
+ struct tegra210_emc_timing *nominal;
+ /* derated EMC frequency table */
+ struct tegra210_emc_timing *derated;
+
+ /* currently selected table (nominal or derated) */
+ struct tegra210_emc_timing *timings;
+ unsigned int num_timings;
+
+ const struct tegra210_emc_table_register_offsets *offsets;
+
+ const struct tegra210_emc_sequence *sequence;
+ spinlock_t lock;
+
+ void __iomem *regs, *channel[2];
+ unsigned int num_channels;
+ unsigned int num_devices;
+ unsigned int dram_type;
+
+ struct tegra210_emc_timing *last;
+ struct tegra210_emc_timing *next;
+
+ unsigned int training_interval;
+ struct timer_list training;
+
+ enum tegra210_emc_refresh refresh;
+ unsigned int refresh_poll_interval;
+ struct timer_list refresh_timer;
+ unsigned int temperature;
+ atomic_t refresh_poll;
+
+ ktime_t clkchange_time;
+ int clkchange_delay;
+
+ unsigned long resume_rate;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ unsigned int temperature;
+ } debugfs;
+
+ struct tegra210_clk_emc_provider provider;
+};
+
+struct tegra210_emc_sequence {
+ u8 revision;
+ void (*set_clock)(struct tegra210_emc *emc, u32 clksrc);
+ u32 (*periodic_compensation)(struct tegra210_emc *emc);
+};
+
+static inline void emc_writel(struct tegra210_emc *emc, u32 value,
+ unsigned int offset)
+{
+ writel_relaxed(value, emc->regs + offset);
+}
+
+static inline u32 emc_readl(struct tegra210_emc *emc, unsigned int offset)
+{
+ return readl_relaxed(emc->regs + offset);
+}
+
+static inline void emc_channel_writel(struct tegra210_emc *emc,
+ unsigned int channel,
+ u32 value, unsigned int offset)
+{
+ writel_relaxed(value, emc->channel[channel] + offset);
+}
+
+static inline u32 emc_channel_readl(struct tegra210_emc *emc,
+ unsigned int channel, unsigned int offset)
+{
+ return readl_relaxed(emc->channel[channel] + offset);
+}
+
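+/*
+ * Writes queued through the CCFIFO are applied by the hardware during a
+ * clock change: the value is pushed into the data register first, then
+ * the target offset and stall delay into the address register.
+ */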
+static inline void ccfifo_writel(struct tegra210_emc *emc, u32 value,
+ unsigned int offset, u32 delay)
+{
+ writel_relaxed(value, emc->regs + EMC_CCFIFO_DATA);
+
+ value = EMC_CCFIFO_ADDR_STALL_BY_1 | EMC_CCFIFO_ADDR_STALL(delay) |
+ EMC_CCFIFO_ADDR_OFFSET(offset);
+ writel_relaxed(value, emc->regs + EMC_CCFIFO_ADDR);
+}
+
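+/* Divide and round up (ceiling division). */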
+static inline u32 div_o3(u32 a, u32 b)
+{
+ u32 result = a / b;
+
+ if ((b * result) < a)
+ return result + 1;
+
+ return result;
+}
+
+/* from tegra210-emc-r21021.c */
+extern const struct tegra210_emc_sequence tegra210_emc_r21021;
+
+int tegra210_emc_set_refresh(struct tegra210_emc *emc,
+ enum tegra210_emc_refresh refresh);
+u32 tegra210_emc_mrr_read(struct tegra210_emc *emc, unsigned int chip,
+ unsigned int address);
+void tegra210_emc_do_clock_change(struct tegra210_emc *emc, u32 clksrc);
+void tegra210_emc_set_shadow_bypass(struct tegra210_emc *emc, int set);
+void tegra210_emc_timing_update(struct tegra210_emc *emc);
+u32 tegra210_emc_get_dll_state(struct tegra210_emc_timing *next);
+struct tegra210_emc_timing *tegra210_emc_find_timing(struct tegra210_emc *emc,
+ unsigned long rate);
+void tegra210_emc_adjust_timing(struct tegra210_emc *emc,
+ struct tegra210_emc_timing *timing);
+int tegra210_emc_wait_for_update(struct tegra210_emc *emc, unsigned int channel,
+ unsigned int offset, u32 bit_mask, bool state);
+unsigned long tegra210_emc_actual_osc_clocks(u32 in);
+u32 tegra210_emc_compensate(struct tegra210_emc_timing *next, u32 offset);
+void tegra210_emc_dll_disable(struct tegra210_emc *emc);
+void tegra210_emc_dll_enable(struct tegra210_emc *emc);
+u32 tegra210_emc_dll_prelock(struct tegra210_emc *emc, u32 clksrc);
+u32 tegra210_emc_dvfs_power_ramp_down(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward);
+u32 tegra210_emc_dvfs_power_ramp_up(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward);
+void tegra210_emc_reset_dram_clktree_values(struct tegra210_emc_timing *timing);
+void tegra210_emc_start_periodic_compensation(struct tegra210_emc *emc);
+
+#endif
diff --git a/drivers/memory/tegra/tegra210-mc.h b/drivers/memory/tegra/tegra210-mc.h
new file mode 100644
index 000000000000..b9b91ceb4730
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-mc.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef TEGRA210_MC_H
+#define TEGRA210_MC_H
+
+#include "mc.h"
+
+/* register definitions */
+#define MC_LATENCY_ALLOWANCE_AVPC_0 0x2e4
+#define MC_LATENCY_ALLOWANCE_HC_0 0x310
+#define MC_LATENCY_ALLOWANCE_HC_1 0x314
+#define MC_LATENCY_ALLOWANCE_MPCORE_0 0x320
+#define MC_LATENCY_ALLOWANCE_NVENC_0 0x328
+#define MC_LATENCY_ALLOWANCE_PPCS_0 0x344
+#define MC_LATENCY_ALLOWANCE_PPCS_1 0x348
+#define MC_LATENCY_ALLOWANCE_ISP2_0 0x370
+#define MC_LATENCY_ALLOWANCE_ISP2_1 0x374
+#define MC_LATENCY_ALLOWANCE_XUSB_0 0x37c
+#define MC_LATENCY_ALLOWANCE_XUSB_1 0x380
+#define MC_LATENCY_ALLOWANCE_TSEC_0 0x390
+#define MC_LATENCY_ALLOWANCE_VIC_0 0x394
+#define MC_LATENCY_ALLOWANCE_VI2_0 0x398
+#define MC_LATENCY_ALLOWANCE_GPU_0 0x3ac
+#define MC_LATENCY_ALLOWANCE_SDMMCA_0 0x3b8
+#define MC_LATENCY_ALLOWANCE_SDMMCAA_0 0x3bc
+#define MC_LATENCY_ALLOWANCE_SDMMC_0 0x3c0
+#define MC_LATENCY_ALLOWANCE_SDMMCAB_0 0x3c4
+#define MC_LATENCY_ALLOWANCE_GPU2_0 0x3e8
+#define MC_LATENCY_ALLOWANCE_NVDEC_0 0x3d8
+#define MC_MLL_MPCORER_PTSA_RATE 0x44c
+#define MC_FTOP_PTSA_RATE 0x50c
+#define MC_EMEM_ARB_TIMING_RFCPB 0x6c0
+#define MC_EMEM_ARB_TIMING_CCDMW 0x6c4
+#define MC_EMEM_ARB_REFPB_HP_CTRL 0x6f0
+#define MC_EMEM_ARB_REFPB_BANK_CTRL 0x6f4
+#define MC_PTSA_GRANT_DECREMENT 0x960
+#define MC_EMEM_ARB_DHYST_CTRL 0xbcc
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_0 0xbd0
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_1 0xbd4
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_2 0xbd8
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_3 0xbdc
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_4 0xbe0
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_5 0xbe4
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_6 0xbe8
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_7 0xbec
+
+#endif
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index b42bdb667e85..055af0e08a2e 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -11,7 +11,6 @@
#include <linux/clk.h>
#include <linux/clk/tegra.h>
-#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -327,7 +326,6 @@ struct emc_timing {
struct tegra_emc {
struct device *dev;
struct tegra_mc *mc;
- struct completion clk_handshake_complete;
struct notifier_block clk_nb;
struct clk *clk;
void __iomem *regs;
@@ -374,52 +372,10 @@ static int emc_seq_update_timing(struct tegra_emc *emc)
return 0;
}
-static void emc_complete_clk_change(struct tegra_emc *emc)
-{
- struct emc_timing *timing = emc->new_timing;
- unsigned int dram_num;
- bool failed = false;
- int err;
-
- /* re-enable auto-refresh */
- dram_num = tegra_mc_get_emem_device_count(emc->mc);
- writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
- emc->regs + EMC_REFCTRL);
-
- /* restore auto-calibration */
- if (emc->vref_cal_toggle)
- writel_relaxed(timing->emc_auto_cal_interval,
- emc->regs + EMC_AUTO_CAL_INTERVAL);
-
- /* restore dynamic self-refresh */
- if (timing->emc_cfg_dyn_self_ref) {
- emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
- writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
- }
-
- /* set number of clocks to wait after each ZQ command */
- if (emc->zcal_long)
- writel_relaxed(timing->emc_zcal_cnt_long,
- emc->regs + EMC_ZCAL_WAIT_CNT);
-
- /* wait for writes to settle */
- udelay(2);
-
- /* update restored timing */
- err = emc_seq_update_timing(emc);
- if (err)
- failed = true;
-
- /* restore early ACK */
- mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
-
- WRITE_ONCE(emc->bad_state, failed);
-}
-
static irqreturn_t tegra_emc_isr(int irq, void *data)
{
struct tegra_emc *emc = data;
- u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
u32 status;
status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
@@ -434,18 +390,6 @@ static irqreturn_t tegra_emc_isr(int irq, void *data)
/* clear interrupts */
writel_relaxed(status, emc->regs + EMC_INTSTATUS);
- /* notify about EMC-CAR handshake completion */
- if (status & EMC_CLKCHANGE_COMPLETE_INT) {
- if (completion_done(&emc->clk_handshake_complete)) {
- dev_err_ratelimited(emc->dev,
- "bogus handshake interrupt\n");
- return IRQ_NONE;
- }
-
- emc_complete_clk_change(emc);
- complete(&emc->clk_handshake_complete);
- }
-
return IRQ_HANDLED;
}
@@ -801,29 +745,58 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
*/
mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
- reinit_completion(&emc->clk_handshake_complete);
-
- emc->new_timing = timing;
-
return 0;
}
static int emc_complete_timing_change(struct tegra_emc *emc,
unsigned long rate)
{
- unsigned long timeout;
+ struct emc_timing *timing = emc_find_timing(emc, rate);
+ unsigned int dram_num;
+ int err;
+ u32 v;
- timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
- msecs_to_jiffies(100));
- if (timeout == 0) {
- dev_err(emc->dev, "emc-car handshake failed\n");
- return -EIO;
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
+ v & EMC_CLKCHANGE_COMPLETE_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
+ return err;
}
- if (READ_ONCE(emc->bad_state))
- return -EIO;
+ /* re-enable auto-refresh */
+ dram_num = tegra_mc_get_emem_device_count(emc->mc);
+ writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
+ emc->regs + EMC_REFCTRL);
+
+ /* restore auto-calibration */
+ if (emc->vref_cal_toggle)
+ writel_relaxed(timing->emc_auto_cal_interval,
+ emc->regs + EMC_AUTO_CAL_INTERVAL);
- return 0;
+ /* restore dynamic self-refresh */
+ if (timing->emc_cfg_dyn_self_ref) {
+ emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+ }
+
+ /* set number of clocks to wait after each ZQ command */
+ if (emc->zcal_long)
+ writel_relaxed(timing->emc_zcal_cnt_long,
+ emc->regs + EMC_ZCAL_WAIT_CNT);
+
+ /* wait for writes to settle */
+ udelay(2);
+
+ /* update restored timing */
+ err = emc_seq_update_timing(emc);
+ if (!err)
+ emc->bad_state = false;
+
+ /* restore early ACK */
+ mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
+
+ return err;
}
static int emc_unprepare_timing_change(struct tegra_emc *emc,
@@ -1033,7 +1006,7 @@ static struct device_node *emc_find_node_by_ram_code(struct device *dev)
static int emc_setup_hw(struct tegra_emc *emc)
{
- u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
u32 fbio_cfg5, emc_cfg, emc_dbg;
enum emc_dram_type dram_type;
@@ -1275,11 +1248,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
return;
}
- debugfs_create_file("available_rates", S_IRUGO, emc->debugfs.root,
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root,
emc, &tegra_emc_debug_available_rates_fops);
- debugfs_create_file("min_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_min_rate_fops);
- debugfs_create_file("max_rate", S_IRUGO | S_IWUSR, emc->debugfs.root,
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_max_rate_fops);
}
@@ -1321,7 +1294,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
if (!emc->mc)
return -EPROBE_DEFER;
- init_completion(&emc->clk_handshake_complete);
emc->clk_nb.notifier_call = emc_clk_change_notify;
emc->dev = &pdev->dev;
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index db526dbf71ee..159a16f5e7d6 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -27,7 +27,7 @@
#define WSTROBE_SHIFT 20
#define WSETUP_SHIFT 26
#define EW_SHIFT 30
-#define SS_SHIFT 31
+#define SSTROBE_SHIFT 31
#define TA(x) ((x) << TA_SHIFT)
#define RHOLD(x) ((x) << RHOLD_SHIFT)
@@ -37,7 +37,7 @@
#define WSTROBE(x) ((x) << WSTROBE_SHIFT)
#define WSETUP(x) ((x) << WSETUP_SHIFT)
#define EW(x) ((x) << EW_SHIFT)
-#define SS(x) ((x) << SS_SHIFT)
+#define SSTROBE(x) ((x) << SSTROBE_SHIFT)
#define ASIZE_MAX 0x1
#define TA_MAX 0x3
@@ -48,7 +48,7 @@
#define WSTROBE_MAX 0x3f
#define WSETUP_MAX 0xf
#define EW_MAX 0x1
-#define SS_MAX 0x1
+#define SSTROBE_MAX 0x1
#define NUM_CS 4
#define TA_VAL(x) (((x) & TA(TA_MAX)) >> TA_SHIFT)
@@ -59,7 +59,7 @@
#define WSTROBE_VAL(x) (((x) & WSTROBE(WSTROBE_MAX)) >> WSTROBE_SHIFT)
#define WSETUP_VAL(x) (((x) & WSETUP(WSETUP_MAX)) >> WSETUP_SHIFT)
#define EW_VAL(x) (((x) & EW(EW_MAX)) >> EW_SHIFT)
-#define SS_VAL(x) (((x) & SS(SS_MAX)) >> SS_SHIFT)
+#define SSTROBE_VAL(x) (((x) & SSTROBE(SSTROBE_MAX)) >> SSTROBE_SHIFT)
#define NRCSR_OFFSET 0x00
#define AWCCR_OFFSET 0x04
@@ -67,7 +67,7 @@
#define ACR_ASIZE_MASK 0x3
#define ACR_EW_MASK BIT(30)
-#define ACR_SS_MASK BIT(31)
+#define ACR_SSTROBE_MASK BIT(31)
#define ASIZE_16BIT 1
#define CONFIG_MASK (TA(TA_MAX) | \
@@ -77,7 +77,7 @@
WHOLD(WHOLD_MAX) | \
WSTROBE(WSTROBE_MAX) | \
WSETUP(WSETUP_MAX) | \
- EW(EW_MAX) | SS(SS_MAX) | \
+ EW(EW_MAX) | SSTROBE(SSTROBE_MAX) | \
ASIZE_MAX)
/**
@@ -204,7 +204,7 @@ static int aemif_config_abus(struct platform_device *pdev, int csnum)
if (data->enable_ew)
set |= ACR_EW_MASK;
if (data->enable_ss)
- set |= ACR_SS_MASK;
+ set |= ACR_SSTROBE_MASK;
val = readl(aemif->base + offset);
val &= ~CONFIG_MASK;
@@ -246,7 +246,7 @@ static void aemif_get_hw_params(struct platform_device *pdev, int csnum)
data->wstrobe = aemif_cycles_to_nsec(WSTROBE_VAL(val), clk_rate);
data->wsetup = aemif_cycles_to_nsec(WSETUP_VAL(val), clk_rate);
data->enable_ew = EW_VAL(val);
- data->enable_ss = SS_VAL(val);
+ data->enable_ss = SSTROBE_VAL(val);
data->asize = val & ASIZE_MAX;
}
diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c
index 9c90f815ad3a..6c747c1e98cb 100644
--- a/drivers/memory/ti-emif-pm.c
+++ b/drivers/memory/ti-emif-pm.c
@@ -248,7 +248,7 @@ MODULE_DEVICE_TABLE(of, ti_emif_of_match);
static int ti_emif_resume(struct device *dev)
{
unsigned long tmp =
- __raw_readl((void *)emif_instance->ti_emif_sram_virt);
+ __raw_readl((void __iomem *)emif_instance->ti_emif_sram_virt);
/*
* Check to see if what we are copying is already present in the
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 0a9c5ddf2f59..383091517ed7 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -314,7 +314,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
}
while (length) {
- unsigned int uninitialized_var(p_off);
+ unsigned int p_off;
if (host->req->long_data) {
pg = nth_page(sg_page(&host->req->sg),
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index 5b966b54d6e9..fc35c7404429 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -198,7 +198,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
host->block_pos);
while (length) {
- unsigned int uninitialized_var(p_off);
+ unsigned int p_off;
if (host->req->long_data) {
pg = nth_page(sg_page(&host->req->sg),
diff --git a/drivers/mfd/ioc3.c b/drivers/mfd/ioc3.c
index 74cee7cb0afc..d939ccc46509 100644
--- a/drivers/mfd/ioc3.c
+++ b/drivers/mfd/ioc3.c
@@ -616,7 +616,10 @@ static int ioc3_mfd_probe(struct pci_dev *pdev,
/* Remove all already added MFD devices */
mfd_remove_devices(&ipd->pdev->dev);
if (ipd->domain) {
+ struct fwnode_handle *fn = ipd->domain->fwnode;
+
irq_domain_remove(ipd->domain);
+ irq_domain_free_fwnode(fn);
free_irq(ipd->domain_irq, (void *)ipd);
}
pci_iounmap(pdev, regs);
@@ -643,7 +646,10 @@ static void ioc3_mfd_remove(struct pci_dev *pdev)
/* Release resources */
mfd_remove_devices(&ipd->pdev->dev);
if (ipd->domain) {
+ struct fwnode_handle *fn = ipd->domain->fwnode;
+
irq_domain_remove(ipd->domain);
+ irq_domain_free_fwnode(fn);
free_irq(ipd->domain_irq, (void *)ipd);
}
pci_iounmap(pdev, ipd->regs);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index e1b1ba5e2b92..ce136d685d14 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -24,7 +24,7 @@ config AD525X_DPOT
AD5271, AD5272, AD5274
digital potentiometer chips.
- See Documentation/misc-devices/ad525x_dpot.txt for the
+ See Documentation/misc-devices/ad525x_dpot.rst for the
userspace interface.
This driver can also be built as a module. If so, the module
@@ -83,7 +83,7 @@ config IBM_ASM
WARNING: This software may not be supported or function
correctly on your IBM server. Please consult the IBM ServerProven
- website <http://www-03.ibm.com/systems/info/x86servers/serverproven/compat/us/>
+ website <https://www-03.ibm.com/systems/info/x86servers/serverproven/compat/us/>
for information on the specific driver level and support statement
for your IBM server.
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index ccce3226a571..6f164522b028 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -58,7 +58,7 @@
* AD5272 1 1024 20, 50, 100 (50-TP)
* AD5274 1 256 20, 50, 100 (50-TP)
*
- * See Documentation/misc-devices/ad525x_dpot.txt for more info.
+ * See Documentation/misc-devices/ad525x_dpot.rst for more info.
*
* derived from ad5258.c
* Copyright (c) 2009 Cyber Switching, Inc.
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 33bba1802289..80d87e8a0bea 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -923,7 +923,7 @@ struct c2port_device *c2port_device_register(char *name,
}
dev_set_drvdata(c2dev->dev, c2dev);
- strncpy(c2dev->name, name, C2PORT_NAME_LEN);
+ strncpy(c2dev->name, name, C2PORT_NAME_LEN - 1);
c2dev->ops = ops;
mutex_init(&c2dev->mutex);
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
index 1f56267ed2f4..895128475d83 100644
--- a/drivers/misc/cardreader/Makefile
+++ b/drivers/misc/cardreader/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MISC_ALCOR_PCI) += alcor_pci.o
obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o
obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/misc/cardreader/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
index 489ebe907688..a07674ed0596 100644
--- a/drivers/misc/cardreader/rtl8411.c
+++ b/drivers/misc/cardreader/rtl8411.c
@@ -37,10 +37,11 @@ static int rtl8411b_is_qfn48(struct rtsx_pcr *pcr)
static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg1 = 0;
u8 reg3 = 0;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg1);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg1);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1);
if (!rtsx_vendor_setting_valid(reg1))
@@ -52,16 +53,17 @@ static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg1);
- rtsx_pci_read_config_byte(pcr, PCR_SETTING_REG3, &reg3);
+ pci_read_config_byte(pdev, PCR_SETTING_REG3, &reg3);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG3, reg3);
pcr->sd30_drive_sel_3v3 = rtl8411_reg_to_sd30_drive_sel_3v3(reg3);
}
static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg = 0;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
diff --git a/drivers/misc/cardreader/rts5209.c b/drivers/misc/cardreader/rts5209.c
index 659056164b21..39a6a7ecc32e 100644
--- a/drivers/misc/cardreader/rts5209.c
+++ b/drivers/misc/cardreader/rts5209.c
@@ -23,9 +23,10 @@ static u8 rts5209_get_ic_version(struct rtsx_pcr *pcr)
static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (rts5209_vendor_setting1_valid(reg)) {
@@ -34,7 +35,7 @@ static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->aspm_en = rts5209_reg_to_aspm(reg);
}
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
if (rts5209_vendor_setting2_valid(reg)) {
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 3a9467aaa435..f5f392ddf3d6 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -56,9 +56,10 @@ static void rts5227_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
@@ -69,7 +70,7 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
new file mode 100644
index 000000000000..28feab1449ab
--- /dev/null
+++ b/drivers/misc/cardreader/rts5228.c
@@ -0,0 +1,747 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Ricky WU <ricky_wu@realtek.com>
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5228.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5228_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & IC_VERSION_MASK;
+}
+
+static void rts5228_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
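+ /*
+ * Drive strength tables, indexed by drive select; the three columns
+ * are applied to the CLK, CMD and DAT drive select registers in turn.
+ */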
+ u8 driving_3v3[4][3] = {
+ {0x13, 0x13, 0x13},
+ {0x96, 0x96, 0x96},
+ {0x7F, 0x7F, 0x7F},
+ {0x96, 0x96, 0x96},
+ };
+ u8 driving_1v8[4][3] = {
+ {0x99, 0x99, 0x99},
+ {0xB5, 0xB5, 0xB5},
+ {0xE6, 0x7E, 0xFE},
+ {0x6B, 0x6B, 0x6B},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+
+ rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+
+ rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rtsx5228_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ struct pci_dev *pdev = pcr->pci;
+ u32 reg;
+
+ /* 0x724~0x727 */
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ if (!rtsx_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+ pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->aspm_en = rtsx_reg_to_aspm(reg);
+
+ /* 0x814~0x817 */
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+
+ pcr->rtd3_en = rtsx_reg_to_rtd3(reg);
+ if (rtsx_check_mmc_support(reg))
+ pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
+ pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+ if (rtsx_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static int rts5228_optimize_phy(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_phy_register(pcr, 0x07, 0x8F40);
+}
+
+static void rts5228_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, FPDCTL,
+ SSC_POWER_DOWN, SSC_POWER_DOWN);
+}
+
+static int rts5228_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rts5228_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5228_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x02);
+}
+
+static int rts5228_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x00);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5228_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5228_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+static int rts5228_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5228_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0,
+ CFG_SD_POW_AUTO_PD, CFG_SD_POW_AUTO_PD);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1,
+ RTS5228_LDO1_TUNE_MASK, RTS5228_LDO1_33);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
+ RTS5228_LDO1_POWERON_MASK, RTS5228_LDO1_SOFTSTART);
+ mdelay(2);
+ rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
+ RTS5228_LDO1_POWERON_MASK, RTS5228_LDO1_FULLON);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
+ RTS5228_LDO3318_POWERON, RTS5228_LDO3318_POWERON);
+
+ msleep(20);
+
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT);
+
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ rts5228_sd_set_sample_push_timing_sd30(pcr);
+
+ return 0;
+}
+
+static int rts5228_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+ u16 val = 0;
+
+ rtsx_pci_write_register(pcr, RTS5228_CARD_PWR_CTL,
+ RTS5228_PUPDC, RTS5228_PUPDC);
+
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val |= PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5228_DV3318_CFG,
+ RTS5228_DV3318_TUNE_MASK, RTS5228_DV3318_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val &= ~PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5228_DV3318_CFG,
+ RTS5228_DV3318_TUNE_MASK, RTS5228_DV3318_18);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rts5228_fill_driving(pcr, voltage);
+
+ return 0;
+}
+
+static void rts5228_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5228_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ rts5228_stop_cmd(pcr);
+ rts5228_switch_output_voltage(pcr, OUTPUT_3V3);
+}
+
+static void rts5228_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = 0;
+
+ val = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN);
+}
+
+static void rts5228_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, 0);
+}
+
+static int rts5228_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5228_card_before_power_off(pcr);
+ err = rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
+ RTS5228_LDO_POWERON_MASK, 0);
+ rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0, CFG_SD_POW_AUTO_PD, 0);
+
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ return err;
+}
+
+static void rts5228_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd);
+
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_LMT_THD_MASK,
+ RTS5228_LDO1_LMT_THD_1500);
+
+ rtsx_pci_read_register(pcr, RTS5228_LDO1_CFG0, &val);
+
+ mask = SD_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ rts5228_enable_ocp(pcr);
+
+ } else {
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
+ RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, 0);
+ }
+}
+
+static void rts5228_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+
+ udelay(1000);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+}
+
+static void rts5228_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+
+ if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ rts5228_clear_ocpstat(pcr);
+ rts5228_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ pcr->ocp_stat = 0;
+ }
+}
+
+static void rts5228_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
+ u32 lval;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+
+ if ((lval & 0x0F) == 0)
+ rtsx_pci_enable_oobs_polling(pcr);
+ else
+ rtsx_pci_disable_oobs_polling(pcr);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+}
+
+static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+
+ rts5228_init_from_cfg(pcr);
+
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0);
+
+ rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
+ FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL,
+ PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
+ rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, CLK_PM_EN, CLK_PM_EN);
+
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02);
+
+ /* Configure driving */
+ rts5228_fill_driving(pcr, OUTPUT_3V3);
+
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+
+ /*
+ * If u_force_clkreq_0 is enabled, the CLKREQ# pin will be forced
+ * to drive low, and we forcibly request the clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+ rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+
+ return 0;
+}
+
+static void rts5228_enable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ u8 mask, val;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ val |= (pcr->aspm_en & 0x02);
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5228_disable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ u8 mask, val;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0);
+ mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+ mdelay(10);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5228_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ if (enable)
+ rts5228_enable_aspm(pcr, true);
+ else
+ rts5228_disable_aspm(pcr, false);
+}
+
+static void rts5228_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5228_pcr_ops = {
+ .fetch_vendor_settings = rtsx5228_fetch_vendor_settings,
+ .turn_on_led = rts5228_turn_on_led,
+ .turn_off_led = rts5228_turn_off_led,
+ .extra_init_hw = rts5228_extra_init_hw,
+ .enable_auto_blink = rts5228_enable_auto_blink,
+ .disable_auto_blink = rts5228_disable_auto_blink,
+ .card_power_on = rts5228_card_power_on,
+ .card_power_off = rts5228_card_power_off,
+ .switch_output_voltage = rts5228_switch_output_voltage,
+ .force_power_down = rts5228_force_power_down,
+ .stop_cmd = rts5228_stop_cmd,
+ .set_aspm = rts5228_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5228_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5228_enable_ocp,
+ .disable_ocp = rts5228_disable_ocp,
+ .init_ocp = rts5228_init_ocp,
+ .process_ocp = rts5228_process_ocp,
+ .clear_ocpstat = rts5228_clear_ocpstat,
+ .optimize_phy = rts5228_optimize_phy,
+};
+
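+/*
+ * The SSC depth encoding decreases by one as the depth doubles (e.g.
+ * RTS5228_SSC_DEPTH_4M -> RTS5228_SSC_DEPTH_8M), so doubling saturates
+ * at the 8M code.
+ */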
+static inline u8 double_ssc_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+int rts5228_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+ int err, clk;
+ u16 n;
+ u8 clk_divider, mcu_cnt, div;
+ static const u8 depth[] = {
+ [RTSX_SSC_DEPTH_4M] = RTS5228_SSC_DEPTH_4M,
+ [RTSX_SSC_DEPTH_2M] = RTS5228_SSC_DEPTH_2M,
+ [RTSX_SSC_DEPTH_1M] = RTS5228_SSC_DEPTH_1M,
+ [RTSX_SSC_DEPTH_500K] = RTS5228_SSC_DEPTH_512K,
+ };
+
+ if (initial_mode) {
+ /* Use around 250 kHz (30 MHz / 128) in the initial stage */
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_0;
+ }
+ err = rtsx_pci_write_register(pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, clk_divider);
+ if (err < 0)
+ return err;
+
+ card_clock /= 1000000;
+ pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
+
+ clk = card_clock;
+ if (!initial_mode && double_clk)
+ clk = card_clock * 2;
+ pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+ clk, pcr->cur_clock);
+
+ if (clk == pcr->cur_clock)
+ return 0;
+
+ if (pcr->ops->conv_clk_and_div_n)
+ n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ else
+ n = clk - 4;
+ if ((clk <= 4) || (n > 396))
+ return -EINVAL;
+
+ mcu_cnt = 125 / clk + 3;
+ if (mcu_cnt > 15)
+ mcu_cnt = 15;
+
+ div = CLK_DIV_1;
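+ /* Keep N at or above the PLL minimum by doubling it and raising div. */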
+ while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) {
+ if (pcr->ops->conv_clk_and_div_n) {
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
+ DIV_N_TO_CLK) * 2;
+ n = pcr->ops->conv_clk_and_div_n(dbl_clk,
+ CLK_TO_DIV_N);
+ } else {
+ n = (n + 4) * 2 - 4;
+ }
+ div++;
+ }
+
+ n = (n / 2) - 1;
+ pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
+
+ ssc_depth = depth[ssc_depth];
+ if (double_clk)
+ ssc_depth = double_ssc_depth(ssc_depth);
+
+ if (ssc_depth) {
+ if (div == CLK_DIV_2) {
+ if (ssc_depth > 1)
+ ssc_depth -= 1;
+ else
+ ssc_depth = RTS5228_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_4) {
+ if (ssc_depth > 2)
+ ssc_depth -= 2;
+ else
+ ssc_depth = RTS5228_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_8) {
+ if (ssc_depth > 3)
+ ssc_depth -= 3;
+ else
+ ssc_depth = RTS5228_SSC_DEPTH_8M;
+ }
+ } else {
+ ssc_depth = 0;
+ }
+ pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
+ 0xFF, (div << 4) | mcu_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
+ SSC_DEPTH_MASK, ssc_depth);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ if (vpclk) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 2000);
+ if (err < 0)
+ return err;
+
+ /* Wait for the SSC clock to stabilize */
+ udelay(SSC_CLOCK_STABLE_WAIT);
+ err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+ if (err < 0)
+ return err;
+
+ pcr->cur_clock = clk;
+ return 0;
+}
+
+void rts5228_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 1;
+ pcr->ops = &rts5228_pcr_ops;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5228_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5228_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5228_sd_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS5228_AUTOLOAD_CFG3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = 0x7F;
+ option->ltr_l1off_snooze_sspwrgate = 0x78;
+
+ option->ocp_en = 1;
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_800U;
+ option->sd_800mA_ocp_thd = RTS5228_LDO1_OCP_THD_930;
+}
diff --git a/drivers/misc/cardreader/rts5228.h b/drivers/misc/cardreader/rts5228.h
new file mode 100644
index 000000000000..6a872246aeed
--- /dev/null
+++ b/drivers/misc/cardreader/rts5228.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Ricky WU <ricky_wu@realtek.com>
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+#ifndef RTS5228_H
+#define RTS5228_H
+
+#define RTS5228_AUTOLOAD_CFG0 0xFF7B
+#define RTS5228_AUTOLOAD_CFG1 0xFF7C
+#define RTS5228_AUTOLOAD_CFG2 0xFF7D
+#define RTS5228_AUTOLOAD_CFG3 0xFF7E
+#define RTS5228_AUTOLOAD_CFG4 0xFF7F
+
+#define RTS5228_REG_VREF 0xFE97
+#define RTS5228_PWD_SUSPND_EN (1 << 4)
+
+#define RTS5228_PAD_H3L1 0xFF79
+#define PAD_GPIO_H3L1 (1 << 3)
+
+/* SSC_CTL2 0xFC12 */
+#define RTS5228_SSC_DEPTH_MASK 0x07
+#define RTS5228_SSC_DEPTH_DISABLE 0x00
+#define RTS5228_SSC_DEPTH_8M 0x01
+#define RTS5228_SSC_DEPTH_4M 0x02
+#define RTS5228_SSC_DEPTH_2M 0x03
+#define RTS5228_SSC_DEPTH_1M 0x04
+#define RTS5228_SSC_DEPTH_512K 0x05
+#define RTS5228_SSC_DEPTH_256K 0x06
+#define RTS5228_SSC_DEPTH_128K 0x07
+
+/* DMACTL 0xFE2C */
+#define RTS5228_DMA_PACK_SIZE_MASK 0xF0
+
+#define RTS5228_REG_LDO12_CFG 0xFF6E
+#define RTS5228_LDO12_VO_TUNE_MASK (0x07<<1)
+#define RTS5228_LDO12_100 (0x00<<1)
+#define RTS5228_LDO12_105 (0x01<<1)
+#define RTS5228_LDO12_110 (0x02<<1)
+#define RTS5228_LDO12_115 (0x03<<1)
+#define RTS5228_LDO12_120 (0x04<<1)
+#define RTS5228_LDO12_125 (0x05<<1)
+#define RTS5228_LDO12_130 (0x06<<1)
+#define RTS5228_LDO12_135 (0x07<<1)
+#define RTS5228_REG_PWD_LDO12 (0x01<<0)
+
+#define RTS5228_REG_LDO12_L12 0xFF6F
+#define RTS5228_LDO12_L12_MASK (0x07<<4)
+#define RTS5228_LDO12_L12_120 (0x04<<4)
+
+/* LDO control register */
+#define RTS5228_CARD_PWR_CTL 0xFD50
+#define RTS5228_PUPDC (0x01<<5)
+
+#define RTS5228_LDO1233318_POW_CTL 0xFF70
+#define RTS5228_LDO3318_POWERON (0x01<<3)
+#define RTS5228_LDO1_POWEROFF (0x00<<0)
+#define RTS5228_LDO1_SOFTSTART (0x01<<0)
+#define RTS5228_LDO1_FULLON (0x03<<0)
+#define RTS5228_LDO1_POWERON_MASK (0x03<<0)
+#define RTS5228_LDO_POWERON_MASK (0x0F<<0)
+
+#define RTS5228_DV3318_CFG 0xFF71
+#define RTS5228_DV3318_TUNE_MASK (0x07<<4)
+#define RTS5228_DV3318_17 (0x00<<4)
+#define RTS5228_DV3318_1V75 (0x01<<4)
+#define RTS5228_DV3318_18 (0x02<<4)
+#define RTS5228_DV3318_1V85 (0x03<<4)
+#define RTS5228_DV3318_19 (0x04<<4)
+#define RTS5228_DV3318_33 (0x07<<4)
+#define RTS5228_DV3318_SR_MASK (0x03<<2)
+#define RTS5228_DV3318_SR_0 (0x00<<2)
+#define RTS5228_DV3318_SR_250 (0x01<<2)
+#define RTS5228_DV3318_SR_500 (0x02<<2)
+#define RTS5228_DV3318_SR_1000 (0x03<<2)
+
+#define RTS5228_LDO1_CFG0 0xFF72
+#define RTS5228_LDO1_OCP_THD_MASK (0x07<<5)
+#define RTS5228_LDO1_OCP_EN (0x01<<4)
+#define RTS5228_LDO1_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5228_LDO1_OCP_LMT_EN (0x01<<1)
+
+#define RTS5228_LDO1_OCP_THD_730 (0x00<<5)
+#define RTS5228_LDO1_OCP_THD_780 (0x01<<5)
+#define RTS5228_LDO1_OCP_THD_860 (0x02<<5)
+#define RTS5228_LDO1_OCP_THD_930 (0x03<<5)
+#define RTS5228_LDO1_OCP_THD_1000 (0x04<<5)
+#define RTS5228_LDO1_OCP_THD_1070 (0x05<<5)
+#define RTS5228_LDO1_OCP_THD_1140 (0x06<<5)
+#define RTS5228_LDO1_OCP_THD_1220 (0x07<<5)
+
+#define RTS5228_LDO1_LMT_THD_450 (0x00<<2)
+#define RTS5228_LDO1_LMT_THD_1000 (0x01<<2)
+#define RTS5228_LDO1_LMT_THD_1500 (0x02<<2)
+#define RTS5228_LDO1_LMT_THD_2000 (0x03<<2)
+
+#define RTS5228_LDO1_CFG1 0xFF73
+#define RTS5228_LDO1_SR_TIME_MASK (0x03<<6)
+#define RTS5228_LDO1_SR_0_0 (0x00<<6)
+#define RTS5228_LDO1_SR_0_25 (0x01<<6)
+#define RTS5228_LDO1_SR_0_5 (0x02<<6)
+#define RTS5228_LDO1_SR_1_0 (0x03<<6)
+#define RTS5228_LDO1_TUNE_MASK (0x07<<1)
+#define RTS5228_LDO1_18 (0x05<<1)
+#define RTS5228_LDO1_33 (0x07<<1)
+#define RTS5228_LDO1_PWD_MASK (0x01<<0)
+
+#define RTS5228_AUXCLK_GAT_CTL 0xFF74
+
+#define RTS5228_REG_RREF_CTL_0 0xFF75
+#define RTS5228_FORCE_RREF_EXTL (0x01<<7)
+#define RTS5228_REG_BG33_MASK (0x07<<0)
+#define RTS5228_RREF_12_1V (0x04<<0)
+#define RTS5228_RREF_12_3V (0x05<<0)
+
+#define RTS5228_REG_RREF_CTL_1 0xFF76
+
+#define RTS5228_REG_RREF_CTL_2 0xFF77
+#define RTS5228_TEST_INTL_RREF (0x01<<7)
+#define RTS5228_DGLCH_TIME_MASK (0x03<<5)
+#define RTS5228_DGLCH_TIME_50 (0x00<<5)
+#define RTS5228_DGLCH_TIME_75 (0x01<<5)
+#define RTS5228_DGLCH_TIME_100 (0x02<<5)
+#define RTS5228_DGLCH_TIME_125 (0x03<<5)
+#define RTS5228_REG_REXT_TUNE_MASK (0x1F<<0)
+
+#define RTS5228_REG_PME_FORCE_CTL 0xFF78
+#define FORCE_PM_CONTROL 0x20
+#define FORCE_PM_VALUE 0x10
+
+/* Single LUN, support SD */
+#define DEFAULT_SINGLE 0
+#define SD_LUN 1
+
+/* For Change_FPGA_SSCClock Function */
+#define MULTIPLY_BY_1 0x00
+#define MULTIPLY_BY_2 0x01
+#define MULTIPLY_BY_3 0x02
+#define MULTIPLY_BY_4 0x03
+#define MULTIPLY_BY_5 0x04
+#define MULTIPLY_BY_6 0x05
+#define MULTIPLY_BY_7 0x06
+#define MULTIPLY_BY_8 0x07
+#define MULTIPLY_BY_9 0x08
+#define MULTIPLY_BY_10 0x09
+
+#define DIVIDE_BY_2 0x01
+#define DIVIDE_BY_3 0x02
+#define DIVIDE_BY_4 0x03
+#define DIVIDE_BY_5 0x04
+#define DIVIDE_BY_6 0x05
+#define DIVIDE_BY_7 0x06
+#define DIVIDE_BY_8 0x07
+#define DIVIDE_BY_9 0x08
+#define DIVIDE_BY_10 0x09
+
+int rts5228_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+
+#endif /* RTS5228_H */
diff --git a/drivers/misc/cardreader/rts5229.c b/drivers/misc/cardreader/rts5229.c
index 9f080a32ef50..89e6f124ca5c 100644
--- a/drivers/misc/cardreader/rts5229.c
+++ b/drivers/misc/cardreader/rts5229.c
@@ -23,9 +23,10 @@ static u8 rts5229_get_ic_version(struct rtsx_pcr *pcr)
static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
@@ -37,7 +38,7 @@ static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 =
map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg));
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 6c6c9e95a29f..941b3d77f1e9 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -55,9 +55,10 @@ static void rts5249_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg)) {
@@ -70,7 +71,7 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
@@ -93,32 +94,33 @@ static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
struct rtsx_cr_option *option = &(pcr->option);
u32 lval;
- if (CHK_PCI_PID(pcr, PID_524A))
- rtsx_pci_read_config_dword(pcr,
- PCR_ASPM_SETTING_REG1, &lval);
- else
- rtsx_pci_read_config_dword(pcr,
- PCR_ASPM_SETTING_REG2, &lval);
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
- if (lval & ASPM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- if (lval & ASPM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- if (lval & PM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
- if (lval & PM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
if (option->ltr_en) {
u16 val;
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
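
The init_from_cfg rework here (and in rts5260/rts5261 below) replaces vendor-specific config offsets with a standard walk of the PCIe L1 PM Substates extended capability. A condensed sketch of the shared pattern, with an illustrative function name:

#include <linux/pci.h>
#include <linux/printk.h>

/* Illustrative only: report which L1 substates are enabled for a device. */
static void demo_read_l1ss_flags(struct pci_dev *pdev)
{
        int l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
        u32 ctl1;

        if (!l1ss)      /* capability absent: keep driver defaults */
                return;

        pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &ctl1);

        pr_info("ASPM L1.1 %sabled, ASPM L1.2 %sabled, PCI-PM L1.1 %sabled, PCI-PM L1.2 %sabled\n",
                ctl1 & PCI_L1SS_CTL1_ASPM_L1_1 ? "en" : "dis",
                ctl1 & PCI_L1SS_CTL1_ASPM_L1_2 ? "en" : "dis",
                ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1 ? "en" : "dis",
                ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2 ? "en" : "dis");
}
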
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 7a9dbb778e84..b9f66b1384a6 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -64,9 +64,10 @@ static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg)) {
@@ -79,7 +80,7 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
@@ -496,21 +497,27 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
struct rtsx_cr_option *option = &pcr->option;
u32 lval;
- rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_5260, &lval);
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
- if (lval & ASPM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- if (lval & ASPM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- if (lval & PM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
- if (lval & PM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
rts5260_pwr_saving_setting(pcr);
@@ -518,7 +525,7 @@ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
if (option->ltr_en) {
u16 val;
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 195822ec858e..471961487ff8 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -59,9 +59,11 @@ static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
u32 reg;
+
/* 0x814~0x817 */
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
if (!rts5261_vendor_setting_valid(reg)) {
@@ -76,7 +78,7 @@ static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->flags |= PCR_REVERSE_SOCKET;
/* 0x724~0x727 */
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
pcr->aspm_en = rts5261_reg_to_aspm(reg);
@@ -361,6 +363,7 @@ static void rts5261_process_ocp(struct rtsx_pcr *pcr)
static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
int retval;
u32 lval, i;
u8 valid, efuse_valid, tmp;
@@ -386,8 +389,7 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
if (efuse_valid == 0) {
- retval = rtsx_pci_read_config_dword(pcr,
- PCR_SETTING_REG2, &lval);
+ retval = pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval);
if (retval != 0)
pcr_dbg(pcr, "read 0x814 DW fail\n");
pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval);
@@ -399,9 +401,9 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
REG_EFUSE_POR, 0);
pcr_dbg(pcr, "Disable efuse por!\n");
- rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &lval);
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval);
lval = lval & 0x00FFFFFF;
- retval = rtsx_pci_write_config_dword(pcr, PCR_SETTING_REG2, lval);
+ retval = pci_write_config_dword(pdev, PCR_SETTING_REG2, lval);
if (retval != 0)
pcr_dbg(pcr, "write config fail\n");
@@ -410,27 +412,33 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
u32 lval;
struct rtsx_cr_option *option = &pcr->option;
- rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_REG1, &lval);
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
- if (lval & ASPM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
else
rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
- if (lval & ASPM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
else
rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
- if (lval & PM_L1_1_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
else
rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
- if (lval & PM_L1_2_EN_MASK)
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
else
rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
@@ -439,7 +447,7 @@ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
if (option->ltr_en) {
u16 val;
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 0d5928bc1b6d..37ccc67f4914 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -23,6 +23,7 @@
#include "rtsx_pcr.h"
#include "rts5261.h"
+#include "rts5228.h"
static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
@@ -50,6 +51,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -206,16 +208,10 @@ int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
int err, i, finished = 0;
u8 tmp;
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
+ rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
+ rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
+ rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
+ rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
for (i = 0; i < 100000; i++) {
err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
@@ -247,16 +243,10 @@ int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
int err, i, finished = 0;
u16 data;
- u8 *ptr, tmp;
+ u8 tmp, val1, val2;
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
+ rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
+ rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
for (i = 0; i < 100000; i++) {
err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
@@ -272,17 +262,9 @@ int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
if (!finished)
return -ETIMEDOUT;
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
- rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
-
- ptr = rtsx_pci_get_cmd_data(pcr);
- data = ((u16)ptr[1] << 8) | ptr[0];
+ rtsx_pci_read_register(pcr, PHYDATA0, &val1);
+ rtsx_pci_read_register(pcr, PHYDATA1, &val2);
+ data = val1 | (val2 << 8);
if (val)
*val = data;
@@ -417,7 +399,7 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
if (end)
option |= RTSX_SG_END;
- if (PCI_PID(pcr) == PID_5261) {
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
if (len > 0xFFFF)
val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
| (((u64)len >> 16) << 6) | option;
@@ -723,6 +705,9 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (PCI_PID(pcr) == PID_5261)
return rts5261_pci_switch_clock(pcr, card_clock,
ssc_depth, initial_mode, double_clk, vpclk);
+ if (PCI_PID(pcr) == PID_5228)
+ return rts5228_pci_switch_clock(pcr, card_clock,
+ ssc_depth, initial_mode, double_clk, vpclk);
if (initial_mode) {
/* We use around 250k here, in the initial stage */
@@ -1111,8 +1096,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
mutex_unlock(&pcr->pcr_mutex);
}
-#ifdef CONFIG_PM
-static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
+static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
{
if (pcr->ops->turn_off_led)
pcr->ops->turn_off_led(pcr);
@@ -1126,7 +1110,6 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
if (pcr->ops->force_power_down)
pcr->ops->force_power_down(pcr, pm_state);
}
-#endif
void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
{
@@ -1202,6 +1185,36 @@ void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
}
}
+void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
+{
+ u16 val;
+
+ if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
+ rtsx_pci_read_phy_register(pcr, 0x01, &val);
+ val |= 1 << 9;
+ rtsx_pci_write_phy_register(pcr, 0x01, val);
+ }
+ rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
+ rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
+ rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
+ rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
+}
+
+void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
+{
+ u16 val;
+
+ if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
+ rtsx_pci_read_phy_register(pcr, 0x01, &val);
+ val &= ~(1 << 9);
+ rtsx_pci_write_phy_register(pcr, 0x01, val);
+ }
+ rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
+ rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
+}
+
int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
{
rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
@@ -1231,9 +1244,13 @@ int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
+ struct pci_dev *pdev = pcr->pci;
int err;
- pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
+ if (PCI_PID(pcr) == PID_5228)
+ rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
+ RTS5228_LDO1_SR_0_5);
+
rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
rtsx_pci_enable_bus_int(pcr);
@@ -1280,6 +1297,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
if (PCI_PID(pcr) == PID_5261)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
RTS5261_SSC_DEPTH_2M);
+ else if (PCI_PID(pcr) == PID_5228)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
+ RTS5228_SSC_DEPTH_2M);
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
@@ -1314,6 +1334,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_525A:
case PID_5260:
case PID_5261:
+ case PID_5228:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1324,9 +1345,10 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_init_ocp(pcr);
/* Enable clk_request_n to enable clock power management */
- rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
/* Enter L1 when host tx idle */
- rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);
+ pci_write_config_byte(pdev, 0x70F, 0x5B);
if (pcr->ops->extra_init_hw) {
err = pcr->ops->extra_init_hw(pcr);
@@ -1401,6 +1423,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5261:
rts5261_init_params(pcr);
break;
+
+ case 0x5228:
+ rts5228_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1604,10 +1630,9 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}
-#ifdef CONFIG_PM
-
-static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
+static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
{
+ struct pci_dev *pcidev = to_pci_dev(dev_d);
struct pcr_handle *handle;
struct rtsx_pcr *pcr;
@@ -1623,17 +1648,15 @@ static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
rtsx_pci_power_off(pcr, HOST_ENTER_S3);
- pci_save_state(pcidev);
- pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
- pci_disable_device(pcidev);
- pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
+ device_wakeup_disable(dev_d);
mutex_unlock(&pcr->pcr_mutex);
return 0;
}
-static int rtsx_pci_resume(struct pci_dev *pcidev)
+static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
{
+ struct pci_dev *pcidev = to_pci_dev(dev_d);
struct pcr_handle *handle;
struct rtsx_pcr *pcr;
int ret = 0;
@@ -1645,13 +1668,6 @@ static int rtsx_pci_resume(struct pci_dev *pcidev)
mutex_lock(&pcr->pcr_mutex);
- pci_set_power_state(pcidev, PCI_D0);
- pci_restore_state(pcidev);
- ret = pci_enable_device(pcidev);
- if (ret)
- goto out;
- pci_set_master(pcidev);
-
ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
if (ret)
goto out;
@@ -1667,6 +1683,8 @@ out:
return ret;
}
+#ifdef CONFIG_PM
+
static void rtsx_pci_shutdown(struct pci_dev *pcidev)
{
struct pcr_handle *handle;
@@ -1686,19 +1704,18 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
#else /* CONFIG_PM */
-#define rtsx_pci_suspend NULL
-#define rtsx_pci_resume NULL
#define rtsx_pci_shutdown NULL
#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(rtsx_pci_pm_ops, rtsx_pci_suspend, rtsx_pci_resume);
+
static struct pci_driver rtsx_pci_driver = {
.name = DRV_NAME_RTSX_PCI,
.id_table = rtsx_pci_ids,
.probe = rtsx_pci_probe,
.remove = rtsx_pci_remove,
- .suspend = rtsx_pci_suspend,
- .resume = rtsx_pci_resume,
+ .driver.pm = &rtsx_pci_pm_ops,
.shutdown = rtsx_pci_shutdown,
};
module_pci_driver(rtsx_pci_driver);
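
The suspend/resume rework above is the stock migration from legacy PCI PM callbacks to dev_pm_ops: the PCI core now performs pci_save_state()/pci_set_power_state() and device enabling, so the driver keeps only its own register work. A minimal hedged template of the resulting shape; driver and callback names are placeholders, probe/remove omitted:

#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_wakeup.h>

static int __maybe_unused demo_suspend(struct device *dev)
{
        /* Device-specific quiesce only; config-space save/restore and
         * power-state transitions are handled by the PCI core. */
        device_wakeup_disable(dev);
        return 0;
}

static int __maybe_unused demo_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        /* Re-program device registers; no pci_enable_device() needed. */
        dev_dbg(&pdev->dev, "resumed\n");
        return 0;
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_driver = {
        .name = "demo",
        .driver.pm = &demo_pm_ops,      /* replaces .suspend/.resume */
};
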
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 024cbd998b2a..6b322db8738e 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -53,6 +53,7 @@ void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
void rts5260_init_params(struct rtsx_pcr *pcr);
void rts5261_init_params(struct rtsx_pcr *pcr);
+void rts5228_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
@@ -70,6 +71,8 @@ static inline u8 map_sd_drive(int idx)
#define rts5209_vendor_setting1_valid(reg) (!((reg) & 0x80))
#define rts5209_vendor_setting2_valid(reg) ((reg) & 0x80)
+#define rtsx_check_mmc_support(reg) ((reg) & 0x10)
+#define rtsx_reg_to_rtd3(reg) ((reg) & 0x02)
#define rtsx_reg_to_aspm(reg) (((reg) >> 28) & 0x03)
#define rtsx_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 26) & 0x03)
#define rtsx_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x03)
@@ -100,6 +103,8 @@ void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr);
void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr);
int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val);
void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr);
+void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr);
+void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr);
int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr);
int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr);
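
The rtsx_reg_to_*()/rtsx_check_*() macros added above are plain bit-field extractions from the vendor-setting dword. For illustration, with a made-up register value:

u32 reg = 0x30000052;                           /* hypothetical vendor-setting dword */

bool mmc  = rtsx_check_mmc_support(reg);        /* bit 4      -> true */
bool rtd3 = rtsx_reg_to_rtd3(reg);              /* bit 1      -> true */
u8 aspm   = rtsx_reg_to_aspm(reg);              /* bits 29:28 -> 0x3  */
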
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index a328cab11014..59eda55d92a3 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -759,7 +759,7 @@ static int rtsx_usb_post_reset(struct usb_interface *intf)
return 0;
}
-static struct usb_device_id rtsx_usb_usb_ids[] = {
+static const struct usb_device_id rtsx_usb_usb_ids[] = {
{ USB_DEVICE(0x0BDA, 0x0129) },
{ USB_DEVICE(0x0BDA, 0x0139) },
{ USB_DEVICE(0x0BDA, 0x0140) },
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index b290bc2ee240..55b7ee0e8f93 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -166,37 +166,24 @@ void cb710_set_irq_handler(struct cb710_slot *slot,
}
EXPORT_SYMBOL_GPL(cb710_set_irq_handler);
-#ifdef CONFIG_PM
-
-static int cb710_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused cb710_suspend(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct cb710_chip *chip = pci_get_drvdata(pdev);
devm_free_irq(&pdev->dev, pdev->irq, chip);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- if (state.event & PM_EVENT_SLEEP)
- pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
-static int cb710_resume(struct pci_dev *pdev)
+static int __maybe_unused cb710_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct cb710_chip *chip = pci_get_drvdata(pdev);
- int err;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- err = pcim_enable_device(pdev);
- if (err)
- return err;
return devm_request_irq(&pdev->dev, pdev->irq,
cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
}
-#endif /* CONFIG_PM */
-
static int cb710_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -312,15 +299,14 @@ static const struct pci_device_id cb710_pci_tbl[] = {
{ 0, }
};
+static SIMPLE_DEV_PM_OPS(cb710_pm_ops, cb710_suspend, cb710_resume);
+
static struct pci_driver cb710_driver = {
.name = KBUILD_MODNAME,
.id_table = cb710_pci_tbl,
.probe = cb710_probe,
.remove = cb710_remove_one,
-#ifdef CONFIG_PM
- .suspend = cb710_suspend,
- .resume = cb710_resume,
-#endif
+ .driver.pm = &cb710_pm_ops,
};
static int __init cb710_init_module(void)
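
One detail worth noting in the cb710 conversion: because the IRQ is devm-managed, suspend explicitly releases it and resume re-requests it rather than relying on automatic cleanup. A hedged sketch of that pairing, with placeholder names:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Release the devm-managed IRQ while the device sleeps... */
static void demo_irq_suspend(struct pci_dev *pdev, void *priv)
{
        devm_free_irq(&pdev->dev, pdev->irq, priv);
}

/* ...and re-request it on resume; devm ties its lifetime to the device. */
static int demo_irq_resume(struct pci_dev *pdev, void *priv,
                           irq_handler_t handler)
{
        return devm_request_irq(&pdev->dev, pdev->irq, handler,
                                IRQF_SHARED, KBUILD_MODNAME, priv);
}
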
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
index dfd2969e3628..e5a4ed3701eb 100644
--- a/drivers/misc/cb710/sgbuf2.c
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -117,6 +117,7 @@ static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data)
/**
* cb710_sg_dwiter_write_next_block() - write next 32-bit word to sg buffer
* @miter: sg mapping iterator used for writing
+ * @data: data to write to sg buffer
*
* Description:
* Writes 32-bit word starting at byte pointed to by @miter@
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index cb9cca35a226..5b93ff51d82a 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -175,7 +175,7 @@ static int update_devicetree(struct cxl *adapter, s32 scope)
struct update_nodes_workarea *unwa;
u32 action, node_count;
int token, rc, i;
- __be32 *data, drc_index, phandle;
+ __be32 *data, phandle;
char *buf;
token = rtas_token("ibm,update-nodes");
@@ -213,7 +213,7 @@ static int update_devicetree(struct cxl *adapter, s32 scope)
break;
case OPCODE_ADD:
/* nothing to do, just move pointer */
- drc_index = *data++;
+ data++;
break;
}
}
diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c
index b7c57f67f4f5..aba5e20eeb1f 100644
--- a/drivers/misc/cxl/hcalls.c
+++ b/drivers/misc/cxl/hcalls.c
@@ -167,7 +167,7 @@ long cxl_h_attach_process(u64 unit_address,
}
}
-/**
+/*
* cxl_h_detach_process - Detach a process element from a coherent
* platform function.
*/
@@ -197,7 +197,7 @@ long cxl_h_detach_process(u64 unit_address, u64 process_token)
}
}
-/**
+/*
* cxl_h_control_function - This H_CONTROL_CA_FUNCTION hypervisor call allows
* the partition to manipulate or query
* certain coherent platform function behaviors.
@@ -238,7 +238,7 @@ static long cxl_h_control_function(u64 unit_address, u64 op,
}
}
-/**
+/*
* cxl_h_reset_afu - Perform a reset to the coherent platform function.
*/
long cxl_h_reset_afu(u64 unit_address)
@@ -249,7 +249,7 @@ long cxl_h_reset_afu(u64 unit_address)
NULL);
}
-/**
+/*
* cxl_h_suspend_process - Suspend a process from being executed
* Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
* process was attached.
@@ -262,7 +262,7 @@ long cxl_h_suspend_process(u64 unit_address, u64 process_token)
NULL);
}
-/**
+/*
* cxl_h_resume_process - Resume a process to be executed
* Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
* process was attached.
@@ -275,7 +275,7 @@ long cxl_h_resume_process(u64 unit_address, u64 process_token)
NULL);
}
-/**
+/*
* cxl_h_read_error_state - Checks the error state of the coherent
* platform function.
* R4 contains the error state
@@ -288,7 +288,7 @@ long cxl_h_read_error_state(u64 unit_address, u64 *state)
state);
}
-/**
+/*
* cxl_h_get_afu_err - collect the AFU error buffer
* Parameter1 = byte offset into error buffer to retrieve, valid values
* are between 0 and (ibm,error-buffer-size - 1)
@@ -304,7 +304,7 @@ long cxl_h_get_afu_err(u64 unit_address, u64 offset,
NULL);
}
-/**
+/*
* cxl_h_get_config - collect configuration record for the
* coherent platform function
* Parameter1 = # of configuration record to retrieve, valid values are
@@ -324,7 +324,7 @@ long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset,
NULL);
}
-/**
+/*
* cxl_h_terminate_process - Terminate the process before completion
* Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
* process was attached.
@@ -337,7 +337,7 @@ long cxl_h_terminate_process(u64 unit_address, u64 process_token)
NULL);
}
-/**
+/*
* cxl_h_collect_vpd - Collect VPD for the coherent platform function.
* Parameter1 = # of VPD record to retrieve, valid values are between 0
* and (ibm,#config-records - 1).
@@ -355,7 +355,7 @@ long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
out);
}
-/**
+/*
* cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
*/
long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg)
@@ -365,7 +365,7 @@ long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg)
0, 0, 0, 0, reg);
}
-/**
+/*
* cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
* based on an interrupt
* Parameter1 = value to write to the function-wide error interrupt register
@@ -378,7 +378,7 @@ long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value)
NULL);
}
-/**
+/*
* cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
* an error log
*/
@@ -390,7 +390,7 @@ long cxl_h_get_error_log(u64 unit_address, u64 value)
NULL);
}
-/**
+/*
* cxl_h_collect_int_info - Collect interrupt info about a coherent
* platform function after an interrupt occurred.
*/
@@ -425,7 +425,7 @@ long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
}
}
-/**
+/*
* cxl_h_control_faults - Control the operation of a coherent platform
* function after a fault occurs.
*
@@ -470,7 +470,7 @@ long cxl_h_control_faults(u64 unit_address, u64 process_token,
}
}
-/**
+/*
* cxl_h_control_facility - This H_CONTROL_CA_FACILITY hypervisor call
* allows the partition to manipulate or query
* certain coherent platform facility behaviors.
@@ -509,7 +509,7 @@ static long cxl_h_control_facility(u64 unit_address, u64 op,
}
}
-/**
+/*
* cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
*/
long cxl_h_reset_adapter(u64 unit_address)
@@ -520,7 +520,7 @@ long cxl_h_reset_adapter(u64 unit_address)
NULL);
}
-/**
+/*
* cxl_h_collect_vpd - Collect VPD for the coherent platform function.
* Parameter1 = 4K naturally aligned real buffer containing block
* list entries
@@ -536,7 +536,7 @@ long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
out);
}
-/**
+/*
* cxl_h_download_facility - This H_DOWNLOAD_CA_FACILITY
* hypervisor call provide platform support for
* downloading a base adapter image to the coherent
@@ -616,7 +616,7 @@ static long cxl_h_download_facility(u64 unit_address, u64 op,
}
}
-/**
+/*
* cxl_h_download_adapter_image - Download the base image to the coherent
* platform facility.
*/
@@ -629,7 +629,7 @@ long cxl_h_download_adapter_image(u64 unit_address,
list_address, num, out);
}
-/**
+/*
* cxl_h_validate_adapter_image - Validate the base image in the coherent
* platform facility.
*/
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index f0263d1a1fdf..d97a243ad30c 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -624,7 +624,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c
rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
&afu->dev.kobj, "cr%i", cr->cr);
if (rc)
- goto err;
+ goto err1;
rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
if (rc)
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 1cf320e2a415..1264253cc07b 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -150,7 +150,7 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
out:
cxl_afu_configured_put(afu);
- return rc ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+ return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
}
static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
@@ -184,7 +184,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
out:
cxl_afu_configured_put(afu);
- return rc ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
+ return rc ? PCIBIOS_SET_FAILED : 0;
}
static struct pci_ops cxl_pcie_pci_ops =
diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c
index 713e92ee27ac..3c4eaba86576 100644
--- a/drivers/misc/echo/echo.c
+++ b/drivers/misc/echo/echo.c
@@ -66,13 +66,13 @@
Path Models", IEEE Transactions on communications, COM-25,
No. 6, June
1977.
- http://www.rowetel.com/images/echo/dual_path_paper.pdf
+ https://www.rowetel.com/images/echo/dual_path_paper.pdf
[2] The classic, very useful paper that tells you how to
actually build a real world echo canceller:
Messerschmitt, Hedberg, Cole, Haoui, Winship, "Digital Voice
Echo Canceller with a TMS320020,
- http://www.rowetel.com/images/echo/spra129.pdf
+ https://www.rowetel.com/images/echo/spra129.pdf
[3] I have written a series of blog posts on this work, here is
Part 1: http://www.rowetel.com/blog/?p=18
@@ -80,7 +80,7 @@
[4] The source code http://svn.rowetel.com/software/oslec/
[5] A nice reference on LMS filters:
- http://en.wikipedia.org/wiki/Least_mean_squares_filter
+ https://en.wikipedia.org/wiki/Least_mean_squares_filter
Credits:
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 9ff18d4961ce..2591c21b2b5d 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -225,7 +225,7 @@ static const struct of_device_id at24_of_match[] = {
};
MODULE_DEVICE_TABLE(of, at24_of_match);
-static const struct acpi_device_id at24_acpi_ids[] = {
+static const struct acpi_device_id __maybe_unused at24_acpi_ids[] = {
{ "INT3499", (kernel_ulong_t)&at24_data_INT3499 },
{ "TPF0001", (kernel_ulong_t)&at24_data_24c1024 },
{ /* END OF LIST */ }
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 36a2eb837371..9627294fe3e9 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -228,7 +228,7 @@ EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
/**
* eeprom_93cx6_readb - Read a byte from eeprom
* @eeprom: Pointer to eeprom structure
- * @word: Byte index from where we should start reading
+ * @byte: Byte index from where we should start reading
* @data: target pointer where the information will have to be stored
*
* This function will read a byte of the eeprom data
@@ -270,7 +270,7 @@ EXPORT_SYMBOL_GPL(eeprom_93cx6_readb);
* @eeprom: Pointer to eeprom structure
* @byte: Index from where we should start reading
* @data: target pointer where the information will have to be stored
- * @words: Number of bytes that should be read.
+ * @bytes: Number of bytes that should be read.
*
* This function will read all requested bytes from the eeprom,
* this is done by calling eeprom_93cx6_readb() multiple times.
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 3c2d405bc79b..f950d0155876 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -103,7 +103,9 @@ EXPORT_SYMBOL_GPL(enclosure_for_each_device);
* enclosure_register - register device as an enclosure
*
* @dev: device containing the enclosure
+ * @name: chosen device name
* @components: number of components in the enclosure
+ * @cb: platform call-backs
*
* This sets up the device for being an enclosure. Note that @dev does
* not have to be a dedicated enclosure device. It may be some other type
@@ -266,7 +268,7 @@ static const struct attribute_group *enclosure_component_groups[];
/**
* enclosure_component_alloc - prepare a new enclosure component
* @edev: the enclosure to add the component
- * @num: the device number
+ * @number: the device number
* @type: the type of component being added
* @name: an optional name to appear in sysfs (leave NULL if none)
*
@@ -347,7 +349,7 @@ EXPORT_SYMBOL_GPL(enclosure_component_register);
/**
* enclosure_add_device - add a device as being part of an enclosure
* @edev: the enclosure device being added to.
- * @num: the number of the component
+ * @component: the number of the component
* @dev: the device being added
*
* Declares a real device to reside in slot (or identifier) @num of an
@@ -389,7 +391,7 @@ EXPORT_SYMBOL_GPL(enclosure_add_device);
/**
* enclosure_remove_device - remove a device from an enclosure
* @edev: the enclosure device
- * @num: the number of the component to remove
+ * @dev: device to remove/put
*
* Returns zero on success or an error.
*
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 1dc6c7c5cbce..acc459fc8105 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -165,6 +165,7 @@ static void genwqe_dev_free(struct genwqe_dev *cd)
/**
* genwqe_bus_reset() - Card recovery
+ * @cd: GenWQE device information
*
* pci_reset_function() will recover the device and ensure that the
* registers are accessible again when it completes with success. If
@@ -262,6 +263,7 @@ static void genwqe_tweak_hardware(struct genwqe_dev *cd)
/**
* genwqe_recovery_on_fatal_gfir_required() - Version depended actions
+ * @cd: GenWQE device information
*
* Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must
* be ignored. This is e.g. true for the bitstream we gave to the card
@@ -280,6 +282,7 @@ int genwqe_flash_readback_fails(struct genwqe_dev *cd)
/**
* genwqe_T_psec() - Calculate PF/VF timeout register content
+ * @cd: GenWQE device information
*
* Note: From a design perspective it turned out to be a bad idea to
* use codes here to specify the frequency/speed values. An old
@@ -303,6 +306,7 @@ static int genwqe_T_psec(struct genwqe_dev *cd)
/**
* genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution
+ * @cd: GenWQE device information
*
* Do this _after_ card_reset() is called. Otherwise the values will
* vanish. The settings need to be done when the queues are inactive.
@@ -329,6 +333,7 @@ static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd)
/**
* genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution
+ * @cd: GenWQE device information
*/
static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd)
{
@@ -543,6 +548,7 @@ static int genwqe_start(struct genwqe_dev *cd)
/**
* genwqe_stop() - Stop card operation
+ * @cd: GenWQE device information
*
* Recovery notes:
* As long as genwqe_thread runs we might access registers during
@@ -569,6 +575,8 @@ static int genwqe_stop(struct genwqe_dev *cd)
/**
* genwqe_recover_card() - Try to recover the card if it is possible
+ * @cd: GenWQE device information
+ * @fatal_err: Indicate whether to attempt soft reset
*
* If fatal_err is set no register access is possible anymore. It is
* likely that genwqe_start fails in that situation. Proper error
@@ -618,6 +626,7 @@ static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir)
/**
* genwqe_fir_checking() - Check the fault isolation registers of the card
+ * @cd: GenWQE device information
*
* If this code works ok, can be tried out with help of the genwqe_poke tool:
* sudo ./tools/genwqe_poke 0x8 0xfefefefefef
@@ -762,6 +771,7 @@ static u64 genwqe_fir_checking(struct genwqe_dev *cd)
/**
* genwqe_pci_fundamental_reset() - trigger a PCIe fundamental reset on the slot
+ * @pci_dev: PCI device information struct
*
* Note: pci_set_pcie_reset_state() is not implemented on all archs, so this
* reset method will not work in all cases.
@@ -826,8 +836,9 @@ static int genwqe_platform_recovery(struct genwqe_dev *cd)
return rc;
}
-/*
+/**
* genwqe_reload_bistream() - reload card bitstream
+ * @cd: GenWQE device information
*
* Set the appropriate register and call fundamental reset to reload the card
* bitstream.
@@ -880,6 +891,7 @@ static int genwqe_reload_bistream(struct genwqe_dev *cd)
/**
* genwqe_health_thread() - Health checking thread
+ * @data: GenWQE device information
*
* This thread is only started for the PF of the card.
*
@@ -1043,18 +1055,17 @@ static int genwqe_health_thread_running(struct genwqe_dev *cd)
static int genwqe_health_check_stop(struct genwqe_dev *cd)
{
- int rc;
-
if (!genwqe_health_thread_running(cd))
return -EIO;
- rc = kthread_stop(cd->health_thread);
+ kthread_stop(cd->health_thread);
cd->health_thread = NULL;
return 0;
}
/**
* genwqe_pci_setup() - Allocate PCIe related resources for our card
+ * @cd: GenWQE device information
*/
static int genwqe_pci_setup(struct genwqe_dev *cd)
{
@@ -1140,6 +1151,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
/**
* genwqe_pci_remove() - Free PCIe related resources for our card
+ * @cd: GenWQE device information
*/
static void genwqe_pci_remove(struct genwqe_dev *cd)
{
@@ -1154,7 +1166,8 @@ static void genwqe_pci_remove(struct genwqe_dev *cd)
/**
* genwqe_probe() - Device initialization
- * @pdev: PCI device information struct
+ * @pci_dev: PCI device information struct
+ * @id: PCI device ID
*
* Callable for multiple cards. This function is called on bind.
*
@@ -1214,6 +1227,7 @@ static int genwqe_probe(struct pci_dev *pci_dev,
/**
* genwqe_remove() - Called when device is removed (hot-plugable)
+ * @pci_dev: PCI device information struct
*
* Or when the driver is unloaded, respectively when unbind is done.
*/
@@ -1233,8 +1247,10 @@ static void genwqe_remove(struct pci_dev *pci_dev)
genwqe_dev_free(cd);
}
-/*
+/**
* genwqe_err_error_detected() - Error detection callback
+ * @pci_dev: PCI device information struct
+ * @state: PCI channel state
*
* This callback is called by the PCI subsystem whenever a PCI bus
* error is detected.
@@ -1324,7 +1340,7 @@ static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
return 0;
}
-static struct pci_error_handlers genwqe_err_handler = {
+static const struct pci_error_handlers genwqe_err_handler = {
.error_detected = genwqe_err_error_detected,
.mmio_enabled = genwqe_err_result_none,
.slot_reset = genwqe_err_slot_reset,
@@ -1342,6 +1358,8 @@ static struct pci_driver genwqe_driver = {
/**
* genwqe_devnode() - Set default access mode for genwqe devices.
+ * @dev: Pointer to device (unused)
+ * @mode: Carrier to pass-back given mode (permissions)
*
* Default mode should be rw for everybody. Do not change default
* device name.
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 905106579935..0db4000dedf2 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -244,10 +244,13 @@ static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
(cd->card_state != GENWQE_CARD_USED);
}
+#define RET_DDCB_APPENDED 1
+#define RET_DDCB_TAPPED 2
/**
* enqueue_ddcb() - Enqueue a DDCB
* @cd: pointer to genwqe device descriptor
* @queue: queue this operation should be done on
+ * @pddcb: pointer to ddcb structure
* @ddcb_no: pointer to ddcb number being tapped
*
* Start execution of DDCB by tapping or append to queue via NEXT
@@ -259,9 +262,6 @@ static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
* Return: 1 if new DDCB is appended to previous
* 2 if DDCB queue is tapped via register/simulation
*/
-#define RET_DDCB_APPENDED 1
-#define RET_DDCB_TAPPED 2
-
static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
struct ddcb *pddcb, int ddcb_no)
{
@@ -316,6 +316,8 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
/**
* copy_ddcb_results() - Copy output state from real DDCB to request
+ * @req: pointer to requested DDCB parameters
+ * @ddcb_no: ddcb number
*
* Copy DDCB ASV to request struct. There is no endian
* conversion made, since data structure in ASV is still
@@ -356,6 +358,7 @@ static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
/**
* genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
* @cd: pointer to genwqe device descriptor
+ * @queue: queue to be checked
*
* Return: Number of DDCBs which were finished
*/
@@ -553,6 +556,8 @@ int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
/**
* get_next_ddcb() - Get next available DDCB
* @cd: pointer to genwqe device descriptor
+ * @queue: DDCB queue
+ * @num: internal DDCB number
*
* DDCB's content is completely cleared but presets for PRE and
* SEQNUM. This function must only be called when ddcb_lock is held.
@@ -900,7 +905,7 @@ int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
/**
* __genwqe_execute_raw_ddcb() - Setup and execute DDCB
* @cd: pointer to genwqe device descriptor
- * @req: user provided DDCB request
+ * @cmd: user provided DDCB command
* @f_flags: file mode: blocking, non-blocking
*/
int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
@@ -965,6 +970,7 @@ int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
/**
* genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished
+ * @cd: pointer to genwqe device descriptor
*
* We use this as condition for our wait-queue code.
*/
@@ -993,6 +999,7 @@ static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
/**
* genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight
+ * @cd: pointer to genwqe device descriptor
*
* Keep track of the number of DDCBs which are currently in the
* queue. This is needed for statistics as well as a condition if we want
@@ -1171,6 +1178,7 @@ static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
/**
* genwqe_card_thread() - Work thread for the DDCB queue
+ * @data: pointer to genwqe device descriptor
*
* The idea is to check if there are DDCBs in processing. If there are
* some finished DDCBs, we process them and wake up the
@@ -1299,6 +1307,7 @@ int genwqe_setup_service_layer(struct genwqe_dev *cd)
/**
* queue_wake_up_all() - Handles fatal error case
+ * @cd: pointer to genwqe device descriptor
*
* The PCI device got unusable and we have to stop all pending
* requests as fast as we can. The code after this must purge the
@@ -1323,6 +1332,7 @@ static int queue_wake_up_all(struct genwqe_dev *cd)
/**
* genwqe_finish_queue() - Remove any genwqe devices and user-interfaces
+ * @cd: pointer to genwqe device descriptor
*
* Relies on the pre-condition that there are no users of the card
* device anymore e.g. with open file-descriptors.
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index 1b5b82e65268..491fb4482da2 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 040a0bda3125..55fc5b80e649 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -87,7 +87,7 @@ static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
* @cfile: Descriptor of opened file
* @u_addr: User virtual address
* @size: Size of buffer
- * @dma_addr: DMA address to be updated
+ * @virt_addr: Virtual address to be updated
*
* Return: Pointer to the corresponding mapping NULL if not found
*/
@@ -144,6 +144,7 @@ static void __genwqe_del_mapping(struct genwqe_file *cfile,
* @u_addr: user virtual address
* @size: size of buffer
* @dma_addr: DMA address to be updated
+ * @virt_addr: Virtual address to be updated
* Return: Pointer to the corresponding mapping NULL if not found
*/
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
@@ -249,6 +250,8 @@ static void genwqe_remove_pinnings(struct genwqe_file *cfile)
/**
* genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
+ * @cd: GenWQE device information
+ * @sig: Signal to send out
*
* E.g. genwqe_send_signal(cd, SIGIO);
*/
@@ -380,6 +383,7 @@ static void genwqe_vma_open(struct vm_area_struct *vma)
/**
* genwqe_vma_close() - Called each time when vma is unmapped
+ * @vma: VMA area to close
*
* Free memory which got allocated by GenWQE mmap().
*/
@@ -416,6 +420,8 @@ static const struct vm_operations_struct genwqe_vma_ops = {
/**
* genwqe_mmap() - Provide contiguous buffers to userspace
+ * @filp: File pointer (unused)
+ * @vma: VMA area to map
*
* We use mmap() to allocate contiguous buffers used for DMA
* transfers. After the buffer is allocated we remap it to user-space
@@ -484,16 +490,15 @@ static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
return rc;
}
+#define FLASH_BLOCK 0x40000 /* we use 256k blocks */
+
/**
* do_flash_update() - Execute flash update (write image or CVPD)
- * @cd: genwqe device
+ * @cfile: Descriptor of opened file
* @load: details about image load
*
* Return: 0 if successful
*/
-
-#define FLASH_BLOCK 0x40000 /* we use 256k blocks */
-
static int do_flash_update(struct genwqe_file *cfile,
struct genwqe_bitstream *load)
{
@@ -820,6 +825,8 @@ static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
/**
* ddcb_cmd_cleanup() - Remove dynamically created fixup entries
+ * @cfile: Descriptor of opened file
+ * @req: DDCB work request
*
* Only if there are any. Pinnings are not removed.
*/
@@ -844,6 +851,8 @@ static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
/**
* ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
+ * @cfile: Descriptor of opened file
+ * @req: DDCB work request
*
* Before the DDCB gets executed we need to handle the fixups. We
* replace the user-space addresses with DMA addresses or do
@@ -974,6 +983,8 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
/**
* genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
+ * @cfile: Descriptor of opened file
+ * @cmd: Command identifier (passed from user)
*
* The code will build up the translation tables or look up the
* contiguous memory allocation table to find the right translations
@@ -1339,6 +1350,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
/**
* genwqe_device_remove() - Remove genwqe's char device
+ * @cd: GenWQE device information
*
* This function must be called after the client devices are removed
* because it will free the major/minor number range for the genwqe
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c
index 28a3fb1533f7..b2f115602523 100644
--- a/drivers/misc/genwqe/card_sysfs.c
+++ b/drivers/misc/genwqe/card_sysfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -129,7 +129,7 @@ static ssize_t base_clock_show(struct device *dev,
}
static DEVICE_ATTR_RO(base_clock);
-/**
+/*
* curr_bitstream_show() - Show the current bitstream id
*
* There is a bug in some old versions of the CPLD which selects the
@@ -156,7 +156,7 @@ static ssize_t curr_bitstream_show(struct device *dev,
}
static DEVICE_ATTR_RO(curr_bitstream);
-/**
+/*
* next_bitstream_show() - Show the next activated bitstream
*
* IO_SLC_CFGREG_SOFTRESET: This register can only be accessed by the PF.
@@ -260,7 +260,7 @@ static struct attribute *genwqe_normal_attributes[] = {
NULL,
};
-/**
+/*
* genwqe_is_visible() - Determine if sysfs attribute should be visible or not
*
* VFs have restricted mmio capabilities, so not all sysfs entries
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 77c21caf2acd..039b923d1d60 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -129,6 +129,9 @@ u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
/**
* genwqe_read_app_id() - Extract app_id
+ * @cd: genwqe device descriptor
+ * @app_name: carrier used to pass-back name
+ * @len: length of data for name
*
* app_unitcfg need to be filled with valid data first
*/
@@ -183,7 +186,7 @@ void genwqe_init_crc32(void)
* @init: initial crc (0xffffffff at start)
*
* polynomial = x^32 * + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
-
+ *
* Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
* result in a crc32 of 0xf33cb7d3.
*
@@ -277,7 +280,7 @@ static int genwqe_sgl_size(int num_pages)
return roundup(len, PAGE_SIZE);
}
-/**
+/*
* genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
*
* Allocates memory for sgl and overlapping pages. Pages which might
@@ -460,6 +463,8 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
/**
* genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
+ * @cd: genwqe device descriptor
+ * @sgl: scatter gather list describing user-space memory
*
* After the DMA transfer has been completed we free the memory for
* the sgl and the cached pages. Data is being transferred from cached
@@ -710,6 +715,7 @@ int genwqe_read_softreset(struct genwqe_dev *cd)
/**
* genwqe_set_interrupt_capability() - Configure MSI capability structure
* @cd: pointer to the device
+ * @count: number of vectors to allocate
* Return: 0 if no error
*/
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
@@ -738,7 +744,7 @@ void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
* @i: index to desired entry
* @m: maximum possible entries
* @addr: addr which is read
- * @index: index in debug array
+ * @idx: index in debug array
* @val: read value
*/
static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
@@ -818,6 +824,8 @@ int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
/**
* genwqe_ffdc_buff_size() - Calculates the number of dump registers
+ * @cd: genwqe device descriptor
+ * @uid: unit ID
*/
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
{
@@ -871,6 +879,10 @@ int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
/**
* genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
+ * @cd: genwqe device descriptor
+ * @uid: unit ID
+ * @regs: register information
+ * @max_regs: number of register entries
*/
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
struct genwqe_reg *regs, unsigned int max_regs)
@@ -956,6 +968,10 @@ int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
/**
* genwqe_write_vreg() - Write register in virtual window
+ * @cd: genwqe device descriptor
+ * @reg: register (byte) offset within BAR
+ * @val: value to write
+ * @func: PCI virtual function
*
* Note, these registers are only accessible to the PF through the
* VF-window. It is not intended for the VF to access.
@@ -969,6 +985,9 @@ int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
/**
* genwqe_read_vreg() - Read register in virtual window
+ * @cd: genwqe device descriptor
+ * @reg: register (byte) offset within BAR
+ * @func: PCI virtual function
*
* Note, these registers are only accessible to the PF through the
* VF-window. It is not intended for the VF to access.
@@ -981,6 +1000,7 @@ u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
/**
* genwqe_base_clock_frequency() - Determine base clock frequency of the card
+ * @cd: genwqe device descriptor
*
* Note: From a design perspective it turned out to be a bad idea to
* use codes here to specify the frequency/speed values. An old
@@ -1005,6 +1025,7 @@ int genwqe_base_clock_frequency(struct genwqe_dev *cd)
/**
* genwqe_stop_traps() - Stop traps
+ * @cd: genwqe device descriptor
*
* Before reading out the analysis data, we need to stop the traps.
*/
@@ -1015,6 +1036,7 @@ void genwqe_stop_traps(struct genwqe_dev *cd)
/**
* genwqe_start_traps() - Start traps
+ * @cd: genwqe device descriptor
*
* After having read the data, we can/must enable the traps again.
*/
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
index 421ebd903069..a786c0a7de9a 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/misc/habanalabs/Makefile
@@ -3,16 +3,15 @@
# Makefile for HabanaLabs AI accelerators driver
#
-obj-m := habanalabs.o
+obj-$(CONFIG_HABANA_AI) := habanalabs.o
-habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \
- command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \
- command_submission.o mmu.o firmware_if.o pci.o
-
-habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o
+include $(src)/common/Makefile
+habanalabs-y += $(HL_COMMON_FILES)
include $(src)/goya/Makefile
habanalabs-y += $(HL_GOYA_FILES)
include $(src)/gaudi/Makefile
habanalabs-y += $(HL_GAUDI_FILES)
+
+habanalabs-$(CONFIG_DEBUG_FS) += common/debugfs.o
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
new file mode 100644
index 000000000000..b984bfa4face
--- /dev/null
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
+ common/asid.o common/habanalabs_ioctl.o \
+ common/command_buffer.o common/hw_queue.o common/irq.o \
+ common/sysfs.o common/hwmon.o common/memory.o \
+ common/command_submission.o common/mmu.o common/firmware_if.o \
+ common/pci.o
diff --git a/drivers/misc/habanalabs/asid.c b/drivers/misc/habanalabs/common/asid.c
index a2fdf31cf27c..a2fdf31cf27c 100644
--- a/drivers/misc/habanalabs/asid.c
+++ b/drivers/misc/habanalabs/common/asid.c
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index 02d13f71b1df..7c38c4f7f9c0 100644
--- a/drivers/misc/habanalabs/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -10,12 +10,18 @@
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/genalloc.h>
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
- hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
- (void *) (uintptr_t) cb->kernel_address,
- cb->bus_address);
+ if (cb->is_internal)
+ gen_pool_free(hdev->internal_cb_pool,
+ cb->kernel_address, cb->size);
+ else
+ hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
+ (void *) (uintptr_t) cb->kernel_address,
+ cb->bus_address);
+
kfree(cb);
}
@@ -44,9 +50,10 @@ static void cb_release(struct kref *ref)
}
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
- int ctx_id)
+ int ctx_id, bool internal_cb)
{
struct hl_cb *cb;
+ u32 cb_offset;
void *p;
/*
@@ -65,13 +72,25 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
if (!cb)
return NULL;
- if (ctx_id == HL_KERNEL_ASID_ID)
+ if (internal_cb) {
+ p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
+ if (!p) {
+ kfree(cb);
+ return NULL;
+ }
+
+ cb_offset = p - hdev->internal_cb_pool_virt_addr;
+ cb->is_internal = true;
+ cb->bus_address = hdev->internal_cb_va_base + cb_offset;
+ } else if (ctx_id == HL_KERNEL_ASID_ID) {
p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
&cb->bus_address, GFP_ATOMIC);
- else
+ } else {
p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
&cb->bus_address,
GFP_USER | __GFP_ZERO);
+ }
+
if (!p) {
dev_err(hdev->dev,
"failed to allocate %d of dma memory for CB\n",
@@ -87,7 +106,7 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
}
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- u32 cb_size, u64 *handle, int ctx_id)
+ u32 cb_size, u64 *handle, int ctx_id, bool internal_cb)
{
struct hl_cb *cb;
bool alloc_new_cb = true;
@@ -112,28 +131,30 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
goto out_err;
}
- /* Minimum allocation must be PAGE SIZE */
- if (cb_size < PAGE_SIZE)
- cb_size = PAGE_SIZE;
-
- if (ctx_id == HL_KERNEL_ASID_ID &&
- cb_size <= hdev->asic_prop.cb_pool_cb_size) {
-
- spin_lock(&hdev->cb_pool_lock);
- if (!list_empty(&hdev->cb_pool)) {
- cb = list_first_entry(&hdev->cb_pool, typeof(*cb),
- pool_list);
- list_del(&cb->pool_list);
- spin_unlock(&hdev->cb_pool_lock);
- alloc_new_cb = false;
- } else {
- spin_unlock(&hdev->cb_pool_lock);
- dev_dbg(hdev->dev, "CB pool is empty\n");
+ if (!internal_cb) {
+ /* Minimum allocation must be PAGE SIZE */
+ if (cb_size < PAGE_SIZE)
+ cb_size = PAGE_SIZE;
+
+ if (ctx_id == HL_KERNEL_ASID_ID &&
+ cb_size <= hdev->asic_prop.cb_pool_cb_size) {
+
+ spin_lock(&hdev->cb_pool_lock);
+ if (!list_empty(&hdev->cb_pool)) {
+ cb = list_first_entry(&hdev->cb_pool,
+ typeof(*cb), pool_list);
+ list_del(&cb->pool_list);
+ spin_unlock(&hdev->cb_pool_lock);
+ alloc_new_cb = false;
+ } else {
+ spin_unlock(&hdev->cb_pool_lock);
+ dev_dbg(hdev->dev, "CB pool is empty\n");
+ }
}
}
if (alloc_new_cb) {
- cb = hl_cb_alloc(hdev, cb_size, ctx_id);
+ cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
if (!cb) {
rc = -ENOMEM;
goto out_err;
@@ -229,8 +250,8 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
rc = -EINVAL;
} else {
rc = hl_cb_create(hdev, &hpriv->cb_mgr,
- args->in.cb_size, &handle,
- hpriv->ctx->asid);
+ args->in.cb_size, &handle,
+ hpriv->ctx->asid, false);
}
memset(args, 0, sizeof(*args));
@@ -398,14 +419,15 @@ void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
idr_destroy(&mgr->cb_handles);
}
-struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size)
+struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
+ bool internal_cb)
{
u64 cb_handle;
struct hl_cb *cb;
int rc;
rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
- HL_KERNEL_ASID_ID);
+ HL_KERNEL_ASID_ID, internal_cb);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate CB for the kernel driver %d\n", rc);
@@ -437,7 +459,7 @@ int hl_cb_pool_init(struct hl_device *hdev)
for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
- HL_KERNEL_ASID_ID);
+ HL_KERNEL_ASID_ID, false);
if (cb) {
cb->is_pool = true;
list_add(&cb->pool_list, &hdev->cb_pool);
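
The internal-CB path added above draws fixed-size command buffers from a genalloc pool instead of issuing a dma_alloc_coherent() call per buffer. A hedged sketch of that allocator pattern; names are illustrative and error handling is trimmed:

#include <linux/genalloc.h>
#include <linux/mm.h>

/* Carve an allocator out of one pre-mapped, DMA-visible region. */
static struct gen_pool *demo_pool_create(void *virt, size_t size)
{
        struct gen_pool *pool = gen_pool_create(PAGE_SHIFT, -1);

        if (!pool)
                return NULL;

        if (gen_pool_add(pool, (unsigned long)virt, size, -1)) {
                gen_pool_destroy(pool);
                return NULL;
        }
        return pool;
}

/* Per-buffer alloc/free then become cheap pool operations:
 *
 *      unsigned long p = gen_pool_alloc(pool, cb_size);
 *      ...
 *      gen_pool_free(pool, p, cb_size);
 */
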
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index f3a8f113865d..b9840e368eb5 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -246,6 +246,18 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
kfree(job);
}
+static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx)
+{
+ hdev->aggregated_cs_counters.device_in_reset_drop_cnt +=
+ ctx->cs_counters.device_in_reset_drop_cnt;
+ hdev->aggregated_cs_counters.out_of_mem_drop_cnt +=
+ ctx->cs_counters.out_of_mem_drop_cnt;
+ hdev->aggregated_cs_counters.parsing_drop_cnt +=
+ ctx->cs_counters.parsing_drop_cnt;
+ hdev->aggregated_cs_counters.queue_full_drop_cnt +=
+ ctx->cs_counters.queue_full_drop_cnt;
+}
+
static void cs_do_release(struct kref *ref)
{
struct hl_cs *cs = container_of(ref, struct hl_cs,
@@ -349,13 +361,16 @@ static void cs_do_release(struct kref *ref)
dma_fence_signal(cs->fence);
dma_fence_put(cs->fence);
+ cs_counters_aggregate(hdev, cs->ctx);
+
+ kfree(cs->jobs_in_queue_cnt);
kfree(cs);
}
static void cs_timedout(struct work_struct *work)
{
struct hl_device *hdev;
- int ctx_asid, rc;
+ int rc;
struct hl_cs *cs = container_of(work, struct hl_cs,
work_tdr.work);
rc = cs_get_unless_zero(cs);
@@ -371,11 +386,10 @@ static void cs_timedout(struct work_struct *work)
cs->timedout = true;
hdev = cs->ctx->hdev;
- ctx_asid = cs->ctx->asid;
- /* TODO: add information about last signaled seq and last emitted seq */
- dev_err(hdev->dev, "User %d command submission %llu got stuck!\n",
- ctx_asid, cs->sequence);
+ dev_err(hdev->dev,
+ "Command submission %llu has not finished in time!\n",
+ cs->sequence);
cs_put(cs);
@@ -418,21 +432,29 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
spin_lock(&ctx->cs_lock);
cs_cmpl->cs_seq = ctx->cs_sequence;
- other = ctx->cs_pending[cs_cmpl->cs_seq & (HL_MAX_PENDING_CS - 1)];
+ other = ctx->cs_pending[cs_cmpl->cs_seq &
+ (hdev->asic_prop.max_pending_cs - 1)];
if ((other) && (!dma_fence_is_signaled(other))) {
- spin_unlock(&ctx->cs_lock);
dev_dbg(hdev->dev,
"Rejecting CS because of too many in-flights CS\n");
rc = -EAGAIN;
goto free_fence;
}
+ cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
+ sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
+ if (!cs->jobs_in_queue_cnt) {
+ rc = -ENOMEM;
+ goto free_fence;
+ }
+
dma_fence_init(&cs_cmpl->base_fence, &hl_fence_ops, &cs_cmpl->lock,
ctx->asid, ctx->cs_sequence);
cs->sequence = cs_cmpl->cs_seq;
- ctx->cs_pending[cs_cmpl->cs_seq & (HL_MAX_PENDING_CS - 1)] =
+ ctx->cs_pending[cs_cmpl->cs_seq &
+ (hdev->asic_prop.max_pending_cs - 1)] =
&cs_cmpl->base_fence;
ctx->cs_sequence++;
@@ -447,6 +469,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
return 0;
free_fence:
+ spin_unlock(&ctx->cs_lock);
kfree(cs_cmpl);
free_cs:
kfree(cs);
@@ -463,10 +486,12 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
void hl_cs_rollback_all(struct hl_device *hdev)
{
+ int i;
struct hl_cs *cs, *tmp;
/* flush all completions */
- flush_workqueue(hdev->cq_wq);
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ flush_workqueue(hdev->cq_wq[i]);
/* Make sure we don't have leftovers in the H/W queues mirror list */
list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
@@ -502,7 +527,7 @@ static int validate_queue_index(struct hl_device *hdev,
/* This must be checked here to prevent out-of-bounds access to
* hw_queues_props array
*/
- if (chunk->queue_index >= HL_MAX_QUEUES) {
+ if (chunk->queue_index >= asic->max_queues) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
return -EINVAL;
@@ -511,7 +536,7 @@ static int validate_queue_index(struct hl_device *hdev,
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
if (hw_queue_prop->type == QUEUE_TYPE_NA) {
- dev_err(hdev->dev, "Queue index %d is not applicable\n",
+ dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
return -EINVAL;
}
@@ -638,12 +663,15 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = validate_queue_index(hdev, chunk, &queue_type,
&is_kernel_allocated_cb);
- if (rc)
+ if (rc) {
+ hpriv->ctx->cs_counters.parsing_drop_cnt++;
goto free_cs_object;
+ }
if (is_kernel_allocated_cb) {
cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
+ hpriv->ctx->cs_counters.parsing_drop_cnt++;
rc = -EINVAL;
goto free_cs_object;
}
@@ -657,6 +685,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
+ hpriv->ctx->cs_counters.out_of_mem_drop_cnt++;
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
if (is_kernel_allocated_cb)
@@ -689,6 +718,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = cs_parser(hpriv, job);
if (rc) {
+ hpriv->ctx->cs_counters.parsing_drop_cnt++;
dev_err(hdev->dev,
"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
cs->ctx->asid, cs->sequence, job->id, rc);
@@ -697,6 +727,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
if (int_queues_only) {
+ hpriv->ctx->cs_counters.parsing_drop_cnt++;
dev_err(hdev->dev,
"Reject CS %d.%llu because only internal queues jobs are present\n",
cs->ctx->asid, cs->sequence);
@@ -746,6 +777,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
+ enum hl_queue_type q_type;
u64 *signal_seq_arr = NULL, signal_seq;
u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size;
int rc;
@@ -778,9 +810,10 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
chunk = &cs_chunk_array[0];
q_idx = chunk->queue_index;
hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
+ q_type = hw_queue_prop->type;
- if ((q_idx >= HL_MAX_QUEUES) ||
- (hw_queue_prop->type != QUEUE_TYPE_EXT)) {
+ if ((q_idx >= hdev->asic_prop.max_queues) ||
+ (!hw_queue_prop->supports_sync_stream)) {
dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx);
rc = -EINVAL;
goto free_cs_chunk_array;
@@ -877,25 +910,28 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
*cs_seq = cs->sequence;
- job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
+ job = hl_cs_allocate_job(hdev, q_type, true);
if (!job) {
+ ctx->cs_counters.out_of_mem_drop_cnt++;
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
goto put_cs;
}
- cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ if (cs->type == CS_TYPE_WAIT)
+ cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
+ else
+ cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
+
+ cb = hl_cb_kernel_create(hdev, cb_size,
+ q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
if (!cb) {
+ ctx->cs_counters.out_of_mem_drop_cnt++;
kfree(job);
rc = -EFAULT;
goto put_cs;
}
- if (cs->type == CS_TYPE_WAIT)
- cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
- else
- cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
-
job->id = 0;
job->cs = cs;
job->user_cb = cb;
@@ -1134,7 +1170,7 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev,
rc = PTR_ERR(fence);
if (rc == -EINVAL)
dev_notice_ratelimited(hdev->dev,
- "Can't wait on seq %llu because current CS is at seq %llu\n",
+ "Can't wait on CS %llu because current CS is at seq %llu\n",
seq, ctx->cs_sequence);
} else if (fence) {
rc = dma_fence_wait_timeout(fence, true, timeout);
@@ -1167,15 +1203,21 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
memset(args, 0, sizeof(*args));
if (rc < 0) {
- dev_err_ratelimited(hdev->dev,
- "Error %ld on waiting for CS handle %llu\n",
- rc, seq);
if (rc == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for CS handle %llu\n",
+ seq);
args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
rc = -EINTR;
} else if (rc == -ETIMEDOUT) {
+ dev_err_ratelimited(hdev->dev,
+ "CS %llu has timed-out while user process is waiting for it\n",
+ seq);
args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
} else if (rc == -EIO) {
+ dev_err_ratelimited(hdev->dev,
+ "CS %llu has been aborted while user process is waiting for it\n",
+ seq);
args->out.status = HL_WAIT_CS_STATUS_ABORTED;
}
return rc;
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/common/context.c
index ec92b3506b1f..3e375958e73b 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -22,9 +22,11 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
* to this function unless the ref count is 0
*/
- for (i = 0 ; i < HL_MAX_PENDING_CS ; i++)
+ for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
dma_fence_put(ctx->cs_pending[i]);
+ kfree(ctx->cs_pending);
+
if (ctx->asid != HL_KERNEL_ASID_ID) {
/* The engines are stopped as there is no executing CS, but the
* Coresight might still be working by accessing addresses
@@ -110,8 +112,7 @@ void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
return;
dev_warn(hdev->dev,
- "Context %d closed or terminated but its CS are executing\n",
- ctx->asid);
+ "user process released device but its command submissions are still executing\n");
}
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
@@ -126,34 +127,49 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
spin_lock_init(&ctx->cs_lock);
atomic_set(&ctx->thread_ctx_switch_token, 1);
ctx->thread_ctx_switch_wait_token = 0;
+ ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
+ sizeof(struct dma_fence *),
+ GFP_KERNEL);
+ if (!ctx->cs_pending)
+ return -ENOMEM;
if (is_kernel_ctx) {
ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
rc = hl_mmu_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "Failed to init mmu ctx module\n");
- goto mem_ctx_err;
+ goto err_free_cs_pending;
}
} else {
ctx->asid = hl_asid_alloc(hdev);
if (!ctx->asid) {
dev_err(hdev->dev, "No free ASID, failed to create context\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_free_cs_pending;
}
rc = hl_vm_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "Failed to init mem ctx module\n");
rc = -ENOMEM;
- goto mem_ctx_err;
+ goto err_asid_free;
+ }
+
+ rc = hdev->asic_funcs->ctx_init(ctx);
+ if (rc) {
+ dev_err(hdev->dev, "ctx_init failed\n");
+ goto err_vm_ctx_fini;
}
}
return 0;
-mem_ctx_err:
- if (ctx->asid != HL_KERNEL_ASID_ID)
- hl_asid_free(hdev, ctx->asid);
+err_vm_ctx_fini:
+ hl_vm_ctx_fini(ctx);
+err_asid_free:
+ hl_asid_free(hdev, ctx->asid);
+err_free_cs_pending:
+ kfree(ctx->cs_pending);
return rc;
}
@@ -170,6 +186,7 @@ int hl_ctx_put(struct hl_ctx *ctx)
struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
+ struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
struct dma_fence *fence;
spin_lock(&ctx->cs_lock);
@@ -179,13 +196,13 @@ struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
return ERR_PTR(-EINVAL);
}
- if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
+ if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) {
spin_unlock(&ctx->cs_lock);
return NULL;
}
fence = dma_fence_get(
- ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]);
+ ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)]);
spin_unlock(&ctx->cs_lock);
return fence;
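The masking in hl_ctx_get_fence() is only a valid ring lookup because max_pending_cs is validated to be a power of two (see the IS_MAX_PENDING_CS_VALID macro added to habanalabs.h further down). A standalone sketch of the arithmetic, with made-up sequence numbers:

    #include <assert.h>

    /* Slot lookup as in hl_ctx_get_fence(): valid only when
     * max_pending_cs is a power of two, so the mask equals a modulo.
     */
    static unsigned int pending_slot(unsigned long long seq,
                                     unsigned int max_pending_cs)
    {
            return seq & (max_pending_cs - 1);  /* == seq % max_pending_cs */
    }

    int main(void)
    {
            assert(pending_slot(200, 64) == 200 % 64);  /* slot 8 */
            assert(pending_slot(63, 64) == 63);
            assert(pending_slot(64, 64) == 0);          /* wraps */
            return 0;
    }
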
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 0bc036e01ee8..c50c6fc9e905 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -6,7 +6,7 @@
*/
#include "habanalabs.h"
-#include "include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/pci.h>
#include <linux/debugfs.h>
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/common/device.c
index 59608d1bac88..be16b75bdfdb 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -249,7 +249,8 @@ static void device_cdev_sysfs_del(struct hl_device *hdev)
*/
static int device_early_init(struct hl_device *hdev)
{
- int rc;
+ int i, rc;
+ char workq_name[32];
switch (hdev->asic_type) {
case ASIC_GOYA:
@@ -274,11 +275,24 @@ static int device_early_init(struct hl_device *hdev)
if (rc)
goto early_fini;
- hdev->cq_wq = alloc_workqueue("hl-free-jobs", WQ_UNBOUND, 0);
- if (hdev->cq_wq == NULL) {
- dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
- rc = -ENOMEM;
- goto asid_fini;
+ if (hdev->asic_prop.completion_queues_count) {
+ hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
+ sizeof(*hdev->cq_wq),
+ GFP_ATOMIC);
+ if (!hdev->cq_wq) {
+ rc = -ENOMEM;
+ goto asid_fini;
+ }
+ }
+
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
+ snprintf(workq_name, 32, "hl-free-jobs-%u", i);
+ hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
+ if (hdev->cq_wq[i] == NULL) {
+ dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
+ rc = -ENOMEM;
+ goto free_cq_wq;
+ }
}
hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
@@ -321,7 +335,10 @@ free_chip_info:
free_eq_wq:
destroy_workqueue(hdev->eq_wq);
free_cq_wq:
- destroy_workqueue(hdev->cq_wq);
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ if (hdev->cq_wq[i])
+ destroy_workqueue(hdev->cq_wq[i]);
+ kfree(hdev->cq_wq);
asid_fini:
hl_asid_fini(hdev);
early_fini:
@@ -339,6 +356,8 @@ early_fini:
*/
static void device_early_fini(struct hl_device *hdev)
{
+ int i;
+
mutex_destroy(&hdev->mmu_cache_lock);
mutex_destroy(&hdev->debug_lock);
mutex_destroy(&hdev->send_cpu_message_lock);
@@ -351,7 +370,10 @@ static void device_early_fini(struct hl_device *hdev)
kfree(hdev->hl_chip_info);
destroy_workqueue(hdev->eq_wq);
- destroy_workqueue(hdev->cq_wq);
+
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ destroy_workqueue(hdev->cq_wq[i]);
+ kfree(hdev->cq_wq);
hl_asid_fini(hdev);
@@ -838,6 +860,22 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
if (rc)
return 0;
+ if (hard_reset) {
+ /* Disable PCI access from the device F/W so it won't send
+ * us additional interrupts. We disable MSI/MSI-X at
+ * the halt_engines function and we can't have the F/W
+ * sending us interrupts after that. We need to disable
+ * the access here because if the device is marked as
+ * disabled, the message won't be sent. Also, in case
+ * of heartbeat, the device CPU is marked as disabled,
+ * so this message won't be sent
+ */
+ if (hl_fw_send_pci_access_msg(hdev,
+ ARMCP_PACKET_DISABLE_PCI_ACCESS))
+ dev_warn(hdev->dev,
+ "Failed to disable PCI access by F/W\n");
+ }
+
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
@@ -995,6 +1033,12 @@ again:
}
}
+ /* Enable the device now, because part of the initialization
+ * requires communication with the device firmware to get
+ * information that is needed for the initialization itself
+ */
+ hdev->disabled = false;
+
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
dev_err(hdev->dev,
@@ -1002,8 +1046,6 @@ again:
goto out_err;
}
- hdev->disabled = false;
-
/* Check that the communication with the device is working */
rc = hdev->asic_funcs->test_queues(hdev);
if (rc) {
@@ -1144,14 +1186,17 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
* because there the addresses of the completion queues are being
* passed as arguments to request_irq
*/
- hdev->completion_queue = kcalloc(cq_cnt,
- sizeof(*hdev->completion_queue),
- GFP_KERNEL);
+ if (cq_cnt) {
+ hdev->completion_queue = kcalloc(cq_cnt,
+ sizeof(*hdev->completion_queue),
+ GFP_KERNEL);
- if (!hdev->completion_queue) {
- dev_err(hdev->dev, "failed to allocate completion queues\n");
- rc = -ENOMEM;
- goto hw_queues_destroy;
+ if (!hdev->completion_queue) {
+ dev_err(hdev->dev,
+ "failed to allocate completion queues\n");
+ rc = -ENOMEM;
+ goto hw_queues_destroy;
+ }
}
for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
@@ -1162,6 +1207,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
"failed to initialize completion queue\n");
goto cq_fini;
}
+ hdev->completion_queue[i].cq_idx = i;
}
/*
@@ -1219,6 +1265,12 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
*/
add_cdev_sysfs_on_err = true;
+ /* Enable the device now, because part of the initialization
+ * requires communication with the device firmware to get
+ * information that is needed for the initialization itself
+ */
+ hdev->disabled = false;
+
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize the H/W\n");
@@ -1226,8 +1278,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto out_disabled;
}
- hdev->disabled = false;
-
/* Check that the communication with the device is working */
rc = hdev->asic_funcs->test_queues(hdev);
if (rc) {
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index d27841cb5bcb..f70302cdab1b 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -6,7 +6,7 @@
*/
#include "habanalabs.h"
-#include "include/hl_boot_if.h"
+#include "../include/common/hl_boot_if.h"
#include <linux/firmware.h>
#include <linux/genalloc.h>
@@ -15,7 +15,10 @@
/**
* hl_fw_load_fw_to_device() - Load F/W code to device's memory.
+ *
* @hdev: pointer to hl_device structure.
+ * @fw_name: the firmware image name
+ * @dst: IO memory mapped address space to copy firmware to
*
* Copy fw code from firmware file to device memory.
*
@@ -286,7 +289,7 @@ int hl_fw_armcp_info_get(struct hl_device *hdev)
HL_ARMCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
- "Failed to send ArmCP info pkt, error %d\n", rc);
+ "Failed to handle ArmCP info pkt, error %d\n", rc);
goto out;
}
@@ -337,7 +340,7 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
if (rc) {
dev_err(hdev->dev,
- "Failed to send ArmCP EEPROM packet, error %d\n", rc);
+ "Failed to handle ArmCP EEPROM packet, error %d\n", rc);
goto out;
}
@@ -390,6 +393,53 @@ static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg)
"Device boot error - NIC F/W initialization failed\n");
}
+static void hl_detect_cpu_boot_status(struct hl_device *hdev, u32 status)
+{
+ switch (status) {
+ case CPU_BOOT_STATUS_NA:
+ dev_err(hdev->dev,
+ "Device boot error - BTL did NOT run\n");
+ break;
+ case CPU_BOOT_STATUS_IN_WFE:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck inside WFE loop\n");
+ break;
+ case CPU_BOOT_STATUS_IN_BTL:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in BTL\n");
+ break;
+ case CPU_BOOT_STATUS_IN_PREBOOT:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in Preboot\n");
+ break;
+ case CPU_BOOT_STATUS_IN_SPL:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in SPL\n");
+ break;
+ case CPU_BOOT_STATUS_IN_UBOOT:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in u-boot\n");
+ break;
+ case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
+ dev_err(hdev->dev,
+ "Device boot error - DRAM initialization failed\n");
+ break;
+ case CPU_BOOT_STATUS_UBOOT_NOT_READY:
+ dev_err(hdev->dev,
+ "Device boot error - u-boot stopped by user\n");
+ break;
+ case CPU_BOOT_STATUS_TS_INIT_FAIL:
+ dev_err(hdev->dev,
+ "Device boot error - Thermal Sensor initialization failed\n");
+ break;
+ default:
+ dev_err(hdev->dev,
+ "Device boot error - Invalid status code %d\n",
+ status);
+ break;
+ }
+}
+
int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
u32 boot_err0_reg, bool skip_bmc,
@@ -463,50 +513,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
* versions but we keep them here for backward compatibility
*/
if (rc) {
- switch (status) {
- case CPU_BOOT_STATUS_NA:
- dev_err(hdev->dev,
- "Device boot error - BTL did NOT run\n");
- break;
- case CPU_BOOT_STATUS_IN_WFE:
- dev_err(hdev->dev,
- "Device boot error - Stuck inside WFE loop\n");
- break;
- case CPU_BOOT_STATUS_IN_BTL:
- dev_err(hdev->dev,
- "Device boot error - Stuck in BTL\n");
- break;
- case CPU_BOOT_STATUS_IN_PREBOOT:
- dev_err(hdev->dev,
- "Device boot error - Stuck in Preboot\n");
- break;
- case CPU_BOOT_STATUS_IN_SPL:
- dev_err(hdev->dev,
- "Device boot error - Stuck in SPL\n");
- break;
- case CPU_BOOT_STATUS_IN_UBOOT:
- dev_err(hdev->dev,
- "Device boot error - Stuck in u-boot\n");
- break;
- case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
- dev_err(hdev->dev,
- "Device boot error - DRAM initialization failed\n");
- break;
- case CPU_BOOT_STATUS_UBOOT_NOT_READY:
- dev_err(hdev->dev,
- "Device boot error - u-boot stopped by user\n");
- break;
- case CPU_BOOT_STATUS_TS_INIT_FAIL:
- dev_err(hdev->dev,
- "Device boot error - Thermal Sensor initialization failed\n");
- break;
- default:
- dev_err(hdev->dev,
- "Device boot error - Invalid status code %d\n",
- status);
- break;
- }
-
+ hl_detect_cpu_boot_status(hdev, status);
rc = -EIO;
goto out;
}
@@ -566,7 +573,8 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
"Device reports FIT image is corrupted\n");
else
dev_err(hdev->dev,
- "Device failed to load, %d\n", status);
+ "Failed to load firmware to device, %d\n",
+ status);
rc = -EIO;
goto out;
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 194d83352696..018d9d67e8e6 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -8,8 +8,9 @@
#ifndef HABANALABSP_H_
#define HABANALABSP_H_
-#include "include/armcp_if.h"
-#include "include/qman_if.h"
+#include "../include/common/armcp_if.h"
+#include "../include/common/qman_if.h"
+#include <uapi/misc/habanalabs.h>
#include <linux/cdev.h>
#include <linux/iopoll.h>
@@ -40,11 +41,6 @@
#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
-#define HL_MAX_QUEUES 128
-
-/* MUST BE POWER OF 2 and larger than 1 */
-#define HL_MAX_PENDING_CS 64
-
#define HL_IDLE_BUSY_TS_ARR_SIZE 4096
/* Memory */
@@ -53,6 +49,10 @@
/* MMU */
#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+/*
+ * HL_RSVD_SOBS: 'sync stream' reserved sync objects per QMAN stream
+ * HL_RSVD_MONS: 'sync stream' reserved monitors per QMAN stream
+ */
#define HL_RSVD_SOBS 4
#define HL_RSVD_MONS 2
@@ -61,6 +61,11 @@
#define HL_MAX_SOB_VAL (1 << 15)
+#define IS_POWER_OF_2(n) (n != 0 && ((n & (n - 1)) == 0))
+#define IS_MAX_PENDING_CS_VALID(n) (IS_POWER_OF_2(n) && (n > 1))
+
+#define HL_PCI_NUM_BARS 6
+
/**
* struct pgt_info - MMU hop page info.
* @node: hash linked-list node for the pgts shadow hash of pgts.
@@ -86,6 +91,16 @@ struct hl_device;
struct hl_fpriv;
/**
+ * enum hl_pci_match_mode - pci match mode per region
+ * @PCI_ADDRESS_MATCH_MODE: address match mode
+ * @PCI_BAR_MATCH_MODE: bar match mode
+ */
+enum hl_pci_match_mode {
+ PCI_ADDRESS_MATCH_MODE,
+ PCI_BAR_MATCH_MODE
+};
+
+/**
* enum hl_fw_component - F/W components to read version through registers.
* @FW_COMP_UBOOT: u-boot.
* @FW_COMP_PREBOOT: preboot.
@@ -121,6 +136,32 @@ enum hl_cs_type {
};
/*
+ * struct hl_inbound_pci_region - inbound region descriptor
+ * @mode: pci match mode for this region
+ * @addr: region target address
+ * @size: region size in bytes
+ * @offset_in_bar: offset within bar (address match mode)
+ * @bar: bar id
+ */
+struct hl_inbound_pci_region {
+ enum hl_pci_match_mode mode;
+ u64 addr;
+ u64 size;
+ u64 offset_in_bar;
+ u8 bar;
+};
+
+/*
+ * struct hl_outbound_pci_region - outbound region descriptor
+ * @addr: region target address
+ * @size: region size in bytes
+ */
+struct hl_outbound_pci_region {
+ u64 addr;
+ u64 size;
+};
+
+/*
* struct hl_hw_sob - H/W SOB info.
* @hdev: habanalabs device structure.
* @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
@@ -141,11 +182,13 @@ struct hl_hw_sob {
* false otherwise.
* @requires_kernel_cb: true if a CB handle must be provided for jobs on this
* queue, false otherwise (a CB address must be provided).
+ * @supports_sync_stream: true if queue supports sync stream
*/
struct hw_queue_properties {
enum hl_queue_type type;
u8 driver_only;
u8 requires_kernel_cb;
+ u8 supports_sync_stream;
};
/**
@@ -241,14 +284,19 @@ struct hl_mmu_properties {
* @psoc_pci_pll_nf: PCI PLL NF value.
* @psoc_pci_pll_od: PCI PLL OD value.
* @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
+ * @psoc_timestamp_frequency: frequency of the psoc timestamp clock.
* @high_pll: high PLL frequency used by the device.
* @cb_pool_cb_cnt: number of CBs in the CB pool.
* @cb_pool_cb_size: size of each CB in the CB pool.
+ * @max_pending_cs: maximum number of concurrent pending command submissions
+ * @max_queues: maximum number of queues in the system
+ * @sync_stream_first_sob: first sync object available for sync stream use
+ * @sync_stream_first_mon: first monitor available for sync stream use
* @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
*/
struct asic_fixed_properties {
- struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES];
+ struct hw_queue_properties *hw_queues_props;
struct armcp_info armcp_info;
char uboot_ver[VERSION_MAX_LEN];
char preboot_ver[VERSION_MAX_LEN];
@@ -282,9 +330,14 @@ struct asic_fixed_properties {
u32 psoc_pci_pll_nf;
u32 psoc_pci_pll_od;
u32 psoc_pci_pll_div_factor;
+ u32 psoc_timestamp_frequency;
u32 high_pll;
u32 cb_pool_cb_cnt;
u32 cb_pool_cb_size;
+ u32 max_pending_cs;
+ u32 max_queues;
+ u16 sync_stream_first_sob;
+ u16 sync_stream_first_mon;
u8 tpc_enabled_mask;
u8 completion_queues_count;
};
@@ -339,6 +392,7 @@ struct hl_cb_mgr {
* @ctx_id: holds the ID of the owner's context.
* @mmap: true if the CB is currently mmaped to user.
* @is_pool: true if CB was acquired from the pool, false otherwise.
+ * @is_internal: true if the CB is internally allocated, false otherwise.
*/
struct hl_cb {
struct kref refcount;
@@ -355,6 +409,7 @@ struct hl_cb {
u32 ctx_id;
u8 mmap;
u8 is_pool;
+ u8 is_internal;
};
@@ -364,38 +419,19 @@ struct hl_cb {
struct hl_cs_job;
-/*
- * Currently, there are two limitations on the maximum length of a queue:
- *
- * 1. The memory footprint of the queue. The current allocated space for the
- * queue is PAGE_SIZE. Because each entry in the queue is HL_BD_SIZE,
- * the maximum length of the queue can be PAGE_SIZE / HL_BD_SIZE,
- * which currently is 4096/16 = 256 entries.
- *
- * To increase that, we need either to decrease the size of the
- * BD (difficult), or allocate more than a single page (easier).
- *
- * 2. Because the size of the JOB handle field in the BD CTL / completion queue
- * is 10-bit, we can have up to 1024 open jobs per hardware queue.
- * Therefore, each queue can hold up to 1024 entries.
- *
- * HL_QUEUE_LENGTH is in units of struct hl_bd.
- * HL_QUEUE_LENGTH * sizeof(struct hl_bd) should be <= HL_PAGE_SIZE
- */
-
-#define HL_PAGE_SIZE 4096 /* minimum page size */
-/* Must be power of 2 (HL_PAGE_SIZE / HL_BD_SIZE) */
-#define HL_QUEUE_LENGTH 256
+/* Queue length of external and HW queues */
+#define HL_QUEUE_LENGTH 4096
#define HL_QUEUE_SIZE_IN_BYTES (HL_QUEUE_LENGTH * HL_BD_SIZE)
-/*
- * HL_CQ_LENGTH is in units of struct hl_cq_entry.
- * HL_CQ_LENGTH should be <= HL_PAGE_SIZE
- */
+#if (HL_MAX_JOBS_PER_CS > HL_QUEUE_LENGTH)
+#error "HL_QUEUE_LENGTH must be greater than HL_MAX_JOBS_PER_CS"
+#endif
+
+/* HL_CQ_LENGTH is in units of struct hl_cq_entry */
#define HL_CQ_LENGTH HL_QUEUE_LENGTH
#define HL_CQ_SIZE_IN_BYTES (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)
-/* Must be power of 2 (HL_PAGE_SIZE / HL_EQ_ENTRY_SIZE) */
+/* Must be power of 2 */
#define HL_EQ_LENGTH 64
#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
@@ -422,6 +458,7 @@ struct hl_cs_job;
* exist).
* @curr_sob_offset: the id offset to the currently used SOB from the
* HL_RSVD_SOBS that are being used by this queue.
+ * @supports_sync_stream: true if queue supports sync stream
*/
struct hl_hw_queue {
struct hl_hw_sob hw_sob[HL_RSVD_SOBS];
@@ -430,7 +467,7 @@ struct hl_hw_queue {
u64 kernel_address;
dma_addr_t bus_address;
u32 pi;
- u32 ci;
+ atomic_t ci;
u32 hw_queue_id;
u32 cq_id;
u32 msi_vec;
@@ -440,6 +477,7 @@ struct hl_hw_queue {
u16 base_mon_id;
u8 valid;
u8 curr_sob_offset;
+ u8 supports_sync_stream;
};
/**
@@ -447,6 +485,7 @@ struct hl_hw_queue {
* @hdev: pointer to the device structure
* @kernel_address: holds the queue's kernel virtual address
* @bus_address: holds the queue's DMA address
+ * @cq_idx: completion queue index in array
* @hw_queue_id: the id of the matching H/W queue
* @ci: ci inside the queue
* @pi: pi inside the queue
@@ -456,6 +495,7 @@ struct hl_cq {
struct hl_device *hdev;
u64 kernel_address;
dma_addr_t bus_address;
+ u32 cq_idx;
u32 hw_queue_id;
u32 ci;
u32 pi;
@@ -519,6 +559,15 @@ enum hl_pll_frequency {
PLL_LAST
};
+#define PLL_REF_CLK 50
+
+enum div_select_defs {
+ DIV_SEL_REF_CLK = 0,
+ DIV_SEL_PLL_CLK = 1,
+ DIV_SEL_DIVIDED_REF = 2,
+ DIV_SEL_DIVIDED_PLL = 3,
+};
+
/**
* struct hl_asic_funcs - ASIC specific functions that are can be called from
* common code.
@@ -601,14 +650,13 @@ enum hl_pll_frequency {
* @rreg: Read a register. Needed for simulator support.
* @wreg: Write a register. Needed for simulator support.
* @halt_coresight: stop the ETF and ETR traces.
+ * @ctx_init: context dependent initialization.
* @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz
* @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
* @read_device_fw_version: read the device's firmware versions that are
* contained in registers
* @load_firmware_to_device: load the firmware to the device's memory
* @load_boot_fit_to_device: load boot fit to device's memory
- * @ext_queue_init: Initialize the given external queue.
- * @ext_queue_reset: Reset the given external queue.
* @get_signal_cb_size: Get signal CB size.
* @get_wait_cb_size: Get wait CB size.
* @gen_signal_cb: Generate a signal CB.
@@ -705,14 +753,13 @@ struct hl_asic_funcs {
u32 (*rreg)(struct hl_device *hdev, u32 reg);
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
void (*halt_coresight)(struct hl_device *hdev);
+ int (*ctx_init)(struct hl_ctx *ctx);
int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
void (*read_device_fw_version)(struct hl_device *hdev,
enum hl_fw_component fwc);
int (*load_firmware_to_device)(struct hl_device *hdev);
int (*load_boot_fit_to_device)(struct hl_device *hdev);
- void (*ext_queue_init)(struct hl_device *hdev, u32 hw_queue_id);
- void (*ext_queue_reset)(struct hl_device *hdev, u32 hw_queue_id);
u32 (*get_signal_cb_size)(struct hl_device *hdev);
u32 (*get_wait_cb_size)(struct hl_device *hdev);
void (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id);
@@ -748,7 +795,6 @@ struct hl_va_range {
* struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area
* descriptor (hl_vm_phys_pg_list or hl_userptr).
- * @mmu_phys_hash: holds a mapping from physical address to pgt_info structure.
* @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
* @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
* @hdev: pointer to the device structure.
@@ -782,18 +828,18 @@ struct hl_va_range {
*/
struct hl_ctx {
DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
- DECLARE_HASHTABLE(mmu_phys_hash, MMU_HASH_TABLE_BITS);
DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
struct hl_fpriv *hpriv;
struct hl_device *hdev;
struct kref refcount;
- struct dma_fence *cs_pending[HL_MAX_PENDING_CS];
+ struct dma_fence **cs_pending;
struct hl_va_range *host_va_range;
struct hl_va_range *host_huge_va_range;
struct hl_va_range *dram_va_range;
struct mutex mem_hash_lock;
struct mutex mmu_lock;
struct list_head debugfs_list;
+ struct hl_cs_counters cs_counters;
u64 cs_sequence;
u64 *dram_default_hops;
spinlock_t cs_lock;
@@ -868,7 +914,7 @@ struct hl_userptr {
* @aborted: true if CS was aborted due to some device error.
*/
struct hl_cs {
- u16 jobs_in_queue_cnt[HL_MAX_QUEUES];
+ u16 *jobs_in_queue_cnt;
struct hl_ctx *ctx;
struct list_head job_list;
spinlock_t job_lock;
@@ -1352,7 +1398,9 @@ struct hl_device_idle_busy_ts {
/**
* struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device.
- * @pcie_bar: array of available PCIe bars.
+ * @pcie_bar_phys: array of available PCIe bars physical addresses.
+ * (required only for PCI address match mode)
+ * @pcie_bar: array of available PCIe bars virtual addresses.
* @rmmio: configuration area address on SRAM.
* @cdev: related char device.
* @cdev_ctrl: char device for control operations only (INFO IOCTL)
@@ -1363,7 +1411,8 @@ struct hl_device_idle_busy_ts {
* @asic_name: ASIC specific name.
* @asic_type: ASIC specific type.
* @completion_queue: array of hl_cq.
- * @cq_wq: work queue of completion queues for executing work in process context
+ * @cq_wq: work queues of completion queues for executing work in process
+ * context.
* @eq_wq: work queue of event queue for executing work in process context.
* @kernel_ctx: Kernel driver context structure.
* @kernel_queues: array of hl_hw_queue.
@@ -1392,12 +1441,17 @@ struct hl_device_idle_busy_ts {
* @hl_debugfs: device's debugfs manager.
* @cb_pool: list of preallocated CBs.
* @cb_pool_lock: protects the CB pool.
+ * @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
+ * @internal_cb_pool_dma_addr: internal command buffer pool dma address.
+ * @internal_cb_pool: internal command buffer memory pool.
+ * @internal_cb_va_base: internal CB pool MMU virtual address base.
* @fpriv_list: list of file private data structures. Each structure is created
* when a user opens the device
* @fpriv_list_lock: protects the fpriv_list
* @compute_ctx: current compute context executing.
* @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy
* and vice-versa
+ * @aggregated_cs_counters: aggregated cs counters among all contexts
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -1442,12 +1496,14 @@ struct hl_device_idle_busy_ts {
* @cdev_sysfs_created: were char devices and sysfs nodes created.
* @stop_on_err: true if engines should stop on error.
* @supports_sync_stream: is sync stream supported.
+ * @sync_stream_queue_idx: helper index for sync stream queues initialization.
* @supports_coresight: is CoreSight supported.
* @supports_soft_reset: is soft reset supported.
*/
struct hl_device {
struct pci_dev *pdev;
- void __iomem *pcie_bar[6];
+ u64 pcie_bar_phys[HL_PCI_NUM_BARS];
+ void __iomem *pcie_bar[HL_PCI_NUM_BARS];
void __iomem *rmmio;
struct cdev cdev;
struct cdev cdev_ctrl;
@@ -1458,7 +1514,7 @@ struct hl_device {
char asic_name[16];
enum hl_asic_type asic_type;
struct hl_cq *completion_queue;
- struct workqueue_struct *cq_wq;
+ struct workqueue_struct **cq_wq;
struct workqueue_struct *eq_wq;
struct hl_ctx *kernel_ctx;
struct hl_hw_queue *kernel_queues;
@@ -1490,6 +1546,11 @@ struct hl_device {
struct list_head cb_pool;
spinlock_t cb_pool_lock;
+ void *internal_cb_pool_virt_addr;
+ dma_addr_t internal_cb_pool_dma_addr;
+ struct gen_pool *internal_cb_pool;
+ u64 internal_cb_va_base;
+
struct list_head fpriv_list;
struct mutex fpriv_list_lock;
@@ -1497,6 +1558,8 @@ struct hl_device {
struct hl_device_idle_busy_ts *idle_busy_ts_arr;
+ struct hl_cs_counters aggregated_cs_counters;
+
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
@@ -1529,6 +1592,7 @@ struct hl_device {
u8 cdev_sysfs_created;
u8 stop_on_err;
u8 supports_sync_stream;
+ u8 sync_stream_queue_idx;
u8 supports_coresight;
u8 supports_soft_reset;
@@ -1697,7 +1761,7 @@ int hl_hwmon_init(struct hl_device *hdev);
void hl_hwmon_fini(struct hl_device *hdev);
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 cb_size,
- u64 *handle, int ctx_id);
+ u64 *handle, int ctx_id, bool internal_cb);
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
@@ -1705,7 +1769,8 @@ struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
void hl_cb_put(struct hl_cb *cb);
void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
-struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size);
+struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
+ bool internal_cb);
int hl_cb_pool_init(struct hl_device *hdev);
int hl_cb_pool_fini(struct hl_device *hdev);
@@ -1769,9 +1834,10 @@ int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
u64 addr);
-int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
- u64 dram_base_address, u64 host_phys_base_address,
- u64 host_phys_size);
+int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
+ struct hl_inbound_pci_region *pci_region);
+int hl_pci_set_outbound_region(struct hl_device *hdev,
+ struct hl_outbound_pci_region *pci_region);
int hl_pci_init(struct hl_device *hdev);
void hl_pci_fini(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 22716da9f85f..c6b31e93fb5e 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -238,7 +238,6 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
hdev->axi_drain = 0;
hdev->sram_scrambler_enable = 1;
hdev->dram_scrambler_enable = 1;
- hdev->rl_enable = 1;
hdev->bmc_enable = 1;
hdev->hard_reset_on_fw_events = 1;
}
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 52eedd3a6c3a..5af1c03da473 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -276,6 +276,27 @@ static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}
+static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_info_cs_counters cs_counters = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ memcpy(&cs_counters.cs_counters, &hdev->aggregated_cs_counters,
+ sizeof(struct hl_cs_counters));
+
+ if (hpriv->ctx)
+ memcpy(&cs_counters.ctx_cs_counters, &hpriv->ctx->cs_counters,
+ sizeof(struct hl_cs_counters));
+
+ return copy_to_user(out, &cs_counters,
+ min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
+}
+
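A hypothetical userspace sketch of reading these counters; it assumes the HL_INFO_CS_COUNTERS op and the struct hl_info_cs_counters layout added by this series, together with the existing HL_IOCTL_INFO entry point and hl_info_args layout from the uapi header:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <misc/habanalabs.h>    /* installed uapi header */

    /* Returns 0 on success, -1 with errno set on failure. */
    static int read_cs_counters(int fd, struct hl_info_cs_counters *out)
    {
            struct hl_info_args args;

            memset(&args, 0, sizeof(args));
            args.op = HL_INFO_CS_COUNTERS;
            args.return_pointer = (uint64_t) (uintptr_t) out;
            args.return_size = sizeof(*out);

            return ioctl(fd, HL_IOCTL_INFO, &args);
    }
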
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -336,6 +357,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_TIME_SYNC:
return time_sync_info(hdev, args);
+ case HL_INFO_CS_COUNTERS:
+ return cs_counters_info(hpriv, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index f4434b39ef1b..287681646071 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -23,10 +23,14 @@ inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
return ptr;
}
+static inline int queue_ci_get(atomic_t *ci, u32 queue_len)
+{
+ return atomic_read(ci) & ((queue_len << 1) - 1);
+}
static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
{
- int delta = (q->pi - q->ci);
+ int delta = (q->pi - queue_ci_get(&q->ci, queue_len));
if (delta >= 0)
return (queue_len - delta);
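For reference, pi and the raw ci counter are folded into a window of twice the queue length (queue_ci_get() above), so a full queue can be told apart from an empty one. A standalone sketch of the free-slot arithmetic under that assumption; the negative-delta branch is an assumption here, since the hunk above is cut off before the driver's else branch:

    #include <assert.h>

    /* Sketch: pi and ci advance modulo (2 * queue_len), so the delta
     * distinguishes a full queue (0 free slots) from an empty one
     * (queue_len free slots).
     */
    static int free_slots(unsigned int pi, unsigned int ci,
                          unsigned int queue_len)
    {
            int delta = (int)(pi - ci);

            return delta >= 0 ? (int)queue_len - delta
                              : -delta - (int)queue_len;
    }

    int main(void)
    {
            assert(free_slots(0, 0, 256) == 256);   /* empty */
            assert(free_slots(256, 0, 256) == 0);   /* full */
            assert(free_slots(300, 100, 256) == 56);
            return 0;
    }
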
@@ -40,21 +44,14 @@ void hl_int_hw_queue_update_ci(struct hl_cs *cs)
struct hl_hw_queue *q;
int i;
- hdev->asic_funcs->hw_queues_lock(hdev);
-
if (hdev->disabled)
- goto out;
+ return;
q = &hdev->kernel_queues[0];
- for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
- if (q->queue_type == QUEUE_TYPE_INT) {
- q->ci += cs->jobs_in_queue_cnt[i];
- q->ci &= ((q->int_queue_len << 1) - 1);
- }
+ for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
+ if (q->queue_type == QUEUE_TYPE_INT)
+ atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
}
-
-out:
- hdev->asic_funcs->hw_queues_unlock(hdev);
}
/*
@@ -161,6 +158,13 @@ static int int_queue_sanity_checks(struct hl_device *hdev,
{
int free_slots_cnt;
+ if (num_of_entries > q->int_queue_len) {
+ dev_err(hdev->dev,
+ "Cannot populate queue %u with %u jobs\n",
+ q->hw_queue_id, num_of_entries);
+ return -ENOMEM;
+ }
+
/* Check we have enough space in the queue */
free_slots_cnt = queue_free_slots(q, q->int_queue_len);
@@ -174,38 +178,26 @@ static int int_queue_sanity_checks(struct hl_device *hdev,
}
/*
- * hw_queue_sanity_checks() - Perform some sanity checks on a H/W queue.
+ * hw_queue_sanity_checks() - Make sure we have enough space in the h/w queue
* @hdev: Pointer to hl_device structure.
* @q: Pointer to hl_hw_queue structure.
* @num_of_entries: How many entries to check for space.
*
- * Perform the following:
- * - Make sure we have enough space in the completion queue.
- * This check also ensures that there is enough space in the h/w queue, as
- * both queues are of the same size.
- * - Reserve space in the completion queue (needs to be reversed if there
- * is a failure down the road before the actual submission of work).
+ * Note: we do not reserve queue entries, so this function must not be called
+ * more than once per CS for the same queue
*
- * Both operations are done using the "free_slots_cnt" field of the completion
- * queue. The CI counters of the queue and the completion queue are not
- * needed/used for the H/W queue type.
*/
static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
int num_of_entries)
{
- atomic_t *free_slots =
- &hdev->completion_queue[q->cq_id].free_slots_cnt;
+ int free_slots_cnt;
- /*
- * Check we have enough space in the completion queue.
- * Add -1 to counter (decrement) unless counter was already 0.
- * In that case, CQ is full so we can't submit a new CB.
- * atomic_add_unless will return 0 if counter was already 0.
- */
- if (atomic_add_negative(num_of_entries * -1, free_slots)) {
- dev_dbg(hdev->dev, "No space for %d entries on CQ %d\n",
- num_of_entries, q->hw_queue_id);
- atomic_add(num_of_entries, free_slots);
+ /* Check we have enough space in the queue */
+ free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);
+
+ if (free_slots_cnt < num_of_entries) {
+ dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
+ q->hw_queue_id, num_of_entries);
return -EAGAIN;
}
@@ -366,7 +358,6 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
- struct hl_cq *cq;
u64 ptr;
u32 offset, ctl, len;
@@ -376,7 +367,7 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
* write address offset in the SM block (QMAN LBW message).
* The write address offset is calculated as "COMP_OFFSET << 2".
*/
- offset = job->cs->sequence & (HL_MAX_PENDING_CS - 1);
+ offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);
@@ -395,17 +386,6 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
else
ptr = (u64) (uintptr_t) job->user_cb;
- /*
- * No need to protect pi_offset because scheduling to the
- * H/W queues is done under the scheduler mutex
- *
- * No need to check if CQ is full because it was already
- * checked in hw_queue_sanity_checks
- */
- cq = &hdev->completion_queue[q->cq_id];
-
- cq->pi = hl_cq_inc_ptr(cq->pi);
-
ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
@@ -509,19 +489,23 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
struct hl_device *hdev = ctx->hdev;
struct hl_cs_job *job, *tmp;
struct hl_hw_queue *q;
+ u32 max_queues;
int rc = 0, i, cq_cnt;
hdev->asic_funcs->hw_queues_lock(hdev);
if (hl_device_disabled_or_in_reset(hdev)) {
+ ctx->cs_counters.device_in_reset_drop_cnt++;
dev_err(hdev->dev,
"device is disabled or in reset, CS rejected!\n");
rc = -EPERM;
goto out;
}
+ max_queues = hdev->asic_prop.max_queues;
+
q = &hdev->kernel_queues[0];
- for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
+ for (i = 0, cq_cnt = 0 ; i < max_queues ; i++, q++) {
if (cs->jobs_in_queue_cnt[i]) {
switch (q->queue_type) {
case QUEUE_TYPE_EXT:
@@ -543,11 +527,12 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
break;
}
- if (rc)
+ if (rc) {
+ ctx->cs_counters.queue_full_drop_cnt++;
goto unroll_cq_resv;
+ }
- if (q->queue_type == QUEUE_TYPE_EXT ||
- q->queue_type == QUEUE_TYPE_HW)
+ if (q->queue_type == QUEUE_TYPE_EXT)
cq_cnt++;
}
}
@@ -598,10 +583,9 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
unroll_cq_resv:
q = &hdev->kernel_queues[0];
- for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
- if ((q->queue_type == QUEUE_TYPE_EXT ||
- q->queue_type == QUEUE_TYPE_HW) &&
- cs->jobs_in_queue_cnt[i]) {
+ for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) {
+ if ((q->queue_type == QUEUE_TYPE_EXT) &&
+ (cs->jobs_in_queue_cnt[i])) {
atomic_t *free_slots =
&hdev->completion_queue[i].free_slots_cnt;
atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
@@ -625,7 +609,7 @@ void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
{
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
- q->ci = hl_queue_inc_ptr(q->ci);
+ atomic_inc(&q->ci);
}
static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
@@ -660,12 +644,9 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
}
/* Make sure read/write pointers are initialized to start of queue */
- q->ci = 0;
+ atomic_set(&q->ci, 0);
q->pi = 0;
- if (!is_cpu_queue)
- hdev->asic_funcs->ext_queue_init(hdev, q->hw_queue_id);
-
return 0;
free_queue:
@@ -697,7 +678,7 @@ static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
q->kernel_address = (u64) (uintptr_t) p;
q->pi = 0;
- q->ci = 0;
+ atomic_set(&q->ci, 0);
return 0;
}
@@ -726,12 +707,48 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
q->kernel_address = (u64) (uintptr_t) p;
/* Make sure read/write pointers are initialized to start of queue */
- q->ci = 0;
+ atomic_set(&q->ci, 0);
q->pi = 0;
return 0;
}
+static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
+{
+ struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_hw_sob *hw_sob;
+ int sob, queue_idx = hdev->sync_stream_queue_idx++;
+
+ hw_queue->base_sob_id =
+ prop->sync_stream_first_sob + queue_idx * HL_RSVD_SOBS;
+ hw_queue->base_mon_id =
+ prop->sync_stream_first_mon + queue_idx * HL_RSVD_MONS;
+ hw_queue->next_sob_val = 1;
+ hw_queue->curr_sob_offset = 0;
+
+ for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
+ hw_sob = &hw_queue->hw_sob[sob];
+ hw_sob->hdev = hdev;
+ hw_sob->sob_id = hw_queue->base_sob_id + sob;
+ hw_sob->q_idx = q_idx;
+ kref_init(&hw_sob->kref);
+ }
+}
+
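Each sync-stream queue thus gets a disjoint window of reserved sync objects and monitors. A standalone illustration of the carve-out, using the HL_RSVD_SOBS/HL_RSVD_MONS values from habanalabs.h; the base ids below are arbitrary placeholders:

    #include <stdio.h>

    #define HL_RSVD_SOBS    4
    #define HL_RSVD_MONS    2

    int main(void)
    {
            unsigned int first_sob = 32, first_mon = 16, q;

            for (q = 0; q < 3; q++)
                    printf("queue %u: SOBs %u..%u, monitors %u..%u\n", q,
                           first_sob + q * HL_RSVD_SOBS,
                           first_sob + q * HL_RSVD_SOBS + HL_RSVD_SOBS - 1,
                           first_mon + q * HL_RSVD_MONS,
                           first_mon + q * HL_RSVD_MONS + HL_RSVD_MONS - 1);
            return 0;
    }
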
+static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
+{
+ struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+
+ /*
+ * In case we got here due to a stuck CS, the refcount might be
+ * greater than 1, so we reset it.
+ */
+ kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref);
+ hw_queue->curr_sob_offset = 0;
+ hw_queue->next_sob_val = 1;
+}
+
/*
* queue_init - main initialization function for H/W queue object
*
@@ -747,8 +764,6 @@ static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
{
int rc;
- BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);
-
q->hw_queue_id = hw_queue_id;
switch (q->queue_type) {
@@ -774,6 +789,9 @@ static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
break;
}
+ if (q->supports_sync_stream)
+ sync_stream_queue_init(hdev, q->hw_queue_id);
+
if (rc)
return rc;
@@ -835,7 +853,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
struct hl_hw_queue *q;
int i, rc, q_ready_cnt;
- hdev->kernel_queues = kcalloc(HL_MAX_QUEUES,
+ hdev->kernel_queues = kcalloc(asic->max_queues,
sizeof(*hdev->kernel_queues), GFP_KERNEL);
if (!hdev->kernel_queues) {
@@ -845,9 +863,11 @@ int hl_hw_queues_create(struct hl_device *hdev)
/* Initialize the H/W queues */
for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
- i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {
+ i < asic->max_queues ; i++, q_ready_cnt++, q++) {
q->queue_type = asic->hw_queues_props[i].type;
+ q->supports_sync_stream =
+ asic->hw_queues_props[i].supports_sync_stream;
rc = queue_init(hdev, q, i);
if (rc) {
dev_err(hdev->dev,
@@ -870,9 +890,10 @@ release_queues:
void hl_hw_queues_destroy(struct hl_device *hdev)
{
struct hl_hw_queue *q;
+ u32 max_queues = hdev->asic_prop.max_queues;
int i;
- for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
+ for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++)
queue_fini(hdev, q);
kfree(hdev->kernel_queues);
@@ -881,15 +902,17 @@ void hl_hw_queues_destroy(struct hl_device *hdev)
void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
{
struct hl_hw_queue *q;
+ u32 max_queues = hdev->asic_prop.max_queues;
int i;
- for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) {
+ for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) {
if ((!q->valid) ||
((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
continue;
- q->pi = q->ci = 0;
+ q->pi = 0;
+ atomic_set(&q->ci, 0);
- if (q->queue_type == QUEUE_TYPE_EXT)
- hdev->asic_funcs->ext_queue_reset(hdev, q->hw_queue_id);
+ if (q->supports_sync_stream)
+ sync_stream_queue_reset(hdev, q->hw_queue_id);
}
}
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c
index b997336fa75f..b997336fa75f 100644
--- a/drivers/misc/habanalabs/hwmon.c
+++ b/drivers/misc/habanalabs/common/hwmon.c
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/common/irq.c
index fac65fbd70e8..c8db717023f5 100644
--- a/drivers/misc/habanalabs/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -10,11 +10,12 @@
#include <linux/slab.h>
/**
- * This structure is used to schedule work of EQ entry and armcp_reset event
+ * struct hl_eqe_work - This structure is used to schedule work of EQ
+ * entry and armcp_reset event
*
- * @eq_work - workqueue object to run when EQ entry is received
- * @hdev - pointer to device structure
- * @eq_entry - copy of the EQ entry
+ * @eq_work: workqueue object to run when EQ entry is received
+ * @hdev: pointer to device structure
+ * @eq_entry: copy of the EQ entry
*/
struct hl_eqe_work {
struct work_struct eq_work;
@@ -22,7 +23,7 @@ struct hl_eqe_work {
struct hl_eq_entry eq_entry;
};
-/*
+/**
* hl_cq_inc_ptr - increment ci or pi of cq
*
* @ptr: the current ci or pi value of the completion queue
@@ -38,7 +39,7 @@ inline u32 hl_cq_inc_ptr(u32 ptr)
return ptr;
}
-/*
+/**
* hl_eq_inc_ptr - increment ci of eq
*
* @ptr: the current ci value of the event queue
@@ -65,7 +66,7 @@ static void irq_handle_eqe(struct work_struct *work)
kfree(eqe_work);
}
-/*
+/**
* hl_irq_handler_cq - irq handler for completion queue
*
* @irq: irq number
@@ -118,15 +119,10 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
if ((shadow_index_valid) && (!hdev->disabled)) {
job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
- queue_work(hdev->cq_wq, &job->finish_work);
+ queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
}
- /* Update ci of the context's queue. There is no
- * need to protect it with spinlock because this update is
- * done only inside IRQ and there is a different IRQ per
- * queue
- */
- queue->ci = hl_queue_inc_ptr(queue->ci);
+ atomic_inc(&queue->ci);
/* Clear CQ entry ready bit */
cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
@@ -141,7 +137,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
return IRQ_HANDLED;
}
-/*
+/**
* hl_irq_handler_eq - irq handler for event queue
*
* @irq: irq number
@@ -205,7 +201,7 @@ skip_irq:
return IRQ_HANDLED;
}
-/*
+/**
* hl_cq_init - main initialization function for an cq object
*
* @hdev: pointer to device structure
@@ -219,8 +215,6 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
void *p;
- BUILD_BUG_ON(HL_CQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
-
p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
&q->bus_address, GFP_KERNEL | __GFP_ZERO);
if (!p)
@@ -237,7 +231,7 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
return 0;
}
-/*
+/**
* hl_cq_fini - destroy completion queue
*
* @hdev: pointer to device structure
@@ -268,7 +262,7 @@ void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
memset((void *) (uintptr_t) q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}
-/*
+/**
* hl_eq_init - main initialization function for an event queue object
*
* @hdev: pointer to device structure
@@ -281,8 +275,6 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
void *p;
- BUILD_BUG_ON(HL_EQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
-
p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
HL_EQ_SIZE_IN_BYTES,
&q->bus_address);
@@ -296,7 +288,7 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
return 0;
}
-/*
+/**
* hl_eq_fini - destroy event queue
*
* @hdev: pointer to device structure
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/common/memory.c
index 47da84a17719..dce9273e557a 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -7,7 +7,7 @@
#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
-#include "include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/uaccess.h>
#include <linux/slab.h>
@@ -1730,8 +1730,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
*/
if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
dev_notice(hdev->dev,
- "ctx %d is freed while it has va in use\n",
- ctx->asid);
+ "user released device without removing its memory mappings\n");
hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
dev_dbg(hdev->dev,
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/common/mmu.c
index a290d6b49d78..edcc11d5eaf1 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu.c
@@ -6,7 +6,7 @@
*/
#include "habanalabs.h"
-#include "include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/genalloc.h>
#include <linux/slab.h>
@@ -502,7 +502,6 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
return 0;
mutex_init(&ctx->mmu_lock);
- hash_init(ctx->mmu_phys_hash);
hash_init(ctx->mmu_shadow_hash);
return dram_default_mapping_init(ctx);
diff --git a/drivers/misc/habanalabs/pci.c b/drivers/misc/habanalabs/common/pci.c
index 9f634ef6f5b3..7bd3737571f3 100644
--- a/drivers/misc/habanalabs/pci.c
+++ b/drivers/misc/habanalabs/common/pci.c
@@ -6,16 +6,22 @@
*/
#include "habanalabs.h"
-#include "include/hw_ip/pci/pci_general.h"
+#include "../include/hw_ip/pci/pci_general.h"
#include <linux/pci.h>
+#include <linux/bitfield.h>
#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 10)
+#define IATU_REGION_CTRL_REGION_EN_MASK BIT(31)
+#define IATU_REGION_CTRL_MATCH_MODE_MASK BIT(30)
+#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK BIT(19)
+#define IATU_REGION_CTRL_BAR_NUM_MASK GENMASK(10, 8)
+
/**
* hl_pci_bars_map() - Map PCI BARs.
* @hdev: Pointer to hl_device structure.
- * @bar_name: Array of BAR names.
+ * @name: Array of BAR names.
* @is_wc: Array with flag per BAR whether a write-combined mapping is needed.
*
* Request PCI regions and map them to kernel virtual addresses.
@@ -61,7 +67,7 @@ err:
return rc;
}
-/*
+/**
* hl_pci_bars_unmap() - Unmap PCI BARS.
* @hdev: Pointer to hl_device structure.
*
@@ -80,9 +86,11 @@ static void hl_pci_bars_unmap(struct hl_device *hdev)
pci_release_regions(pdev);
}
-/*
+/**
* hl_pci_elbi_write() - Write through the ELBI interface.
* @hdev: Pointer to hl_device structure.
+ * @addr: Address to write to
+ * @data: Data to write
*
* Return: 0 on success, negative value for failure.
*/
@@ -140,6 +148,8 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
/**
* hl_pci_iatu_write() - iatu write routine.
* @hdev: Pointer to hl_device structure.
+ * @addr: Address to write to
+ * @data: Data to write
*
* Return: 0 on success, negative value for failure.
*/
@@ -161,7 +171,7 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
return 0;
}
-/*
+/**
* hl_pci_reset_link_through_bridge() - Reset PCI link.
* @hdev: Pointer to hl_device structure.
*/
@@ -183,110 +193,94 @@ static void hl_pci_reset_link_through_bridge(struct hl_device *hdev)
}
/**
- * hl_pci_set_dram_bar_base() - Set DDR BAR to map specific device address.
+ * hl_pci_set_inbound_region() - Configure inbound region
* @hdev: Pointer to hl_device structure.
- * @inbound_region: Inbound region number.
- * @bar: PCI BAR number.
- * @addr: Address in DRAM. Must be aligned to DRAM bar size.
+ * @region: Inbound region number.
+ * @pci_region: Inbound region parameters.
*
- * Configure the iATU so that the DRAM bar will start at the specified address.
+ * Configure the iATU inbound region.
*
* Return: 0 on success, negative value for failure.
*/
-int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
- u64 addr)
+int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
+ struct hl_inbound_pci_region *pci_region)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 offset;
- int rc;
+ u64 bar_phys_base, region_base, region_end_address;
+ u32 offset, ctrl_reg_val;
+ int rc = 0;
- switch (inbound_region) {
- case 0:
- offset = 0x100;
- break;
- case 1:
- offset = 0x300;
- break;
- case 2:
- offset = 0x500;
- break;
- default:
- dev_err(hdev->dev, "Invalid inbound region %d\n",
- inbound_region);
- return -EINVAL;
- }
+ /* region offset */
+ offset = (0x200 * region) + 0x100;
+
+ if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) {
+ bar_phys_base = hdev->pcie_bar_phys[pci_region->bar];
+ region_base = bar_phys_base + pci_region->offset_in_bar;
+ region_end_address = region_base + pci_region->size - 1;
- if (bar != 0 && bar != 2 && bar != 4) {
- dev_err(hdev->dev, "Invalid PCI BAR %d\n", bar);
- return -EINVAL;
+ rc |= hl_pci_iatu_write(hdev, offset + 0x8,
+ lower_32_bits(region_base));
+ rc |= hl_pci_iatu_write(hdev, offset + 0xC,
+ upper_32_bits(region_base));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x10,
+ lower_32_bits(region_end_address));
}
/* Point to the specified address */
- rc = hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(addr));
- rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(addr));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x14,
+ lower_32_bits(pci_region->addr));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x18,
+ upper_32_bits(pci_region->addr));
rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);
- /* Enable + BAR match + match enable + BAR number */
- rc |= hl_pci_iatu_write(hdev, offset + 0x4, 0xC0080000 | (bar << 8));
+
+ /* Enable + bar/address match + match enable + bar number */
+ ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK,
+ pci_region->mode);
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);
+
+ if (pci_region->mode == PCI_BAR_MATCH_MODE)
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK,
+ pci_region->bar);
+
+ rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
/* Return the DBI window to the default location */
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
if (rc)
- dev_err(hdev->dev, "failed to map DRAM bar to 0x%08llx\n",
- addr);
+ dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
+ pci_region->bar, pci_region->addr);
return rc;
}
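
The control word that used to be the magic constant 0xC0080000 | (bar << 8) is now composed field by field with FIELD_PREP(). The stand-alone sketch below checks that the two forms agree; BIT/GENMASK/FIELD_PREP are re-implemented locally to stand in for linux/bits.h and linux/bitfield.h, and PCI_BAR_MATCH_MODE == 1 is an assumption, not something this diff shows.

/* ctrl_check.c - minimal sketch: FIELD_PREP-composed iATU control value
 * versus the old magic constant. Local macros mirror the kernel's;
 * the enum value of PCI_BAR_MATCH_MODE is assumed.
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define BIT(n)          (1U << (n))
#define GENMASK(h, l)   (((~0U) >> (31 - (h))) & ~((1U << (l)) - 1U))
/* FIELD_PREP: shift a value into the low bits of a mask */
#define FIELD_PREP(mask, val) \
        (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define IATU_REGION_CTRL_REGION_EN_MASK     BIT(31)
#define IATU_REGION_CTRL_MATCH_MODE_MASK    BIT(30)
#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK  BIT(19)
#define IATU_REGION_CTRL_BAR_NUM_MASK       GENMASK(10, 8)

#define PCI_BAR_MATCH_MODE 1    /* assumed enum value */

int main(void)
{
        uint8_t bar = 4;
        uint32_t ctl;

        ctl = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
        ctl |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK, PCI_BAR_MATCH_MODE);
        ctl |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);
        ctl |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK, bar);

        /* old form: Enable + BAR match + match enable + BAR number */
        assert(ctl == (0xC0080000U | ((uint32_t)bar << 8)));
        printf("ctrl = 0x%08X\n", ctl);  /* 0xC0080400 */
        return 0;
}

For address-match mode the MATCH_MODE bit stays clear and the BAR field is left unprogrammed, which is why the new code only ORs in the BAR number under PCI_BAR_MATCH_MODE.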
/**
- * hl_pci_init_iatu() - Initialize the iATU unit inside the PCI controller.
+ * hl_pci_set_outbound_region() - Configure outbound region 0
* @hdev: Pointer to hl_device structure.
- * @sram_base_address: SRAM base address.
- * @dram_base_address: DRAM base address.
- * @host_phys_base_address: Base physical address of host memory for device
- * transactions.
- * @host_phys_size: Size of host memory for device transactions.
+ * @pci_region: Outbound region parameters.
*
- * This is needed in case the firmware doesn't initialize the iATU.
+ * Configure the iATU outbound region 0.
*
* Return: 0 on success, negative value for failure.
*/
-int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
- u64 dram_base_address, u64 host_phys_base_address,
- u64 host_phys_size)
+int hl_pci_set_outbound_region(struct hl_device *hdev,
+ struct hl_outbound_pci_region *pci_region)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 host_phys_end_addr;
+ u64 outbound_region_end_address;
int rc = 0;
- /* Inbound Region 0 - Bar 0 - Point to SRAM base address */
- rc = hl_pci_iatu_write(hdev, 0x114, lower_32_bits(sram_base_address));
- rc |= hl_pci_iatu_write(hdev, 0x118, upper_32_bits(sram_base_address));
- rc |= hl_pci_iatu_write(hdev, 0x100, 0);
- /* Enable + Bar match + match enable */
- rc |= hl_pci_iatu_write(hdev, 0x104, 0xC0080000);
-
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
-
- hdev->asic_funcs->set_dma_mask_from_fw(hdev);
-
- /* Point to DRAM */
- if (!hdev->asic_funcs->set_dram_bar_base)
- return -EINVAL;
- if (hdev->asic_funcs->set_dram_bar_base(hdev, dram_base_address) ==
- U64_MAX)
- return -EIO;
-
- /* Outbound Region 0 - Point to Host */
- host_phys_end_addr = host_phys_base_address + host_phys_size - 1;
+ /* Outbound Region 0 */
+ outbound_region_end_address =
+ pci_region->addr + pci_region->size - 1;
rc |= hl_pci_iatu_write(hdev, 0x008,
- lower_32_bits(host_phys_base_address));
+ lower_32_bits(pci_region->addr));
rc |= hl_pci_iatu_write(hdev, 0x00C,
- upper_32_bits(host_phys_base_address));
- rc |= hl_pci_iatu_write(hdev, 0x010, lower_32_bits(host_phys_end_addr));
+ upper_32_bits(pci_region->addr));
+ rc |= hl_pci_iatu_write(hdev, 0x010,
+ lower_32_bits(outbound_region_end_address));
rc |= hl_pci_iatu_write(hdev, 0x014, 0);
if ((hdev->power9_64bit_dma_enable) && (hdev->dma_mask == 64))
@@ -294,7 +288,8 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
else
rc |= hl_pci_iatu_write(hdev, 0x018, 0);
- rc |= hl_pci_iatu_write(hdev, 0x020, upper_32_bits(host_phys_end_addr));
+ rc |= hl_pci_iatu_write(hdev, 0x020,
+ upper_32_bits(outbound_region_end_address));
/* Increase region size */
rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
/* Enable */
@@ -304,16 +299,12 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
- if (rc)
- return -EIO;
-
- return 0;
+ return rc;
}
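
Both region setters program an inclusive end address (addr + size - 1) split across a pair of 32-bit limit registers. A minimal sketch of that split, with lower_32_bits()/upper_32_bits() mirroring the kernel helpers and a hypothetical host window:

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
        /* hypothetical host window: 16 GB at 0x8000000000 */
        uint64_t addr = 0x8000000000ULL;
        uint64_t size = 0x400000000ULL;
        uint64_t end  = addr + size - 1;        /* inclusive end address */

        printf("base:  lo=0x%08X hi=0x%08X\n",
               lower_32_bits(addr), upper_32_bits(addr));
        printf("limit: lo=0x%08X hi=0x%08X\n",
               lower_32_bits(end), upper_32_bits(end));
        return 0;
}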
/**
* hl_pci_set_dma_mask() - Set DMA masks for the device.
* @hdev: Pointer to hl_device structure.
- * @dma_mask: number of bits for the requested dma mask.
*
* This function sets the DMA masks (regular and consistent) for a specified
* value. If it doesn't succeed, it tries to set it to a fall-back value
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 70b6b1863c2e..b3cb0ac4721c 100644
--- a/drivers/misc/habanalabs/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -331,6 +331,9 @@ static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
char *data;
int rc;
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
if (!max_size)
return -EINVAL;
diff --git a/drivers/misc/habanalabs/gaudi/Makefile b/drivers/misc/habanalabs/gaudi/Makefile
index f802cdc980ca..c9f4703cff24 100644
--- a/drivers/misc/habanalabs/gaudi/Makefile
+++ b/drivers/misc/habanalabs/gaudi/Makefile
@@ -1,5 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-subdir-ccflags-y += -I$(src)
-
HL_GAUDI_FILES := gaudi/gaudi.o gaudi/gaudi_hwmgr.o gaudi/gaudi_security.o \
gaudi/gaudi_coresight.o
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 637a9d608707..00a0a7238d81 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -6,12 +6,12 @@
*/
#include "gaudiP.h"
-#include "include/hw_ip/mmu/mmu_general.h"
-#include "include/hw_ip/mmu/mmu_v1_1.h"
-#include "include/gaudi/gaudi_masks.h"
-#include "include/gaudi/gaudi_fw_if.h"
-#include "include/gaudi/gaudi_reg_map.h"
-#include "include/gaudi/gaudi_async_ids_map_extended.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_v1_1.h"
+#include "../include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_fw_if.h"
+#include "../include/gaudi/gaudi_reg_map.h"
+#include "../include/gaudi/gaudi_async_ids_map_extended.h"
#include <linux/module.h>
#include <linux/pci.h>
@@ -21,6 +21,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
+#include <linux/bitfield.h>
/*
* Gaudi security scheme:
@@ -74,7 +75,6 @@
#define GAUDI_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
#define GAUDI_PLDM_HRESET_TIMEOUT_MSEC 20000 /* 20s */
-#define GAUDI_PLDM_SRESET_TIMEOUT_MSEC 14000 /* 14s */
#define GAUDI_PLDM_TEST_QUEUE_WAIT_USEC 1000000 /* 1s */
#define GAUDI_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
@@ -321,6 +321,13 @@ static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_3 */
};
+struct ecc_info_extract_params {
+ u64 block_address;
+ u32 num_memories;
+ bool derr;
+ bool disable_clock_gating;
+};
+
static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
u64 phys_addr);
static int gaudi_send_job_on_qman0(struct hl_device *hdev,
@@ -339,22 +346,25 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i;
- if (GAUDI_QUEUE_ID_SIZE >= HL_MAX_QUEUES) {
- dev_err(hdev->dev,
- "Number of H/W queues must be smaller than %d\n",
- HL_MAX_QUEUES);
- return -EFAULT;
- }
+ prop->max_queues = GAUDI_QUEUE_ID_SIZE;
+ prop->hw_queues_props = kcalloc(prop->max_queues,
+ sizeof(struct hw_queue_properties),
+ GFP_KERNEL);
- for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
+ if (!prop->hw_queues_props)
+ return -ENOMEM;
+
+ for (i = 0 ; i < prop->max_queues ; i++) {
if (gaudi_queue_type[i] == QUEUE_TYPE_EXT) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
prop->hw_queues_props[i].driver_only = 0;
prop->hw_queues_props[i].requires_kernel_cb = 1;
+ prop->hw_queues_props[i].supports_sync_stream = 1;
} else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) {
prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
prop->hw_queues_props[i].driver_only = 1;
prop->hw_queues_props[i].requires_kernel_cb = 0;
+ prop->hw_queues_props[i].supports_sync_stream = 0;
} else if (gaudi_queue_type[i] == QUEUE_TYPE_INT) {
prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
prop->hw_queues_props[i].driver_only = 0;
@@ -363,14 +373,13 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
prop->hw_queues_props[i].driver_only = 0;
prop->hw_queues_props[i].requires_kernel_cb = 0;
+ prop->hw_queues_props[i].supports_sync_stream = 0;
}
}
- for (; i < HL_MAX_QUEUES; i++)
- prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
-
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
-
+ prop->sync_stream_first_sob = 0;
+ prop->sync_stream_first_mon = 0;
prop->dram_base_address = DRAM_PHYS_BASE;
prop->dram_size = GAUDI_HBM_SIZE_32GB;
prop->dram_end_address = prop->dram_base_address +
@@ -435,6 +444,8 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
+ prop->max_pending_cs = GAUDI_MAX_PENDING_CS;
+
return 0;
}
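
hw_queues_props moves from a fixed HL_MAX_QUEUES array to a per-ASIC allocation sized by prop->max_queues, so the trailing QUEUE_TYPE_NA padding loop is no longer needed. A user-space analogue of the pattern, with calloc standing in for kcalloc (both zero the buffer and guard count * size against overflow) and the struct reduced to the fields this hunk touches:

#include <stdlib.h>
#include <errno.h>

struct hw_queue_properties {
        int type;
        unsigned driver_only : 1;
        unsigned requires_kernel_cb : 1;
        unsigned supports_sync_stream : 1;
};

struct asic_fixed_properties {
        struct hw_queue_properties *hw_queues_props;
        unsigned int max_queues;
};

static int init_queue_props(struct asic_fixed_properties *prop,
                            unsigned int num_queues)
{
        prop->max_queues = num_queues;
        prop->hw_queues_props = calloc(prop->max_queues,
                                       sizeof(*prop->hw_queues_props));
        if (!prop->hw_queues_props)
                return -ENOMEM;
        return 0;
}

int main(void)
{
        struct asic_fixed_properties prop;

        if (init_queue_props(&prop, 113))   /* queue count illustrative */
                return 1;
        free(prop.hw_queues_props);
        return 0;
}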
@@ -457,6 +468,7 @@ static int gaudi_pci_bars_map(struct hl_device *hdev)
static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
{
struct gaudi_device *gaudi = hdev->asic_specific;
+ struct hl_inbound_pci_region pci_region;
u64 old_addr = addr;
int rc;
@@ -464,7 +476,10 @@ static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
return old_addr;
/* Inbound Region 2 - Bar 4 - Point to HBM */
- rc = hl_pci_set_dram_bar_base(hdev, 2, 4, addr);
+ pci_region.mode = PCI_BAR_MATCH_MODE;
+ pci_region.bar = HBM_BAR_ID;
+ pci_region.addr = addr;
+ rc = hl_pci_set_inbound_region(hdev, 2, &pci_region);
if (rc)
return U64_MAX;
@@ -478,22 +493,43 @@ static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
static int gaudi_init_iatu(struct hl_device *hdev)
{
- int rc = 0;
+ struct hl_inbound_pci_region inbound_region;
+ struct hl_outbound_pci_region outbound_region;
+ int rc;
+
+ /* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = SRAM_BAR_ID;
+ inbound_region.addr = SRAM_BASE_ADDR;
+ rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
+ if (rc)
+ goto done;
/* Inbound Region 1 - Bar 2 - Point to SPI FLASH */
- rc = hl_pci_iatu_write(hdev, 0x314,
- lower_32_bits(SPI_FLASH_BASE_ADDR));
- rc |= hl_pci_iatu_write(hdev, 0x318,
- upper_32_bits(SPI_FLASH_BASE_ADDR));
- rc |= hl_pci_iatu_write(hdev, 0x300, 0);
- /* Enable + Bar match + match enable */
- rc |= hl_pci_iatu_write(hdev, 0x304, 0xC0080200);
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = CFG_BAR_ID;
+ inbound_region.addr = SPI_FLASH_BASE_ADDR;
+ rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
+ if (rc)
+ goto done;
+ /* Inbound Region 2 - Bar 4 - Point to HBM */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = HBM_BAR_ID;
+ inbound_region.addr = DRAM_PHYS_BASE;
+ rc = hl_pci_set_inbound_region(hdev, 2, &inbound_region);
if (rc)
- return -EIO;
+ goto done;
- return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
- HOST_PHYS_BASE, HOST_PHYS_SIZE);
+ hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+
+ /* Outbound Region 0 - Point to Host */
+ outbound_region.addr = HOST_PHYS_BASE;
+ outbound_region.size = HOST_PHYS_SIZE;
+ rc = hl_pci_set_outbound_region(hdev, &outbound_region);
+
+done:
+ return rc;
}
static int gaudi_early_init(struct hl_device *hdev)
@@ -516,7 +552,8 @@ static int gaudi_early_init(struct hl_device *hdev)
(unsigned long long) pci_resource_len(pdev,
SRAM_BAR_ID),
SRAM_BAR_SIZE);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_queue_props;
}
if (pci_resource_len(pdev, CFG_BAR_ID) != CFG_BAR_SIZE) {
@@ -526,20 +563,26 @@ static int gaudi_early_init(struct hl_device *hdev)
(unsigned long long) pci_resource_len(pdev,
CFG_BAR_ID),
CFG_BAR_SIZE);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_queue_props;
}
prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
rc = hl_pci_init(hdev);
if (rc)
- return rc;
+ goto free_queue_props;
return 0;
+
+free_queue_props:
+ kfree(hdev->asic_prop.hw_queues_props);
+ return rc;
}
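
Because the queue-properties array is now allocated before the BAR checks, every later failure in early init has to release it; the diff therefore converts the direct returns into a free_queue_props unwind label. A compact sketch of the idiom, with illustrative names only:

#include <stdlib.h>

static int acquire_a(void **a) { *a = malloc(16); return *a ? 0 : -1; }
static int acquire_b(void)     { return 0; /* may fail in real code */ }

static int early_init(void)
{
        void *a;
        int rc;

        rc = acquire_a(&a);
        if (rc)
                return rc;

        rc = acquire_b();
        if (rc)
                goto free_a;    /* undo only what already succeeded */

        return 0;

free_a:
        free(a);
        return rc;
}

int main(void) { return early_init(); }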
static int gaudi_early_fini(struct hl_device *hdev)
{
+ kfree(hdev->asic_prop.hw_queues_props);
hl_pci_fini(hdev);
return 0;
@@ -554,11 +597,36 @@ static int gaudi_early_fini(struct hl_device *hdev)
static void gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 trace_freq = 0;
+ u32 pll_clk = 0;
+ u32 div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
+ u32 div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
+ u32 nr = RREG32(mmPSOC_CPU_PLL_NR);
+ u32 nf = RREG32(mmPSOC_CPU_PLL_NF);
+ u32 od = RREG32(mmPSOC_CPU_PLL_OD);
+
+ if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
+ if (div_sel == DIV_SEL_REF_CLK)
+ trace_freq = PLL_REF_CLK;
+ else
+ trace_freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ trace_freq = pll_clk;
+ else
+ trace_freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d", div_sel);
+ }
- prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
- prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
- prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
- prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ prop->psoc_timestamp_frequency = trace_freq;
+ prop->psoc_pci_pll_nr = nr;
+ prop->psoc_pci_pll_nf = nf;
+ prop->psoc_pci_pll_od = od;
+ prop->psoc_pci_pll_div_factor = div_fctr;
}
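
The PSOC timestamp frequency is now derived from the CPU PLL registers: the PLL output is ref * (nf + 1) / ((nr + 1) * (od + 1)), optionally divided again by (div_fctr + 1) depending on the selector. A runnable sketch with sample values; the 50 MHz reference and the DIV_SEL_* encoding are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define PLL_REF_CLK 50  /* MHz, assumed reference */

enum { DIV_SEL_REF_CLK, DIV_SEL_PLL_CLK, DIV_SEL_DIVIDED_REF,
       DIV_SEL_DIVIDED_PLL };   /* assumed encoding */

static uint32_t trace_freq(uint32_t div_sel, uint32_t div_fctr,
                           uint32_t nr, uint32_t nf, uint32_t od)
{
        uint32_t pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));

        switch (div_sel) {
        case DIV_SEL_REF_CLK:     return PLL_REF_CLK;
        case DIV_SEL_DIVIDED_REF: return PLL_REF_CLK / (div_fctr + 1);
        case DIV_SEL_PLL_CLK:     return pll_clk;
        case DIV_SEL_DIVIDED_PLL: return pll_clk / (div_fctr + 1);
        default:                  return 0;  /* invalid selector */
        }
}

int main(void)
{
        /* e.g. nf=31, nr=0, od=1: 50 * 32 / (1 * 2) = 800 MHz */
        printf("%u MHz\n", trace_freq(DIV_SEL_PLL_CLK, 0, 0, 31, 1));
        return 0;
}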
static int _gaudi_init_tpc_mem(struct hl_device *hdev,
@@ -573,7 +641,7 @@ static int _gaudi_init_tpc_mem(struct hl_device *hdev,
u8 tpc_id;
int rc;
- cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
if (!cb)
return -EFAULT;
@@ -1644,8 +1712,8 @@ static void gaudi_init_hbm_cred(struct hl_device *hdev)
uint32_t hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;
hbm0_wr = 0x33333333;
- hbm1_wr = 0x33333333;
hbm0_rd = 0x77777777;
+ hbm1_wr = 0x55555555;
hbm1_rd = 0xDDDDDDDD;
WREG32(mmDMA_IF_E_N_HBM0_WR_CRED_CNT, hbm0_wr);
@@ -1695,125 +1763,6 @@ static void gaudi_init_hbm_cred(struct hl_device *hdev)
(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
}
-static void gaudi_init_rate_limiter(struct hl_device *hdev)
-{
- u32 nr, nf, od, sat, rst, timeout;
- u64 freq;
-
- nr = RREG32(mmPSOC_HBM_PLL_NR);
- nf = RREG32(mmPSOC_HBM_PLL_NF);
- od = RREG32(mmPSOC_HBM_PLL_OD);
- freq = (50 * (nf + 1)) / ((nr + 1) * (od + 1));
-
- dev_dbg(hdev->dev, "HBM frequency is %lluMHz\n", freq);
-
- /* Configuration is for five (5) DDMA channels */
- if (freq == 800) {
- sat = 4;
- rst = 11;
- timeout = 15;
- } else if (freq == 900) {
- sat = 4;
- rst = 15;
- timeout = 16;
- } else if (freq == 950) {
- sat = 4;
- rst = 15;
- timeout = 15;
- } else {
- dev_warn(hdev->dev,
- "unsupported HBM frequency %lluMHz, no rate-limiters\n",
- freq);
- return;
- }
-
- WREG32(mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_0, 0x111);
- WREG32(mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_1, 0x111);
- WREG32(mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_0, 0x111);
- WREG32(mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_1, 0x111);
- WREG32(mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_0, 0x111);
- WREG32(mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_1, 0x111);
- WREG32(mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_0, 0x111);
- WREG32(mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_1, 0x111);
-
- if (!hdev->rl_enable) {
- dev_info(hdev->dev, "Rate limiters disabled\n");
- return;
- }
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_SAT, sat);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_SAT, sat);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_RST, rst);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_RST, rst);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_EN, 1);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_EN, 1);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_SAT, sat);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_SAT, sat);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_RST, rst);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_RST, rst);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
-
- WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_EN, 1);
- WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_EN, 1);
-}
-
static void gaudi_init_golden_registers(struct hl_device *hdev)
{
u32 tpc_offset;
@@ -1823,8 +1772,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_hbm_cred(hdev);
- gaudi_init_rate_limiter(hdev);
-
hdev->asic_funcs->disable_clock_gating(hdev);
for (tpc_id = 0, tpc_offset = 0;
@@ -1845,9 +1792,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
WREG32(mmMME1_CTRL_EUS_ROLLUP_CNT_ADD, 3);
WREG32(mmMME2_CTRL_EUS_ROLLUP_CNT_ADD, 3);
WREG32(mmMME3_CTRL_EUS_ROLLUP_CNT_ADD, 3);
-
- /* WA for H3-2081 */
- WREG32(mmPCIE_WRAP_MAX_OUTSTAND, 0x10ff);
}
static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
@@ -2649,29 +2593,16 @@ static void gaudi_disable_timestamp(struct hl_device *hdev)
static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
{
- u32 wait_timeout_ms, cpu_timeout_ms;
+ u32 wait_timeout_ms;
dev_info(hdev->dev,
"Halting compute engines and disabling interrupts\n");
- if (hdev->pldm) {
+ if (hdev->pldm)
wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
- cpu_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
- } else {
+ else
wait_timeout_ms = GAUDI_RESET_WAIT_MSEC;
- cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
- }
- if (hard_reset) {
- /*
- * I don't know what is the state of the CPU so make sure it is
- * stopped in any means necessary
- */
- WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
- GAUDI_EVENT_HALT_MACHINE);
- msleep(cpu_timeout_ms);
- }
gaudi_stop_mme_qmans(hdev);
gaudi_stop_tpc_qmans(hdev);
@@ -2696,10 +2627,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
gaudi_disable_timestamp(hdev);
- if (hard_reset)
- gaudi_disable_msi(hdev);
- else
- gaudi_sync_irqs(hdev);
+ gaudi_disable_msi(hdev);
}
static int gaudi_mmu_init(struct hl_device *hdev)
@@ -2733,8 +2661,7 @@ static int gaudi_mmu_init(struct hl_device *hdev)
WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8);
WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
- VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, 0);
WREG32(mmMMU_UP_MMU_ENABLE, 1);
WREG32(mmMMU_UP_SPI_MASK, 0xF);
@@ -2984,16 +2911,6 @@ static int gaudi_hw_init(struct hl_device *hdev)
gaudi_init_hbm_dma_qmans(hdev);
- /*
- * Before pushing u-boot/linux to device, need to set the hbm bar to
- * base address of dram
- */
- if (gaudi_set_hbm_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
- dev_err(hdev->dev,
- "failed to map HBM bar to DRAM base address\n");
- return -EIO;
- }
-
rc = gaudi_init_cpu(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize CPU\n");
@@ -3052,48 +2969,56 @@ disable_queues:
static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- u32 status, reset_timeout_ms, boot_strap = 0;
+ u32 status, reset_timeout_ms, cpu_timeout_ms, boot_strap = 0;
+
+ if (!hard_reset) {
+ dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n");
+ return;
+ }
if (hdev->pldm) {
- if (hard_reset)
- reset_timeout_ms = GAUDI_PLDM_HRESET_TIMEOUT_MSEC;
- else
- reset_timeout_ms = GAUDI_PLDM_SRESET_TIMEOUT_MSEC;
+ reset_timeout_ms = GAUDI_PLDM_HRESET_TIMEOUT_MSEC;
+ cpu_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
} else {
reset_timeout_ms = GAUDI_RESET_TIMEOUT_MSEC;
+ cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
}
- if (hard_reset) {
- /* Tell ASIC not to re-initialize PCIe */
- WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);
+ /* Set device to handle FLR by H/W as we will put the device CPU to
+ * halt mode
+ */
+ WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK |
+ PCIE_AUX_FLR_CTRL_INT_MASK_MASK));
- boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
- /* H/W bug WA:
- * rdata[31:0] = strap_read_val;
- * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0]
- */
- boot_strap = (((boot_strap & 0x7FE00000) << 1) |
- (boot_strap & 0x001FFFFF));
- WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2);
-
- /* Restart BTL/BLR upon hard-reset */
- WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
-
- WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
- 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
- dev_info(hdev->dev,
- "Issued HARD reset command, going to wait %dms\n",
- reset_timeout_ms);
- } else {
- /* Don't restart BTL/BLR upon soft-reset */
- WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 0);
+ /* We don't know what the state of the CPU is, so make sure it is
+ * stopped by any means necessary
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
- WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST,
- 1 << PSOC_GLOBAL_CONF_SOFT_RST_IND_SHIFT);
- dev_info(hdev->dev,
- "Issued SOFT reset command, going to wait %dms\n",
- reset_timeout_ms);
- }
+ msleep(cpu_timeout_ms);
+
+ /* Tell ASIC not to re-initialize PCIe */
+ WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);
+
+ boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
+
+ /* H/W bug WA:
+ * rdata[31:0] = strap_read_val;
+ * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0]
+ */
+ boot_strap = (((boot_strap & 0x7FE00000) << 1) |
+ (boot_strap & 0x001FFFFF));
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2);
+
+ /* Restart BTL/BLR upon hard-reset */
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
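
The strap-pins workaround rebuilds the register as rdata[30:21], 1'b0, rdata[20:0]: bits 30:21 move up one position, a zero is spliced in at bit 21, and the original bit 31 is dropped. A runnable check of the transform:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint32_t strap_wa(uint32_t rdata)
{
        return ((rdata & 0x7FE00000U) << 1) | (rdata & 0x001FFFFFU);
}

int main(void)
{
        uint32_t v = strap_wa(0xFFFFFFFFU);

        assert(v == 0xFFDFFFFFU);  /* bit 21 cleared, bits 30:21 shifted up */
        printf("0x%08X\n", v);
        return 0;
}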
@@ -3107,18 +3032,6 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
"Timeout while waiting for device to reset 0x%x\n",
status);
- if (!hard_reset) {
- gaudi->hw_cap_initialized &= ~(HW_CAP_PCI_DMA | HW_CAP_MME |
- HW_CAP_TPC_MASK |
- HW_CAP_HBM_DMA);
-
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
- GAUDI_EVENT_SOFT_RESET);
- return;
- }
-
- /* We continue here only for hard-reset */
-
WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap);
gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
@@ -3578,7 +3491,7 @@ static int gaudi_test_queues(struct hl_device *hdev)
{
int i, rc, ret_val = 0;
- for (i = 0 ; i < HL_MAX_QUEUES ; i++) {
+ for (i = 0 ; i < hdev->asic_prop.max_queues ; i++) {
if (hdev->asic_prop.hw_queues_props[i].type == QUEUE_TYPE_EXT) {
rc = gaudi_test_queue(hdev, i);
if (rc)
@@ -4159,9 +4072,8 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
parser->patched_cb_size = parser->user_cb_size +
sizeof(struct packet_msg_prot) * 2;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
- parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID, false);
if (rc) {
dev_err(hdev->dev,
@@ -4233,9 +4145,8 @@ static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
if (rc)
goto free_userptr;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
- parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID, false);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
@@ -4364,11 +4275,11 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
{
struct packet_lin_dma *lin_dma_pkt;
struct hl_cs_job *job;
- u32 cb_size, ctl;
+ u32 cb_size, ctl, err_cause;
struct hl_cb *cb;
int rc;
- cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
if (!cb)
return -EFAULT;
@@ -4393,6 +4304,15 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
goto release_cb;
}
+ /* Verify DMA is OK */
+ err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
+ if (err_cause && !hdev->init_done) {
+ dev_dbg(hdev->dev,
+ "Clearing DMA0 engine from errors (cause 0x%x)\n",
+ err_cause);
+ WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause);
+ }
+
job->id = 0;
job->user_cb = cb;
job->user_cb->cs_cnt++;
@@ -4404,11 +4324,23 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
hl_debugfs_add_job(hdev, job);
rc = gaudi_send_job_on_qman0(hdev, job);
-
hl_debugfs_remove_job(hdev, job);
kfree(job);
cb->cs_cnt--;
+ /* Verify DMA is OK */
+ err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
+ if (err_cause) {
+ dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause);
+ rc = -EIO;
+ if (!hdev->init_done) {
+ dev_dbg(hdev->dev,
+ "Clearing DMA0 engine from errors (cause 0x%x)\n",
+ err_cause);
+ WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause);
+ }
+ }
+
release_cb:
hl_cb_put(cb);
hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
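
The memset path now samples DMA0_CORE_ERR_CAUSE around the QMAN0 job, failing the operation if an error latched and, during init, clearing it by writing the read value back. A reduced model of the read-check-clear pattern; the register is a plain variable here, and the write-to-clear behaviour the driver relies on is an assumption of the model:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static uint32_t dma0_err_cause;  /* stands in for the H/W register */

static int check_and_clear_dma_err(int init_done)
{
        uint32_t err_cause = dma0_err_cause;       /* RREG32 */

        if (!err_cause)
                return 0;

        fprintf(stderr, "DMA failed, cause 0x%x\n", err_cause);
        if (!init_done)
                dma0_err_cause &= ~err_cause;  /* WREG32(err_cause), W1C assumed */
        return -EIO;
}

int main(void)
{
        dma0_err_cause = 0x4;   /* inject a latched error */
        return check_and_clear_dma_err(0) ? 1 : 0;
}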
@@ -5254,62 +5186,76 @@ static void gaudi_print_mmu_error_info(struct hl_device *hdev)
* | |0xF4C memory wrappers 127:96 |
* +-------------------+------------------------------------------------------+
*/
-static void gaudi_print_ecc_info_generic(struct hl_device *hdev,
- const char *block_name,
- u64 block_address, int num_memories,
- bool derr, bool disable_clock_gating)
+static int gaudi_extract_ecc_info(struct hl_device *hdev,
+ struct ecc_info_extract_params *params, u64 *ecc_address,
+ u64 *ecc_syndrom, u8 *memory_wrapper_idx)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- int num_mem_regs = num_memories / 32 + ((num_memories % 32) ? 1 : 0);
+ u32 i, num_mem_regs, reg, err_bit;
+ u64 err_addr, err_word = 0;
+ int rc = 0;
+
+ num_mem_regs = params->num_memories / 32 +
+ ((params->num_memories % 32) ? 1 : 0);
- if (block_address >= CFG_BASE)
- block_address -= CFG_BASE;
+ if (params->block_address >= CFG_BASE)
+ params->block_address -= CFG_BASE;
- if (derr)
- block_address += GAUDI_ECC_DERR0_OFFSET;
+ if (params->derr)
+ err_addr = params->block_address + GAUDI_ECC_DERR0_OFFSET;
else
- block_address += GAUDI_ECC_SERR0_OFFSET;
+ err_addr = params->block_address + GAUDI_ECC_SERR0_OFFSET;
- if (disable_clock_gating) {
+ if (params->disable_clock_gating) {
mutex_lock(&gaudi->clk_gate_mutex);
hdev->asic_funcs->disable_clock_gating(hdev);
}
- switch (num_mem_regs) {
- case 1:
- dev_err(hdev->dev,
- "%s ECC indication: 0x%08x\n",
- block_name, RREG32(block_address));
- break;
- case 2:
- dev_err(hdev->dev,
- "%s ECC indication: 0x%08x 0x%08x\n",
- block_name,
- RREG32(block_address), RREG32(block_address + 4));
- break;
- case 3:
- dev_err(hdev->dev,
- "%s ECC indication: 0x%08x 0x%08x 0x%08x\n",
- block_name,
- RREG32(block_address), RREG32(block_address + 4),
- RREG32(block_address + 8));
- break;
- case 4:
- dev_err(hdev->dev,
- "%s ECC indication: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- block_name,
- RREG32(block_address), RREG32(block_address + 4),
- RREG32(block_address + 8), RREG32(block_address + 0xc));
- break;
- default:
- break;
+ /* Set invalid wrapper index */
+ *memory_wrapper_idx = 0xFF;
+
+ /* Iterate through memory wrappers, a single bit must be set */
+ for (i = 0 ; i < num_mem_regs ; i++) {
+ err_addr += i * 4;
+ err_word = RREG32(err_addr);
+ if (err_word) {
+ err_bit = __ffs(err_word);
+ *memory_wrapper_idx = err_bit + (32 * i);
+ break;
+ }
+ }
+ if (*memory_wrapper_idx == 0xFF) {
+ dev_err(hdev->dev, "ECC error information cannot be found\n");
+ rc = -EINVAL;
+ goto enable_clk_gate;
}
- if (disable_clock_gating) {
+ WREG32(params->block_address + GAUDI_ECC_MEM_SEL_OFFSET,
+ *memory_wrapper_idx);
+
+ *ecc_address =
+ RREG32(params->block_address + GAUDI_ECC_ADDRESS_OFFSET);
+ *ecc_syndrom =
+ RREG32(params->block_address + GAUDI_ECC_SYNDROME_OFFSET);
+
+ /* Clear error indication */
+ reg = RREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET);
+ if (params->derr)
+ reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_DERR_MASK, 1);
+ else
+ reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_SERR_MASK, 1);
+
+ WREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET, reg);
+
+enable_clk_gate:
+ if (params->disable_clock_gating) {
hdev->asic_funcs->set_clock_gating(hdev);
+
mutex_unlock(&gaudi->clk_gate_mutex);
}
+
+ return rc;
}
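
The extraction walks ceil(num_memories / 32) indication registers while i < num_mem_regs and flattens the first set bit into a wrapper index, err_bit + 32 * i. A stand-alone sketch of the scan over a modelled register file, with __builtin_ctz playing the role of the kernel's __ffs():

#include <stdio.h>
#include <stdint.h>

static int find_wrapper_idx(const uint32_t *regs, unsigned num_memories)
{
        unsigned num_mem_regs = (num_memories + 31) / 32;
        unsigned i;

        for (i = 0; i < num_mem_regs; i++) {
                if (regs[i])
                        return __builtin_ctz(regs[i]) + 32 * i;
        }
        return -1;  /* maps to the driver's 0xFF sentinel */
}

int main(void)
{
        uint32_t regs[3] = { 0, 0x00000400, 0 };  /* bit 10 of reg 1 */

        printf("wrapper idx = %d\n", find_wrapper_idx(regs, 90));  /* 42 */
        return 0;
}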
static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
@@ -5362,239 +5308,99 @@ static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
}
}
-static void gaudi_print_ecc_info(struct hl_device *hdev, u16 event_type)
+static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
+ struct hl_eq_ecc_data *ecc_data)
{
- u64 block_address;
- u8 index;
- int num_memories;
- char desc[32];
- bool derr;
- bool disable_clock_gating;
+ struct ecc_info_extract_params params;
+ u64 ecc_address = 0, ecc_syndrom = 0;
+ u8 index, memory_wrapper_idx = 0;
+ bool extract_info_from_fw;
+ int rc;
switch (event_type) {
- case GAUDI_EVENT_PCIE_CORE_SERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_CORE");
- block_address = mmPCIE_CORE_BASE;
- num_memories = 51;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_CORE_DERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_CORE");
- block_address = mmPCIE_CORE_BASE;
- num_memories = 51;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_IF_SERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_WRAP");
- block_address = mmPCIE_WRAP_BASE;
- num_memories = 11;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_IF_DERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_WRAP");
- block_address = mmPCIE_WRAP_BASE;
- num_memories = 11;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_PHY_SERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_PHY");
- block_address = mmPCIE_PHY_BASE;
- num_memories = 4;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PCIE_PHY_DERR:
- snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_PHY");
- block_address = mmPCIE_PHY_BASE;
- num_memories = 4;
- derr = true;
- disable_clock_gating = false;
+ case GAUDI_EVENT_PCIE_CORE_SERR ... GAUDI_EVENT_PCIE_PHY_DERR:
+ case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_MMU_DERR:
+ extract_info_from_fw = true;
break;
case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
index = event_type - GAUDI_EVENT_TPC0_SERR;
- block_address = mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC", index);
- num_memories = 90;
- derr = false;
- disable_clock_gating = true;
+ params.block_address = mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
+ params.num_memories = 90;
+ params.derr = false;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
index = event_type - GAUDI_EVENT_TPC0_DERR;
- block_address =
+ params.block_address =
mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC", index);
- num_memories = 90;
- derr = true;
- disable_clock_gating = true;
+ params.num_memories = 90;
+ params.derr = true;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_ACC_SERR:
case GAUDI_EVENT_MME1_ACC_SERR:
case GAUDI_EVENT_MME2_ACC_SERR:
case GAUDI_EVENT_MME3_ACC_SERR:
index = (event_type - GAUDI_EVENT_MME0_ACC_SERR) / 4;
- block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "MME%d_ACC", index);
- num_memories = 128;
- derr = false;
- disable_clock_gating = true;
+ params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
+ params.num_memories = 128;
+ params.derr = false;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_ACC_DERR:
case GAUDI_EVENT_MME1_ACC_DERR:
case GAUDI_EVENT_MME2_ACC_DERR:
case GAUDI_EVENT_MME3_ACC_DERR:
index = (event_type - GAUDI_EVENT_MME0_ACC_DERR) / 4;
- block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "MME%d_ACC", index);
- num_memories = 128;
- derr = true;
- disable_clock_gating = true;
+ params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
+ params.num_memories = 128;
+ params.derr = true;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_SBAB_SERR:
case GAUDI_EVENT_MME1_SBAB_SERR:
case GAUDI_EVENT_MME2_SBAB_SERR:
case GAUDI_EVENT_MME3_SBAB_SERR:
index = (event_type - GAUDI_EVENT_MME0_SBAB_SERR) / 4;
- block_address = mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "MME%d_SBAB", index);
- num_memories = 33;
- derr = false;
- disable_clock_gating = true;
+ params.block_address =
+ mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
+ params.num_memories = 33;
+ params.derr = false;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
break;
case GAUDI_EVENT_MME0_SBAB_DERR:
case GAUDI_EVENT_MME1_SBAB_DERR:
case GAUDI_EVENT_MME2_SBAB_DERR:
case GAUDI_EVENT_MME3_SBAB_DERR:
index = (event_type - GAUDI_EVENT_MME0_SBAB_DERR) / 4;
- block_address = mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "MME%d_SBAB", index);
- num_memories = 33;
- derr = true;
- disable_clock_gating = true;
- break;
- case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_DMA7_SERR_ECC:
- index = event_type - GAUDI_EVENT_DMA0_SERR_ECC;
- block_address = mmDMA0_CORE_BASE + index * DMA_CORE_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DMA%d_CORE", index);
- num_memories = 16;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_DMA0_DERR_ECC ... GAUDI_EVENT_DMA7_DERR_ECC:
- index = event_type - GAUDI_EVENT_DMA0_DERR_ECC;
- block_address = mmDMA0_CORE_BASE + index * DMA_CORE_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DMA%d_CORE", index);
- num_memories = 16;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_CPU_IF_ECC_SERR:
- block_address = mmCPU_IF_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 4;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_CPU_IF_ECC_DERR:
- block_address = mmCPU_IF_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 4;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PSOC_MEM_SERR:
- block_address = mmPSOC_GLOBAL_CONF_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 4;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PSOC_MEM_DERR:
- block_address = mmPSOC_GLOBAL_CONF_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 4;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PSOC_CORESIGHT_SERR:
- block_address = mmPSOC_CS_TRACE_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 2;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
- block_address = mmPSOC_CS_TRACE_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
- num_memories = 2;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_SRAM0_SERR ... GAUDI_EVENT_SRAM28_SERR:
- index = event_type - GAUDI_EVENT_SRAM0_SERR;
- block_address =
- mmSRAM_Y0_X0_BANK_BASE + index * SRAM_BANK_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "SRAM%d", index);
- num_memories = 2;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
- index = event_type - GAUDI_EVENT_SRAM0_DERR;
- block_address =
- mmSRAM_Y0_X0_BANK_BASE + index * SRAM_BANK_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "SRAM%d", index);
- num_memories = 2;
- derr = true;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_DMA_IF0_SERR ... GAUDI_EVENT_DMA_IF3_SERR:
- index = event_type - GAUDI_EVENT_DMA_IF0_SERR;
- block_address = mmDMA_IF_W_S_BASE +
- index * (mmDMA_IF_E_S_BASE - mmDMA_IF_W_S_BASE);
- snprintf(desc, ARRAY_SIZE(desc), "DMA_IF%d", index);
- num_memories = 60;
- derr = false;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
- index = event_type - GAUDI_EVENT_DMA_IF0_DERR;
- block_address = mmDMA_IF_W_S_BASE +
- index * (mmDMA_IF_E_S_BASE - mmDMA_IF_W_S_BASE);
- snprintf(desc, ARRAY_SIZE(desc), "DMA_IF%d", index);
- derr = true;
- num_memories = 60;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
- index = event_type - GAUDI_EVENT_HBM_0_SERR;
- /* HBM Registers are at different offsets */
- block_address = mmHBM0_BASE + 0x8000 +
- index * (mmHBM1_BASE - mmHBM0_BASE);
- snprintf(desc, ARRAY_SIZE(desc), "HBM%d", index);
- derr = false;
- num_memories = 64;
- disable_clock_gating = false;
- break;
- case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
- index = event_type - GAUDI_EVENT_HBM_0_SERR;
- /* HBM Registers are at different offsets */
- block_address = mmHBM0_BASE + 0x8000 +
- index * (mmHBM1_BASE - mmHBM0_BASE);
- snprintf(desc, ARRAY_SIZE(desc), "HBM%d", index);
- derr = true;
- num_memories = 64;
- disable_clock_gating = false;
- break;
+ params.block_address =
+ mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
+ params.num_memories = 33;
+ params.derr = true;
+ params.disable_clock_gating = true;
+ extract_info_from_fw = false;
+ break;
default:
return;
}
- gaudi_print_ecc_info_generic(hdev, desc, block_address, num_memories,
- derr, disable_clock_gating);
+ if (extract_info_from_fw) {
+ ecc_address = le64_to_cpu(ecc_data->ecc_address);
+ ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
+ memory_wrapper_idx = ecc_data->memory_wrapper_idx;
+ } else {
+ rc = gaudi_extract_ecc_info(hdev, &params, &ecc_address,
+ &ecc_syndrom, &memory_wrapper_idx);
+ if (rc)
+ return;
+ }
+
+ dev_err(hdev->dev,
+ "ECC error detected. address: %#llx. Syndrom: %#llx. block id %u\n",
+ ecc_address, ecc_syndrom, memory_wrapper_idx);
}
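
When the firmware supplies the ECC record, the event-queue fields arrive little-endian and go through le64_to_cpu() before logging. A sketch of that conversion, with struct hl_eq_ecc_data reduced to the three fields read here and a portable le64_to_cpu stand-in:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hl_eq_ecc_data {
        uint64_t ecc_address;   /* little-endian on the wire */
        uint64_t ecc_syndrom;   /* little-endian on the wire */
        uint8_t  memory_wrapper_idx;
};

static uint64_t le64_to_cpu(uint64_t le)
{
        uint8_t b[8];
        uint64_t v = 0;
        int i;

        memcpy(b, &le, 8);
        for (i = 7; i >= 0; i--)
                v = (v << 8) | b[i];    /* interpret bytes as LE */
        return v;
}

int main(void)
{
        struct hl_eq_ecc_data d = { 0x1000, 0xdead, 7 };

        printf("addr %#llx syndrome %#llx block %u\n",
               (unsigned long long)le64_to_cpu(d.ecc_address),
               (unsigned long long)le64_to_cpu(d.ecc_syndrom),
               (unsigned)d.memory_wrapper_idx);
        return 0;
}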
static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
@@ -5644,8 +5450,6 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
- gaudi_print_ecc_info(hdev, event_type);
-
if (razwi) {
gaudi_print_razwi_info(hdev);
gaudi_print_mmu_error_info(hdev);
@@ -5875,10 +5679,15 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
- fallthrough;
- case GAUDI_EVENT_GIC500:
case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
case GAUDI_EVENT_MMU_DERR:
+ gaudi_print_irq_info(hdev, event_type, true);
+ gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ break;
+
+ case GAUDI_EVENT_GIC500:
case GAUDI_EVENT_AXI_ECC:
case GAUDI_EVENT_L2_RAM_ECC:
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
@@ -5974,6 +5783,11 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
fallthrough;
case GAUDI_EVENT_MMU_SERR:
+ gaudi_print_irq_info(hdev, event_type, true);
+ gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
+ hl_fw_unmask_irq(hdev, event_type);
+ break;
+
case GAUDI_EVENT_PCIE_DEC:
case GAUDI_EVENT_MME0_WBC_RSP:
case GAUDI_EVENT_MME0_SBAB0_RSP:
@@ -6458,47 +6272,14 @@ static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev)
return RREG32(mmHW_STATE);
}
-static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
-{
- return gaudi_cq_assignment[cq_idx];
-}
-
-static void gaudi_ext_queue_init(struct hl_device *hdev, u32 q_idx)
+static int gaudi_ctx_init(struct hl_ctx *ctx)
{
- struct gaudi_device *gaudi = hdev->asic_specific;
- struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
- struct hl_hw_sob *hw_sob;
- int sob, ext_idx = gaudi->ext_queue_idx++;
-
- /*
- * The external queues might not sit sequentially, hence use the
- * real external queue index for the SOB/MON base id.
- */
- hw_queue->base_sob_id = ext_idx * HL_RSVD_SOBS;
- hw_queue->base_mon_id = ext_idx * HL_RSVD_MONS;
- hw_queue->next_sob_val = 1;
- hw_queue->curr_sob_offset = 0;
-
- for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
- hw_sob = &hw_queue->hw_sob[sob];
- hw_sob->hdev = hdev;
- hw_sob->sob_id = hw_queue->base_sob_id + sob;
- hw_sob->q_idx = q_idx;
- kref_init(&hw_sob->kref);
- }
+ return 0;
}
-static void gaudi_ext_queue_reset(struct hl_device *hdev, u32 q_idx)
+static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
- struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
-
- /*
- * In case we got here due to a stuck CS, the refcnt might be bigger
- * than 1 and therefore we reset it.
- */
- kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref);
- hw_queue->curr_sob_offset = 0;
- hw_queue->next_sob_val = 1;
+ return gaudi_cq_assignment[cq_idx];
}
static u32 gaudi_get_signal_cb_size(struct hl_device *hdev)
@@ -6523,16 +6304,17 @@ static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
pkt = (struct packet_msg_short *) (uintptr_t) cb->kernel_address;
memset(pkt, 0, sizeof(*pkt));
- value = 1 << GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_SHIFT; /* inc by 1 */
- value |= 1 << GAUDI_PKT_SHORT_VAL_SOB_MOD_SHIFT; /* add mode */
+ /* Inc by 1, Mode ADD */
+ value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1);
+ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK, 1);
- ctl = (sob_id * 4) << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT; /* SOB id */
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_OP_SHIFT; /* write the value */
- ctl |= 3 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S SOB base */
- ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_MB_SHIFT;
+ ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -6545,12 +6327,12 @@ static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value,
memset(pkt, 0, pkt_size);
- ctl = addr << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT;
- ctl |= 2 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S MON base */
- ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_MB_SHIFT; /* only last pkt needs MB */
+ ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 0); /* last pkt MB */
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -6566,18 +6348,19 @@ static u32 gaudi_add_arm_monitor_pkt(struct packet_msg_short *pkt, u16 sob_id,
memset(pkt, 0, pkt_size);
- value = (sob_id / 8) << GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_SHIFT;
- value |= sob_val << GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_SHIFT;
- value |= 0 << GAUDI_PKT_SHORT_VAL_MON_MODE_SHIFT; /* GREATER_OR_EQUAL */
- value |= mask << GAUDI_PKT_SHORT_VAL_MON_MASK_SHIFT;
+ value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_id / 8);
+ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val);
+ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MODE_MASK,
+ 0); /* GREATER OR EQUAL */
+ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MASK_MASK, mask);
- ctl = addr << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT;
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_OP_SHIFT; /* write the value */
- ctl |= 2 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S MON base */
- ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
- ctl |= 0 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
- ctl |= 1 << GAUDI_PKT_SHORT_CTL_MB_SHIFT;
+ ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
pkt->value = cpu_to_le32(value);
pkt->ctl = cpu_to_le32(ctl);
@@ -6591,15 +6374,14 @@ static u32 gaudi_add_fence_pkt(struct packet_fence *pkt)
memset(pkt, 0, pkt_size);
- cfg = 1 << GAUDI_PKT_FENCE_CFG_DEC_VAL_SHIFT;
- cfg |= 1 << GAUDI_PKT_FENCE_CFG_TARGET_VAL_SHIFT;
- cfg |= 2 << GAUDI_PKT_FENCE_CFG_ID_SHIFT;
+ cfg = FIELD_PREP(GAUDI_PKT_FENCE_CFG_DEC_VAL_MASK, 1);
+ cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK, 1);
+ cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_ID_MASK, 2);
- ctl = 0 << GAUDI_PKT_FENCE_CTL_PRED_SHIFT;
- ctl |= PACKET_FENCE << GAUDI_PKT_FENCE_CTL_OPCODE_SHIFT;
- ctl |= 0 << GAUDI_PKT_FENCE_CTL_EB_SHIFT;
- ctl |= 1 << GAUDI_PKT_FENCE_CTL_RB_SHIFT;
- ctl |= 1 << GAUDI_PKT_FENCE_CTL_MB_SHIFT;
+ ctl = FIELD_PREP(GAUDI_PKT_FENCE_CTL_OPCODE_MASK, PACKET_FENCE);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 0);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
pkt->cfg = cpu_to_le32(cfg);
pkt->ctl = cpu_to_le32(ctl);
@@ -6798,13 +6580,12 @@ static const struct hl_asic_funcs gaudi_funcs = {
.rreg = hl_rreg,
.wreg = hl_wreg,
.halt_coresight = gaudi_halt_coresight,
+ .ctx_init = gaudi_ctx_init,
.get_clk_rate = gaudi_get_clk_rate,
.get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
.read_device_fw_version = gaudi_read_device_fw_version,
.load_firmware_to_device = gaudi_load_firmware_to_device,
.load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
- .ext_queue_init = gaudi_ext_queue_init,
- .ext_queue_reset = gaudi_ext_queue_reset,
.get_signal_cb_size = gaudi_get_signal_cb_size,
.get_wait_cb_size = gaudi_get_wait_cb_size,
.gen_signal_cb = gaudi_gen_signal_cb,
@@ -6817,7 +6598,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
/**
* gaudi_set_asic_funcs - set GAUDI function pointers
*
- * @*hdev: pointer to hl_device structure
+ * @hdev: pointer to hl_device structure
*
*/
void gaudi_set_asic_funcs(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index 41a8d9bff6bf..5dc99f6f0296 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -9,11 +9,11 @@
#define GAUDIP_H_
#include <uapi/misc/habanalabs.h>
-#include "habanalabs.h"
-#include "include/hl_boot_if.h"
-#include "include/gaudi/gaudi_packets.h"
-#include "include/gaudi/gaudi.h"
-#include "include/gaudi/gaudi_async_events.h"
+#include "../common/habanalabs.h"
+#include "../include/common/hl_boot_if.h"
+#include "../include/gaudi/gaudi_packets.h"
+#include "../include/gaudi/gaudi.h"
+#include "../include/gaudi/gaudi_async_events.h"
#define NUMBER_OF_EXT_HW_QUEUES 12
#define NUMBER_OF_CMPLT_QUEUES NUMBER_OF_EXT_HW_QUEUES
@@ -57,6 +57,12 @@
#define GAUDI_DEFAULT_CARD_NAME "HL2000"
+#define GAUDI_MAX_PENDING_CS 1024
+
+#if !IS_MAX_PENDING_CS_VALID(GAUDI_MAX_PENDING_CS)
+#error "GAUDI_MAX_PENDING_CS must be power of 2 and greater than 1"
+#endif
+
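
IS_MAX_PENDING_CS_VALID() is defined in the common habanalabs header rather than in this diff; assuming the usual "power of 2 and greater than 1" formulation, a minimal analogue of the compile-time guard looks like this:

#define MAX_PENDING_CS 1024

#define IS_PO2_GT1(n)   ((n) > 1 && (((n) & ((n) - 1)) == 0))

#if !IS_PO2_GT1(MAX_PENDING_CS)
#error "MAX_PENDING_CS must be a power of 2 and greater than 1"
#endif

int main(void) { return 0; }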
#define PCI_DMA_NUMBER_OF_CHNLS 3
#define HBM_DMA_NUMBER_OF_CHNLS 5
#define DMA_NUMBER_OF_CHNLS (PCI_DMA_NUMBER_OF_CHNLS + \
@@ -117,14 +123,14 @@
/* Internal QMANs PQ sizes */
-#define MME_QMAN_LENGTH 64
+#define MME_QMAN_LENGTH 1024
#define MME_QMAN_SIZE_IN_BYTES (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
-#define HBM_DMA_QMAN_LENGTH 64
+#define HBM_DMA_QMAN_LENGTH 1024
#define HBM_DMA_QMAN_SIZE_IN_BYTES \
(HBM_DMA_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
-#define TPC_QMAN_LENGTH 64
+#define TPC_QMAN_LENGTH 1024
#define TPC_QMAN_SIZE_IN_BYTES (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
#define SRAM_USER_BASE_OFFSET GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START
@@ -228,7 +234,6 @@ struct gaudi_internal_qman_info {
* engine.
* @multi_msi_mode: whether we are working in multi MSI or single MSI mode.
* Multi MSI is possible only with IOMMU enabled.
- * @ext_queue_idx: helper index for external queues initialization.
* @mmu_cache_inv_pi: PI for MMU cache invalidation flow. The H/W expects an
* 8-bit value so use u8.
*/
@@ -249,7 +254,6 @@ struct gaudi_device {
u32 events_stat_aggregate[GAUDI_EVENT_SIZE];
u32 hw_cap_initialized;
u8 multi_msi_mode;
- u8 ext_queue_idx;
u8 mmu_cache_inv_pi;
};
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index bf0e062d7b87..5673ee49819e 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -6,9 +6,9 @@
*/
#include "gaudiP.h"
-#include "include/gaudi/gaudi_coresight.h"
-#include "include/gaudi/asic_reg/gaudi_regs.h"
-#include "include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_coresight.h"
+#include "../include/gaudi/asic_reg/gaudi_regs.h"
+#include "../include/gaudi/gaudi_masks.h"
#include <uapi/misc/habanalabs.h>
#include <linux/coresight.h>
@@ -392,6 +392,7 @@ static int gaudi_config_stm(struct hl_device *hdev,
{
struct hl_debug_params_stm *input;
u64 base_reg;
+ u32 frequency;
int rc;
if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
@@ -420,7 +421,10 @@ static int gaudi_config_stm(struct hl_device *hdev,
WREG32(base_reg + 0xE00, lower_32_bits(input->sp_mask));
WREG32(base_reg + 0xEF4, input->id);
WREG32(base_reg + 0xDF4, 0x80);
- WREG32(base_reg + 0xE8C, input->frequency);
+ frequency = hdev->asic_prop.psoc_timestamp_frequency;
+ if (frequency == 0)
+ frequency = input->frequency;
+ WREG32(base_reg + 0xE8C, frequency);
WREG32(base_reg + 0xE90, 0x7FF);
/* SW-2176 - SW WA for HW bug */
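
STM configuration now prefers the PLL-derived timestamp frequency and only falls back to the debugger-supplied value when the derivation failed (stored as 0). The selection reduces to a sketch like:

#include <stdint.h>

static uint32_t pick_stm_frequency(uint32_t psoc_timestamp_frequency,
                                   uint32_t user_frequency)
{
        return psoc_timestamp_frequency ? psoc_timestamp_frequency
                                        : user_frequency;
}

int main(void)
{
        return pick_stm_frequency(0, 400000000) == 400000000 ? 0 : 1;
}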
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
index 6dd2c2a1cd70..1076b4932ce2 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
@@ -6,7 +6,7 @@
*/
#include "gaudiP.h"
-#include "include/gaudi/gaudi_fw_if.h"
+#include "../include/gaudi/gaudi_fw_if.h"
void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
{
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index 6a351e31fa6a..8d5d6ddee6ed 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -6,7 +6,7 @@
*/
#include "gaudiP.h"
-#include "include/gaudi/asic_reg/gaudi_regs.h"
+#include "../include/gaudi/asic_reg/gaudi_regs.h"
#define GAUDI_NUMBER_OF_RR_REGS 24
#define GAUDI_NUMBER_OF_LBW_RANGES 12
@@ -447,8 +447,7 @@ static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
* gaudi_set_block_as_protected - set the given block as protected
*
* @hdev: pointer to hl_device structure
- * @block: block base address
- *
+ * @base: block base address
*/
static void gaudi_pb_set_block(struct hl_device *hdev, u64 base)
{
diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/misc/habanalabs/goya/Makefile
index bd769083628e..b3f3b7b96683 100644
--- a/drivers/misc/habanalabs/goya/Makefile
+++ b/drivers/misc/habanalabs/goya/Makefile
@@ -1,5 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-subdir-ccflags-y += -I$(src)
-
HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o \
goya/goya_coresight.o
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 88460b2138d8..85030759b2af 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -6,10 +6,10 @@
*/
#include "goyaP.h"
-#include "include/hw_ip/mmu/mmu_general.h"
-#include "include/hw_ip/mmu/mmu_v1_0.h"
-#include "include/goya/asic_reg/goya_masks.h"
-#include "include/goya/goya_reg_map.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
+#include "../include/hw_ip/mmu/mmu_v1_0.h"
+#include "../include/goya/asic_reg/goya_masks.h"
+#include "../include/goya/goya_reg_map.h"
#include <linux/pci.h>
#include <linux/genalloc.h>
@@ -338,11 +338,19 @@ static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
-void goya_get_fixed_properties(struct hl_device *hdev)
+int goya_get_fixed_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int i;
+ prop->max_queues = GOYA_QUEUE_ID_SIZE;
+ prop->hw_queues_props = kcalloc(prop->max_queues,
+ sizeof(struct hw_queue_properties),
+ GFP_KERNEL);
+
+ if (!prop->hw_queues_props)
+ return -ENOMEM;
+
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
prop->hw_queues_props[i].driver_only = 0;
@@ -362,9 +370,6 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->hw_queues_props[i].requires_kernel_cb = 0;
}
- for (; i < HL_MAX_QUEUES; i++)
- prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
-
prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
prop->dram_base_address = DRAM_PHYS_BASE;
@@ -427,6 +432,10 @@ void goya_get_fixed_properties(struct hl_device *hdev)
strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
+
+ prop->max_pending_cs = GOYA_MAX_PENDING_CS;
+
+ return 0;
}
/*
@@ -457,6 +466,7 @@ static int goya_pci_bars_map(struct hl_device *hdev)
static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
{
struct goya_device *goya = hdev->asic_specific;
+ struct hl_inbound_pci_region pci_region;
u64 old_addr = addr;
int rc;
@@ -464,7 +474,10 @@ static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
return old_addr;
/* Inbound Region 1 - Bar 4 - Point to DDR */
- rc = hl_pci_set_dram_bar_base(hdev, 1, 4, addr);
+ pci_region.mode = PCI_BAR_MATCH_MODE;
+ pci_region.bar = DDR_BAR_ID;
+ pci_region.addr = addr;
+ rc = hl_pci_set_inbound_region(hdev, 1, &pci_region);
if (rc)
return U64_MAX;
@@ -486,8 +499,35 @@ static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
*/
static int goya_init_iatu(struct hl_device *hdev)
{
- return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
- HOST_PHYS_BASE, HOST_PHYS_SIZE);
+ struct hl_inbound_pci_region inbound_region;
+ struct hl_outbound_pci_region outbound_region;
+ int rc;
+
+ /* Inbound Region 0 - Bar 0 - Point to SRAM and CFG */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = SRAM_CFG_BAR_ID;
+ inbound_region.addr = SRAM_BASE_ADDR;
+ rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
+ if (rc)
+ goto done;
+
+ /* Inbound Region 1 - Bar 4 - Point to DDR */
+ inbound_region.mode = PCI_BAR_MATCH_MODE;
+ inbound_region.bar = DDR_BAR_ID;
+ inbound_region.addr = DRAM_PHYS_BASE;
+ rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
+ if (rc)
+ goto done;
+
+ hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+
+ /* Outbound Region 0 - Point to Host */
+ outbound_region.addr = HOST_PHYS_BASE;
+ outbound_region.size = HOST_PHYS_SIZE;
+ rc = hl_pci_set_outbound_region(hdev, &outbound_region);
+
+done:
+ return rc;
}
/*
@@ -508,7 +548,11 @@ static int goya_early_init(struct hl_device *hdev)
u32 val;
int rc;
- goya_get_fixed_properties(hdev);
+ rc = goya_get_fixed_properties(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to get fixed properties\n");
+ return rc;
+ }
/* Check BAR sizes */
if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
@@ -518,7 +562,8 @@ static int goya_early_init(struct hl_device *hdev)
(unsigned long long) pci_resource_len(pdev,
SRAM_CFG_BAR_ID),
CFG_BAR_SIZE);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_queue_props;
}
if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
@@ -528,14 +573,15 @@ static int goya_early_init(struct hl_device *hdev)
(unsigned long long) pci_resource_len(pdev,
MSIX_BAR_ID),
MSIX_BAR_SIZE);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_queue_props;
}
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
rc = hl_pci_init(hdev);
if (rc)
- return rc;
+ goto free_queue_props;
if (!hdev->pldm) {
val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
@@ -545,6 +591,10 @@ static int goya_early_init(struct hl_device *hdev)
}
return 0;
+
+free_queue_props:
+ kfree(hdev->asic_prop.hw_queues_props);
+ return rc;
}
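Because goya_get_fixed_properties() now allocates hw_queues_props, every later failure in goya_early_init() must release it, so the early returns become jumps to a cleanup label, and goya_early_fini() frees the array on the normal path. A standalone sketch of that ownership rule, with probe_hw() as a hypothetical stand-in for the later init steps:

#include <linux/errno.h>
#include <linux/slab.h>

static int probe_hw(void) { return -ENODEV; }	/* hypothetical later step */

static int early_init_sketch(void **props)
{
	int rc;

	*props = kcalloc(16, sizeof(u64), GFP_KERNEL);
	if (!*props)
		return -ENOMEM;

	rc = probe_hw();
	if (rc)
		goto free_props;	/* any later failure undoes the alloc */

	return 0;

free_props:
	kfree(*props);
	*props = NULL;
	return rc;
}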
/*
@@ -557,6 +607,7 @@ static int goya_early_init(struct hl_device *hdev)
*/
static int goya_early_fini(struct hl_device *hdev)
{
+ kfree(hdev->asic_prop.hw_queues_props);
hl_pci_fini(hdev);
return 0;
@@ -593,11 +644,36 @@ static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 trace_freq = 0;
+ u32 pll_clk = 0;
+ u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
+ u32 nr = RREG32(mmPSOC_PCI_PLL_NR);
+ u32 nf = RREG32(mmPSOC_PCI_PLL_NF);
+ u32 od = RREG32(mmPSOC_PCI_PLL_OD);
+
+ if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
+ if (div_sel == DIV_SEL_REF_CLK)
+ trace_freq = PLL_REF_CLK;
+ else
+ trace_freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ trace_freq = pll_clk;
+ else
+ trace_freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d", div_sel);
+ }
- prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
- prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
- prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
- prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ prop->psoc_timestamp_frequency = trace_freq;
+ prop->psoc_pci_pll_nr = nr;
+ prop->psoc_pci_pll_nf = nf;
+ prop->psoc_pci_pll_od = od;
+ prop->psoc_pci_pll_div_factor = div_fctr;
}
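In closed form, the computation above is pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1)), divided once more by (div_fctr + 1) when a divided source is selected. A standalone restatement with worked numbers; the 50 MHz reference and the register values below are illustrative assumptions, not values read from hardware:

#include <linux/types.h>

#define REF_CLK_HZ	50000000u	/* assumed 50 MHz reference */

static u32 trace_freq_hz(u32 nr, u32 nf, u32 od, u32 div_fctr)
{
	u32 pll_clk = REF_CLK_HZ * (nf + 1) / ((nr + 1) * (od + 1));

	return pll_clk / (div_fctr + 1);
}

/* e.g. nr = 0, nf = 35, od = 1, div_fctr = 2:
 * 50 MHz * 36 / (1 * 2) = 900 MHz, then / (2 + 1) = 300 MHz.
 */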
int goya_late_init(struct hl_device *hdev)
@@ -2165,29 +2241,15 @@ static void goya_disable_timestamp(struct hl_device *hdev)
static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
{
- u32 wait_timeout_ms, cpu_timeout_ms;
+ u32 wait_timeout_ms;
dev_info(hdev->dev,
"Halting compute engines and disabling interrupts\n");
- if (hdev->pldm) {
+ if (hdev->pldm)
wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
- cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
- } else {
+ else
wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
- cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
- }
-
- if (hard_reset) {
- /*
- * I don't know what is the state of the CPU so make sure it is
- * stopped in any means necessary
- */
- WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
- WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
- GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
- msleep(cpu_timeout_ms);
- }
goya_stop_external_queues(hdev);
goya_stop_internal_queues(hdev);
@@ -2492,14 +2554,26 @@ disable_queues:
static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
{
struct goya_device *goya = hdev->asic_specific;
- u32 reset_timeout_ms, status;
+ u32 reset_timeout_ms, cpu_timeout_ms, status;
- if (hdev->pldm)
+ if (hdev->pldm) {
reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
- else
+ cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
+ } else {
reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
+ cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
+ }
if (hard_reset) {
+ /* I don't know what the state of the CPU is, so make sure it
+ * is stopped by any means necessary
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
+
+ msleep(cpu_timeout_ms);
+
goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
goya_disable_clk_rlx(hdev);
goya_set_pll_refclk(hdev);
@@ -3701,9 +3775,8 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
parser->patched_cb_size = parser->user_cb_size +
sizeof(struct packet_msg_prot) * 2;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
- parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID, false);
if (rc) {
dev_err(hdev->dev,
@@ -3775,9 +3848,8 @@ static int goya_parse_cb_no_mmu(struct hl_device *hdev,
if (rc)
goto free_userptr;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
- parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID, false);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
@@ -3946,8 +4018,7 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
*val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(addr - SRAM_BASE_ADDR));
- } else if ((addr >= DRAM_PHYS_BASE) &&
- (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
+ } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
@@ -4003,8 +4074,7 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(addr - SRAM_BASE_ADDR));
- } else if ((addr >= DRAM_PHYS_BASE) &&
- (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
+ } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
@@ -4048,9 +4118,8 @@ static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
*val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(addr - SRAM_BASE_ADDR));
- } else if ((addr >= DRAM_PHYS_BASE) &&
- (addr <=
- DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64))) {
+ } else if (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
@@ -4092,9 +4161,8 @@ static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
(addr - SRAM_BASE_ADDR));
- } else if ((addr >= DRAM_PHYS_BASE) &&
- (addr <=
- DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64))) {
+ } else if (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
u64 bar_base_addr = DRAM_PHYS_BASE +
(addr & ~(prop->dram_pci_bar_size - 0x1ull));
@@ -4627,7 +4695,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G);
cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) +
sizeof(struct packet_msg_prot);
- cb = hl_cb_kernel_create(hdev, cb_size);
+ cb = hl_cb_kernel_create(hdev, cb_size, false);
if (!cb)
return -ENOMEM;
@@ -5157,19 +5225,14 @@ static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
return RREG32(mmHW_STATE);
}
-u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
+static int goya_ctx_init(struct hl_ctx *ctx)
{
- return cq_idx;
-}
-
-static void goya_ext_queue_init(struct hl_device *hdev, u32 q_idx)
-{
-
+ return 0;
}
-static void goya_ext_queue_reset(struct hl_device *hdev, u32 q_idx)
+u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
-
+ return cq_idx;
}
static u32 goya_get_signal_cb_size(struct hl_device *hdev)
@@ -5280,13 +5343,12 @@ static const struct hl_asic_funcs goya_funcs = {
.rreg = hl_rreg,
.wreg = hl_wreg,
.halt_coresight = goya_halt_coresight,
+ .ctx_init = goya_ctx_init,
.get_clk_rate = goya_get_clk_rate,
.get_queue_id_for_cq = goya_get_queue_id_for_cq,
.read_device_fw_version = goya_read_device_fw_version,
.load_firmware_to_device = goya_load_firmware_to_device,
.load_boot_fit_to_device = goya_load_boot_fit_to_device,
- .ext_queue_init = goya_ext_queue_init,
- .ext_queue_reset = goya_ext_queue_reset,
.get_signal_cb_size = goya_get_signal_cb_size,
.get_wait_cb_size = goya_get_wait_cb_size,
.gen_signal_cb = goya_gen_signal_cb,
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index d36f8d90c9c9..bb7474ee9784 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -9,12 +9,12 @@
#define GOYAP_H_
#include <uapi/misc/habanalabs.h>
-#include "habanalabs.h"
-#include "include/hl_boot_if.h"
-#include "include/goya/goya_packets.h"
-#include "include/goya/goya.h"
-#include "include/goya/goya_async_events.h"
-#include "include/goya/goya_fw_if.h"
+#include "../common/habanalabs.h"
+#include "../include/common/hl_boot_if.h"
+#include "../include/goya/goya_packets.h"
+#include "../include/goya/goya.h"
+#include "../include/goya/goya_async_events.h"
+#include "../include/goya/goya_fw_if.h"
#define NUMBER_OF_CMPLT_QUEUES 5
#define NUMBER_OF_EXT_HW_QUEUES 5
@@ -31,10 +31,6 @@
*/
#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + 1)
-#if (NUMBER_OF_HW_QUEUES >= HL_MAX_QUEUES)
-#error "Number of H/W queues must be smaller than HL_MAX_QUEUES"
-#endif
-
#if (NUMBER_OF_INTERRUPTS > GOYA_MSIX_ENTRIES)
#error "Number of MSIX interrupts must be smaller or equal to GOYA_MSIX_ENTRIES"
#endif
@@ -57,6 +53,12 @@
#define GOYA_DEFAULT_CARD_NAME "HL1000"
+#define GOYA_MAX_PENDING_CS 64
+
+#if !IS_MAX_PENDING_CS_VALID(GOYA_MAX_PENDING_CS)
+#error "GOYA_MAX_PENDING_CS must be power of 2 and greater than 1"
+#endif
+
/* DRAM Memory Map */
#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
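Judging by the #error text, IS_MAX_PENDING_CS_VALID() checks for a power of two greater than 1; its real definition lives in the common headers, which this diff does not touch. One conventional way to write that test:

/* n is a power of two iff exactly one bit is set, i.e.
 * (n & (n - 1)) == 0; requiring n > 1 also rejects 0 and 1.
 */
#define IS_POW2_GT1(n)	((n) > 1 && (((n) & ((n) - 1)) == 0))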
@@ -164,7 +166,7 @@ struct goya_device {
u8 device_cpu_mmu_mappings_done;
};
-void goya_get_fixed_properties(struct hl_device *hdev);
+int goya_get_fixed_properties(struct hl_device *hdev);
int goya_mmu_init(struct hl_device *hdev);
void goya_init_dma_qmans(struct hl_device *hdev);
void goya_init_mme_qmans(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index 1258724ea510..b03912483de0 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -6,9 +6,9 @@
*/
#include "goyaP.h"
-#include "include/goya/goya_coresight.h"
-#include "include/goya/asic_reg/goya_regs.h"
-#include "include/goya/asic_reg/goya_masks.h"
+#include "../include/goya/goya_coresight.h"
+#include "../include/goya/asic_reg/goya_regs.h"
+#include "../include/goya/asic_reg/goya_masks.h"
#include <uapi/misc/habanalabs.h>
@@ -232,6 +232,7 @@ static int goya_config_stm(struct hl_device *hdev,
{
struct hl_debug_params_stm *input;
u64 base_reg;
+ u32 frequency;
int rc;
if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
@@ -264,7 +265,10 @@ static int goya_config_stm(struct hl_device *hdev,
WREG32(base_reg + 0xE20, 0xFFFFFFFF);
WREG32(base_reg + 0xEF4, input->id);
WREG32(base_reg + 0xDF4, 0x80);
- WREG32(base_reg + 0xE8C, input->frequency);
+ frequency = hdev->asic_prop.psoc_timestamp_frequency;
+ if (frequency == 0)
+ frequency = input->frequency;
+ WREG32(base_reg + 0xE8C, frequency);
WREG32(base_reg + 0xE90, 0x7FF);
WREG32(base_reg + 0xE80, 0x27 | (input->id << 16));
} else {
@@ -640,7 +644,6 @@ static int goya_config_spmu(struct hl_device *hdev,
int goya_debug_coresight(struct hl_device *hdev, void *data)
{
struct hl_debug_params *params = data;
- u32 val;
int rc = 0;
switch (params->op) {
@@ -672,7 +675,7 @@ int goya_debug_coresight(struct hl_device *hdev, void *data)
}
/* Perform read from the device to flush all configuration */
- val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
return rc;
}
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
index de8297001fea..14701836f92b 100644
--- a/drivers/misc/habanalabs/goya/goya_security.c
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -6,7 +6,7 @@
*/
#include "goyaP.h"
-#include "include/goya/asic_reg/goya_regs.h"
+#include "../include/goya/asic_reg/goya_regs.h"
/*
* goya_set_block_as_protected - set the given block as protected
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/common/armcp_if.h
index a34fc39ad87e..07f9972db28d 100644
--- a/drivers/misc/habanalabs/include/armcp_if.h
+++ b/drivers/misc/habanalabs/include/common/armcp_if.h
@@ -19,9 +19,19 @@ struct hl_eq_header {
__le32 ctl;
};
+struct hl_eq_ecc_data {
+ __le64 ecc_address;
+ __le64 ecc_syndrom;
+ __u8 memory_wrapper_idx;
+ __u8 pad[7];
+};
+
struct hl_eq_entry {
struct hl_eq_header hdr;
- __le64 data[7];
+ union {
+ struct hl_eq_ecc_data ecc_data;
+ __le64 data[7];
+ };
};
#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry)
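The union gives the event-queue entry a typed view of its payload without changing its size: struct hl_eq_ecc_data occupies 8 + 8 + 1 + 7 = 24 bytes, well inside the existing 7 * 8 = 56-byte data array, so HL_EQ_ENTRY_SIZE is unchanged. A hypothetical consumer of the typed view (only the struct layout comes from the hunk; the header decoding is not shown):

#include <linux/kernel.h>

static void handle_ecc_event(struct hl_eq_entry *entry)
{
	struct hl_eq_ecc_data *ecc = &entry->ecc_data;

	pr_err("ECC error at 0x%llx, syndrome 0x%llx, wrapper %u\n",
	       le64_to_cpu(ecc->ecc_address),
	       le64_to_cpu(ecc->ecc_syndrom),
	       ecc->memory_wrapper_idx);
}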
@@ -276,6 +286,8 @@ struct armcp_packet {
/* For get Armcp info/EEPROM data */
__le32 data_max_size;
};
+
+ __le32 reserved;
};
struct armcp_unmask_irq_arr_packet {
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index c22d134e73af..bb67cafc6e00 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -44,6 +44,15 @@
* The NIC FW loading and initialization
* failed. This means NICs are not usable.
*
+ * CPU_BOOT_ERR0_SECURITY_NOT_RDY Chip security initialization has been
+ * started, but is not ready yet - chip
+ * cannot be accessed.
+ *
+ * CPU_BOOT_ERR0_SECURITY_FAIL Security related tasks have failed.
+ * The tasks are security init (root of
+ * trust), boot authentication (chain of
+ * trust) and data packet authentication.
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -57,6 +66,8 @@
#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << 4)
#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << 5)
#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << 6)
+#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << 7)
+#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << 8)
#define CPU_BOOT_ERR0_ENABLED (1 << 31)
enum cpu_boot_status {
@@ -79,7 +90,10 @@ enum cpu_boot_status {
CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
/* Last boot loader progress status, ready to receive commands */
CPU_BOOT_STATUS_READY_TO_BOOT = 15,
+ /* Internal Boot finished, ready for boot-fit */
CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16,
+ /* Internal Security has been initialized, device can be accessed */
+ CPU_BOOT_STATUS_SECURITY_READY = 17,
};
enum kmd_msg {
diff --git a/drivers/misc/habanalabs/include/qman_if.h b/drivers/misc/habanalabs/include/common/qman_if.h
index 0fdb49188ed7..0fdb49188ed7 100644
--- a/drivers/misc/habanalabs/include/qman_if.h
+++ b/drivers/misc/habanalabs/include/common/qman_if.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
index 85e3b5148595..f92dc53af074 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -91,18 +91,16 @@
#include "psoc_pci_pll_regs.h"
#include "psoc_hbm_pll_regs.h"
+#include "psoc_cpu_pll_regs.h"
-#define GAUDI_ECC_MEM_SEL_OFFSET 0xF18
-#define GAUDI_ECC_ADDRESS_OFFSET 0xF1C
-#define GAUDI_ECC_SYNDROME_OFFSET 0xF20
-#define GAUDI_ECC_SERR0_OFFSET 0xF30
-#define GAUDI_ECC_SERR1_OFFSET 0xF34
-#define GAUDI_ECC_SERR2_OFFSET 0xF38
-#define GAUDI_ECC_SERR3_OFFSET 0xF3C
-#define GAUDI_ECC_DERR0_OFFSET 0xF40
-#define GAUDI_ECC_DERR1_OFFSET 0xF44
-#define GAUDI_ECC_DERR2_OFFSET 0xF48
-#define GAUDI_ECC_DERR3_OFFSET 0xF4C
+#define GAUDI_ECC_MEM_SEL_OFFSET 0xF18
+#define GAUDI_ECC_ADDRESS_OFFSET 0xF1C
+#define GAUDI_ECC_SYNDROME_OFFSET 0xF20
+#define GAUDI_ECC_MEM_INFO_CLR_OFFSET 0xF28
+#define GAUDI_ECC_MEM_INFO_CLR_SERR_MASK BIT(8)
+#define GAUDI_ECC_MEM_INFO_CLR_DERR_MASK BIT(9)
+#define GAUDI_ECC_SERR0_OFFSET 0xF30
+#define GAUDI_ECC_DERR0_OFFSET 0xF40
#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 0x492000
#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x494000
@@ -294,6 +292,7 @@
#define mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG 0xC02000
+#define mmPCIE_AUX_FLR_CTRL 0xC07394
#define mmPCIE_AUX_DBI 0xC07490
#endif /* ASIC_REG_GAUDI_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h
new file mode 100644
index 000000000000..2585c70f59ef
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_CPU_PLL_REGS_H_
+#define ASIC_REG_PSOC_CPU_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_CPU_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_CPU_PLL_NR 0xC70100
+
+#define mmPSOC_CPU_PLL_NF 0xC70104
+
+#define mmPSOC_CPU_PLL_OD 0xC70108
+
+#define mmPSOC_CPU_PLL_NB 0xC7010C
+
+#define mmPSOC_CPU_PLL_CFG 0xC70110
+
+#define mmPSOC_CPU_PLL_LOSE_MASK 0xC70120
+
+#define mmPSOC_CPU_PLL_LOCK_INTR 0xC70128
+
+#define mmPSOC_CPU_PLL_LOCK_BYPASS 0xC7012C
+
+#define mmPSOC_CPU_PLL_DATA_CHNG 0xC70130
+
+#define mmPSOC_CPU_PLL_RST 0xC70134
+
+#define mmPSOC_CPU_PLL_SLIP_WD_CNTR 0xC70150
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_0 0xC70200
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_1 0xC70204
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_2 0xC70208
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_3 0xC7020C
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_0 0xC70220
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_1 0xC70224
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_2 0xC70228
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_CMD_3 0xC7022C
+
+#define mmPSOC_CPU_PLL_DIV_SEL_0 0xC70280
+
+#define mmPSOC_CPU_PLL_DIV_SEL_1 0xC70284
+
+#define mmPSOC_CPU_PLL_DIV_SEL_2 0xC70288
+
+#define mmPSOC_CPU_PLL_DIV_SEL_3 0xC7028C
+
+#define mmPSOC_CPU_PLL_DIV_EN_0 0xC702A0
+
+#define mmPSOC_CPU_PLL_DIV_EN_1 0xC702A4
+
+#define mmPSOC_CPU_PLL_DIV_EN_2 0xC702A8
+
+#define mmPSOC_CPU_PLL_DIV_EN_3 0xC702AC
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_0 0xC702C0
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_1 0xC702C4
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_2 0xC702C8
+
+#define mmPSOC_CPU_PLL_DIV_FACTOR_BUSY_3 0xC702CC
+
+#define mmPSOC_CPU_PLL_CLK_GATER 0xC70300
+
+#define mmPSOC_CPU_PLL_CLK_RLX_0 0xC70310
+
+#define mmPSOC_CPU_PLL_CLK_RLX_1 0xC70314
+
+#define mmPSOC_CPU_PLL_CLK_RLX_2 0xC70318
+
+#define mmPSOC_CPU_PLL_CLK_RLX_3 0xC7031C
+
+#define mmPSOC_CPU_PLL_REF_CNTR_PERIOD 0xC70400
+
+#define mmPSOC_CPU_PLL_REF_LOW_THRESHOLD 0xC70410
+
+#define mmPSOC_CPU_PLL_REF_HIGH_THRESHOLD 0xC70420
+
+#define mmPSOC_CPU_PLL_PLL_NOT_STABLE 0xC70430
+
+#define mmPSOC_CPU_PLL_FREQ_CALC_EN 0xC70440
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_CFG 0xC70500
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_0 0xC70510
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_1 0xC70514
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_2 0xC70518
+
+#define mmPSOC_CPU_PLL_RLX_BITMAP_3 0xC7051C
+
+#endif /* ASIC_REG_PSOC_CPU_PLL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index 96f08050ef0f..13ef6b2887fd 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -455,4 +455,7 @@ enum axi_id {
QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK |\
QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK)
+#define PCIE_AUX_FLR_CTRL_HW_CTRL_MASK 0x1
+#define PCIE_AUX_FLR_CTRL_INT_MASK_MASK 0x2
+
#endif /* GAUDI_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
index 0f0cd067bb43..f30f2c0458d7 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
@@ -85,7 +85,7 @@ struct packet_msg_long {
};
#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_SHIFT 0
-#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK 0x0000EFFF
+#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK 0x00007FFF
#define GAUDI_PKT_SHORT_VAL_SOB_MOD_SHIFT 31
#define GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK 0x80000000
@@ -141,7 +141,7 @@ struct packet_msg_prot {
#define GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK 0x00FF0000
#define GAUDI_PKT_FENCE_CFG_ID_SHIFT 30
-#define GAUDI_PKT_FENCE_CFG_ID_MASK 0xC000000
+#define GAUDI_PKT_FENCE_CFG_ID_MASK 0xC0000000
#define GAUDI_PKT_FENCE_CTL_PRED_SHIFT 0
#define GAUDI_PKT_FENCE_CTL_PRED_MASK 0x0000001F
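Both mask fixes realign a mask with its shift: a 15-bit sync value at shift 0 needs 0x00007FFF (the old 0x0000EFFF left a hole at bit 12), and a 2-bit fence ID at shift 30 needs 0xC0000000 (the old 0xC000000 was a nibble short). Deriving such masks keeps shift and width consistent by construction:

#include <linux/bits.h>

/* Same values as the corrected constants above, derived from the
 * field positions instead of hand-written hex.
 */
#define SOB_SYNC_VAL_MASK	GENMASK(14, 0)	/* 0x00007FFF */
#define FENCE_CFG_ID_MASK	GENMASK(31, 30)	/* 0xC0000000 */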
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 927309b86bab..fea3ae9d8686 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -207,7 +207,7 @@ static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
static inline int fifo_sz(int nr_entry)
{
/* size of a fifo is determined by the number of entries it contains */
- return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE;
+ return nr_entry * sizeof(u64) + FIFOHANDLESIZE;
}
static void fifo_setup(void *base_addr, int nr_entry)
@@ -256,7 +256,8 @@ static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
memset_io(device_ccb, 0, sizeof(struct ccb));
/* free resources used to back send/recv queues */
- pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
+ dma_free_coherent(&pdev->dev, data->dma_size, data->dma_va,
+ data->dma_pa);
}
static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
@@ -272,16 +273,14 @@ static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
2 * desc_mem_sz(NR_QENTRY) +
ILO_START_ALIGN + ILO_CACHE_SZ;
- data->dma_va = pci_alloc_consistent(hw->ilo_dev, data->dma_size,
- &data->dma_pa);
+ data->dma_va = dma_alloc_coherent(&hw->ilo_dev->dev, data->dma_size,
+ &data->dma_pa, GFP_ATOMIC);
if (!data->dma_va)
return -ENOMEM;
dma_va = (char *)data->dma_va;
dma_pa = data->dma_pa;
- memset(dma_va, 0, data->dma_size);
-
dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
dma_pa = roundup(dma_pa, ILO_START_ALIGN);
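These hunks move hpilo from the legacy pci_alloc_consistent()/pci_free_consistent() wrappers to the generic DMA API; dma_alloc_coherent() returns zeroed memory, which is why the explicit memset() after allocation is dropped. The replacement calls in isolation (wrapper names are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *alloc_dma_buf(struct pci_dev *pdev, size_t size,
			   dma_addr_t *pa)
{
	/* Returned memory is already zeroed; no memset() needed. */
	return dma_alloc_coherent(&pdev->dev, size, pa, GFP_ATOMIC);
}

static void free_dma_buf(struct pci_dev *pdev, size_t size,
			 void *va, dma_addr_t pa)
{
	dma_free_coherent(&pdev->dev, size, va, pa);
}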
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index 1aa433a7f66c..f69ff645cac9 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -160,23 +160,23 @@ struct ccb_data {
#define ILO_START_ALIGN 4096
#define ILO_CACHE_SZ 128
struct fifo {
- u64 nrents; /* user requested number of fifo entries */
- u64 imask; /* mask to extract valid fifo index */
- u64 merge; /* O/C bits to merge in during enqueue operation */
- u64 reset; /* set to non-zero when the target device resets */
- u8 pad_0[ILO_CACHE_SZ - (sizeof(u64) * 4)];
+ u64 nrents; /* user requested number of fifo entries */
+ u64 imask; /* mask to extract valid fifo index */
+ u64 merge; /* O/C bits to merge in during enqueue operation */
+ u64 reset; /* set to non-zero when the target device resets */
+ u8 pad_0[ILO_CACHE_SZ - (sizeof(u64) * 4)];
- u64 head;
- u8 pad_1[ILO_CACHE_SZ - (sizeof(u64))];
+ u64 head;
+ u8 pad_1[ILO_CACHE_SZ - (sizeof(u64))];
- u64 tail;
- u8 pad_2[ILO_CACHE_SZ - (sizeof(u64))];
+ u64 tail;
+ u8 pad_2[ILO_CACHE_SZ - (sizeof(u64))];
- u64 fifobar[1];
+ u64 fifobar[];
};
/* convert between struct fifo, and the fifobar, which is saved in the ccb */
-#define FIFOHANDLESIZE (sizeof(struct fifo) - sizeof(u64))
+#define FIFOHANDLESIZE (sizeof(struct fifo))
#define FIFOBARTOHANDLE(_fifo) \
((struct fifo *)(((char *)(_fifo)) - FIFOHANDLESIZE))
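Turning fifobar[1] into a C99 flexible array member removes the phantom element from sizeof(struct fifo), which is why FIFOHANDLESIZE no longer subtracts sizeof(u64); fifo_sz() still adds the per-entry storage explicitly. The shape of that pairing, with hypothetical names:

struct demo_fifo {
	unsigned long long head;
	unsigned long long entries[];	/* flexible array member */
};

/* Header bytes plus entry storage; sizeof() no longer counts a
 * phantom first entry.
 */
static inline unsigned long demo_fifo_sz(int nr_entry)
{
	return sizeof(struct demo_fifo) +
	       nr_entry * sizeof(unsigned long long);
}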
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 2863657fa268..733dd30fbacc 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -94,7 +94,7 @@ static inline void do_exec_command(struct service_processor *sp)
}
}
-/**
+/*
* exec_command
* send a command to a service processor
* Commands are executed sequentially. One command (sp->current_command)
@@ -140,7 +140,7 @@ static void exec_next_command(struct service_processor *sp)
}
}
-/**
+/*
* Sleep until a command has failed or a response has been received
* and the command status has been updated by the interrupt handler.
* (see receive_response).
@@ -153,7 +153,7 @@ void ibmasm_wait_for_response(struct command *cmd, int timeout)
timeout * HZ);
}
-/**
+/*
* receive_command_response
* called by the interrupt handler when a dot command of type command_response
* was received.
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
index 70273a4cb352..df389bd4c9df 100644
--- a/drivers/misc/ibmasm/dot_command.c
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -10,7 +10,7 @@
#include "ibmasm.h"
#include "dot_command.h"
-/**
+/*
* Dispatch an incoming message to the specific handler for the message.
* Called from interrupt context.
*/
@@ -48,7 +48,7 @@ void ibmasm_receive_message(struct service_processor *sp, void *message, int mes
#define INIT_BUFFER_SIZE 32
-/**
+/*
* send the 4.3.5.10 dot command (driver VPD) to the service processor
*/
int ibmasm_send_driver_vpd(struct service_processor *sp)
@@ -99,7 +99,7 @@ struct os_state_command {
unsigned char data;
};
-/**
+/*
* send the 4.3.6 dot command (os state) to the service processor
* During driver init this function is called with os state "up".
* This causes the service processor to start sending heartbeats the
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index 974d63f5a4dd..40ce75f8970c 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -31,7 +31,7 @@ static void wake_up_event_readers(struct service_processor *sp)
wake_up_interruptible(&reader->wait);
}
-/**
+/*
* receive_event
* Called by the interrupt handler when a dot command of type sp_event is
* received.
@@ -68,7 +68,7 @@ static inline int event_available(struct event_buffer *b, struct event_reader *r
return (r->next_serial_number < b->next_serial_number);
}
-/**
+/*
* get_next_event
* Called by event readers (initiated from user space through the file
* system).
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index 6567df638ea9..21c9b6a6f2c3 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -39,7 +39,7 @@ void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_
rhb->stopped = 0;
}
-/**
+/*
* start_reverse_heartbeat
* Loop forever, sending a reverse heartbeat dot command to the service
* processor, then sleeping. The loop comes to an end if the service
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index 2ed23c99f59f..c0d139c26505 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -760,7 +760,7 @@ static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
* @adapter: crq_server_adapter struct
* @buffer: ibmvmc_buffer struct
* @hmc: ibmvmc_hmc struct
- * @msg_length: message length field
+ * @msg_len: message length field
*
* This command is sent between the management partition and the hypervisor
* in order to signal the arrival of an HMC protocol message. The command
@@ -1028,7 +1028,7 @@ static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
* ibmvmc_write - Write
*
* @file: file struct
- * @buf: Character buffer
+ * @buffer: Character buffer
* @count: Count field
* @ppos: Offset
*
@@ -1347,7 +1347,7 @@ static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
/**
* ibmvmc_ioctl - IOCTL
*
- * @session: ibmvmc_file_session struct
+ * @file: file information
* @cmd: cmd field
* @arg: Argument field
*
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 884485c3f723..5eaf74447ca1 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -67,7 +67,6 @@ static void firmware_load(const struct firmware *fw, void *context)
struct spi_device *spi = (struct spi_device *)context;
struct fpga_data *data = spi_get_drvdata(spi);
u8 *buffer;
- int ret;
u8 txbuf[8];
u8 rxbuf[8];
int rx_len = 8;
@@ -92,7 +91,7 @@ static void firmware_load(const struct firmware *fw, void *context)
/* Trying to speak with the FPGA via SPI... */
txbuf[0] = FPGA_CMD_READ_ID;
- ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
jedec_id = get_unaligned_be32(&rxbuf[4]);
dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", jedec_id);
@@ -110,7 +109,7 @@ static void firmware_load(const struct firmware *fw, void *context)
dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
txbuf[0] = FPGA_CMD_READ_STATUS;
- ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = get_unaligned_be32(&rxbuf[4]);
dev_dbg(&spi->dev, "FPGA Status=%08x\n", status);
@@ -130,20 +129,20 @@ static void firmware_load(const struct firmware *fw, void *context)
memcpy(buffer + 4, fw->data, fw->size);
txbuf[0] = FPGA_CMD_REFRESH;
- ret = spi_write(spi, txbuf, 4);
+ spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_WRITE_EN;
- ret = spi_write(spi, txbuf, 4);
+ spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_CLEAR;
- ret = spi_write(spi, txbuf, 4);
+ spi_write(spi, txbuf, 4);
/*
* Wait for FPGA memory to become cleared
*/
for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
txbuf[0] = FPGA_CMD_READ_STATUS;
- ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = get_unaligned_be32(&rxbuf[4]);
if (status == FPGA_STATUS_CLEARED)
break;
@@ -160,13 +159,13 @@ static void firmware_load(const struct firmware *fw, void *context)
}
dev_info(&spi->dev, "Configuring the FPGA...\n");
- ret = spi_write(spi, buffer, fw->size + 8);
+ spi_write(spi, buffer, fw->size + 8);
txbuf[0] = FPGA_CMD_WRITE_DIS;
- ret = spi_write(spi, txbuf, 4);
+ spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_READ_STATUS;
- ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = get_unaligned_be32(&rxbuf[4]);
dev_dbg(&spi->dev, "FPGA Status=%08x\n", status);
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 736675f0a246..4dfbfd51bdf7 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -13,7 +13,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
-#ifdef CONFIG_X86_32
+#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif
@@ -118,9 +118,8 @@ noinline void lkdtm_CORRUPT_STACK(void)
/* Use default char array length that triggers stack protection. */
char data[8] __aligned(sizeof(void *));
- __lkdtm_CORRUPT_STACK(&data);
-
- pr_info("Corrupted stack containing char array ...\n");
+ pr_info("Corrupting stack containing char array ...\n");
+ __lkdtm_CORRUPT_STACK((void *)&data);
}
/* Same as above but will only get a canary with -fstack-protector-strong */
@@ -131,9 +130,8 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void)
unsigned long *ptr;
} data __aligned(sizeof(void *));
- __lkdtm_CORRUPT_STACK(&data);
-
- pr_info("Corrupted stack containing union ...\n");
+ pr_info("Corrupting stack containing union ...\n");
+ __lkdtm_CORRUPT_STACK((void *)&data);
}
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
@@ -248,6 +246,7 @@ void lkdtm_ARRAY_BOUNDS(void)
kfree(not_checked);
kfree(checked);
+ pr_err("FAIL: survived array bounds overflow!\n");
}
void lkdtm_CORRUPT_LIST_ADD(void)
@@ -334,7 +333,7 @@ void lkdtm_STACK_GUARD_PAGE_LEADING(void)
byte = *ptr;
- pr_err("FAIL: accessed page before stack!\n");
+ pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}
/* Test that VMAP_STACK is actually allocating with a trailing guard page */
@@ -348,7 +347,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
byte = *ptr;
- pr_err("FAIL: accessed page after stack!\n");
+ pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
void lkdtm_UNSET_SMEP(void)
@@ -419,7 +418,7 @@ void lkdtm_UNSET_SMEP(void)
void lkdtm_DOUBLE_FAULT(void)
{
-#ifdef CONFIG_X86_32
+#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
/*
* Trigger #DF by setting the stack limit to zero. This clobbers
* a GDT TLS slot, which is okay because the current task will die
@@ -454,38 +453,42 @@ void lkdtm_DOUBLE_FAULT(void)
#endif
}
-#ifdef CONFIG_ARM64_PTR_AUTH
+#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
- /* Reset the keys of current task */
- ptrauth_thread_init_kernel(current);
- ptrauth_thread_switch_kernel(current);
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) {
+ /* Reset the keys of current task */
+ ptrauth_thread_init_kernel(current);
+ ptrauth_thread_switch_kernel(current);
+ }
}
+#endif
-#define CORRUPT_PAC_ITERATE 10
noinline void lkdtm_CORRUPT_PAC(void)
{
+#ifdef CONFIG_ARM64
+#define CORRUPT_PAC_ITERATE 10
int i;
+ if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
+ pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH\n");
+
if (!system_supports_address_auth()) {
- pr_err("FAIL: arm64 pointer authentication feature not present\n");
+ pr_err("FAIL: CPU lacks pointer authentication feature\n");
return;
}
- pr_info("Change the PAC parameters to force function return failure\n");
+ pr_info("changing PAC parameters to force function return failure...\n");
/*
- * Pac is a hash value computed from input keys, return address and
+ * PAC is a hash value computed from input keys, return address and
* stack pointer. Since the PAC has fewer bits there is a chance of
* collision, so iterate a few times to reduce the collision probability.
*/
for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
change_pac_parameters();
- pr_err("FAIL: %s test failed. Kernel may be unstable from here\n", __func__);
-}
-#else /* !CONFIG_ARM64_PTR_AUTH */
-noinline void lkdtm_CORRUPT_PAC(void)
-{
- pr_err("FAIL: arm64 pointer authentication config disabled\n");
-}
+ pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
+#else
+ pr_err("XFAIL: this test is arm64-only\n");
#endif
+}
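Replacing #ifdef with IS_ENABLED() keeps both branches compiled, and therefore type-checked, in every configuration while the dead one still folds away; only the arm64-wide #ifdef survives because the body references arm64-only symbols. The general pattern, with CONFIG_FOO and the helpers as placeholders:

/* IS_ENABLED(CONFIG_FOO) expands to a compile-time 0 or 1, so the
 * dead branch is discarded but both sides are still parsed.
 */
if (IS_ENABLED(CONFIG_FOO))
	do_foo();
else
	do_fallback();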
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 3c5cec85edce..1323bc16f113 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -58,11 +58,12 @@ void lkdtm_READ_AFTER_FREE(void)
int *base, *val, saw;
size_t len = 1024;
/*
- * The slub allocator uses the first word to store the free
- * pointer in some configurations. Use the middle of the
- * allocation to avoid running into the freelist
+ * The slub allocator will use either the first word or
+ * the middle of the allocation to store the free pointer,
+ * depending on configuration. Store in the second word to
+ * avoid running into the freelist.
*/
- size_t offset = (len / sizeof(*base)) / 2;
+ size_t offset = sizeof(*base);
base = kmalloc(len, GFP_KERNEL);
if (!base) {
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 601a2156a0d4..8878538b2c13 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -31,9 +31,7 @@ void lkdtm_CORRUPT_USER_DS(void);
void lkdtm_STACK_GUARD_PAGE_LEADING(void);
void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
void lkdtm_UNSET_SMEP(void);
-#ifdef CONFIG_X86_32
void lkdtm_DOUBLE_FAULT(void);
-#endif
void lkdtm_CORRUPT_PAC(void);
/* lkdtm_heap.c */
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 62f76d506f04..2dede2ef658f 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -57,6 +57,7 @@ static noinline void execute_location(void *dst, bool write)
}
pr_info("attempting bad execution at %px\n", func);
func();
+ pr_err("FAIL: func returned\n");
}
static void execute_user_location(void *dst)
@@ -75,20 +76,22 @@ static void execute_user_location(void *dst)
return;
pr_info("attempting bad execution at %px\n", func);
func();
+ pr_err("FAIL: func returned\n");
}
void lkdtm_WRITE_RO(void)
{
- /* Explicitly cast away "const" for the test. */
- unsigned long *ptr = (unsigned long *)&rodata;
+ /* Explicitly cast away "const" for the test and make volatile. */
+ volatile unsigned long *ptr = (unsigned long *)&rodata;
pr_info("attempting bad rodata write at %px\n", ptr);
*ptr ^= 0xabcd1234;
+ pr_err("FAIL: survived bad write\n");
}
void lkdtm_WRITE_RO_AFTER_INIT(void)
{
- unsigned long *ptr = &ro_after_init;
+ volatile unsigned long *ptr = &ro_after_init;
/*
* Verify we were written to during init. Since an Oops
@@ -102,19 +105,21 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
pr_info("attempting bad ro_after_init write at %px\n", ptr);
*ptr ^= 0xabcd1234;
+ pr_err("FAIL: survived bad write\n");
}
void lkdtm_WRITE_KERN(void)
{
size_t size;
- unsigned char *ptr;
+ volatile unsigned char *ptr;
size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
ptr = (unsigned char *)do_overwritten;
pr_info("attempting bad %zu byte write at %px\n", size, ptr);
- memcpy(ptr, (unsigned char *)do_nothing, size);
+ memcpy((void *)ptr, (unsigned char *)do_nothing, size);
flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
+ pr_err("FAIL: survived bad write\n");
do_overwritten();
}
@@ -193,9 +198,11 @@ void lkdtm_ACCESS_USERSPACE(void)
pr_info("attempting bad read at %px\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
+ pr_err("FAIL: survived bad read\n");
pr_info("attempting bad write at %px\n", ptr);
*ptr = tmp;
+ pr_err("FAIL: survived bad write\n");
vm_munmap(user_addr, PAGE_SIZE);
}
@@ -203,19 +210,20 @@ void lkdtm_ACCESS_USERSPACE(void)
void lkdtm_ACCESS_NULL(void)
{
unsigned long tmp;
- unsigned long *ptr = (unsigned long *)NULL;
+ volatile unsigned long *ptr = (unsigned long *)NULL;
pr_info("attempting bad read at %px\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
+ pr_err("FAIL: survived bad read\n");
pr_info("attempting bad write at %px\n", ptr);
*ptr = tmp;
+ pr_err("FAIL: survived bad write\n");
}
void __init lkdtm_perms_init(void)
{
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
-
}
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index e172719dd86d..b833367a45d0 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -304,19 +304,22 @@ void lkdtm_USERCOPY_KERNEL(void)
return;
}
- pr_info("attempting good copy_to_user from kernel rodata\n");
+ pr_info("attempting good copy_to_user from kernel rodata: %px\n",
+ test_text);
if (copy_to_user((void __user *)user_addr, test_text,
unconst + sizeof(test_text))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
- pr_info("attempting bad copy_to_user from kernel text\n");
+ pr_info("attempting bad copy_to_user from kernel text: %px\n",
+ vm_mmap);
if (copy_to_user((void __user *)user_addr, vm_mmap,
unconst + PAGE_SIZE)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
+ pr_err("FAIL: survived bad copy_to_user()\n");
free_user:
vm_munmap(user_addr, PAGE_SIZE);
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 9d7b3719bfa0..f5fd5b786607 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -9,7 +9,7 @@ config INTEL_MEI
if selected, the /dev/mei misc device will be created.
For more information see
- <http://software.intel.com/en-us/manageability/>
+ <https://software.intel.com/en-us/manageability/>
config INTEL_MEI_ME
tristate "ME Enabled Intel Chipsets"
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 910f059b3384..07ba16d46690 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -159,17 +159,17 @@ static int mei_osver(struct mei_cl_device *cldev)
static int mei_fwver(struct mei_cl_device *cldev)
{
char buf[MKHI_FWVER_BUF_LEN];
- struct mkhi_msg *req;
+ struct mkhi_msg req;
+ struct mkhi_msg *rsp;
struct mkhi_fw_ver *fwver;
int bytes_recv, ret, i;
memset(buf, 0, sizeof(buf));
- req = (struct mkhi_msg *)buf;
- req->hdr.group_id = MKHI_GEN_GROUP_ID;
- req->hdr.command = MKHI_GEN_GET_FW_VERSION_CMD;
+ req.hdr.group_id = MKHI_GEN_GROUP_ID;
+ req.hdr.command = MKHI_GEN_GET_FW_VERSION_CMD;
- ret = __mei_cl_send(cldev->cl, buf, sizeof(struct mkhi_msg_hdr),
+ ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req),
MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n");
@@ -188,7 +188,8 @@ static int mei_fwver(struct mei_cl_device *cldev)
return -EIO;
}
- fwver = (struct mkhi_fw_ver *)req->data;
+ rsp = (struct mkhi_msg *)buf;
+ fwver = (struct mkhi_fw_ver *)rsp->data;
memset(cldev->bus->fw_ver, 0, sizeof(cldev->bus->fw_ver));
for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++) {
if ((size_t)bytes_recv < MKHI_FWVER_LEN(i + 1))
@@ -329,16 +330,14 @@ static int mei_nfc_if_version(struct mei_cl *cl,
WARN_ON(mutex_is_locked(&bus->device_lock));
- ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd),
- MEI_CL_IO_TX_BLOCKING);
+ ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
dev_err(bus->dev, "Could not send IF version cmd\n");
return ret;
}
/* to be sure on the stack we alloc memory */
- if_version_length = sizeof(struct mei_nfc_reply) +
- sizeof(struct mei_nfc_if_version);
+ if_version_length = sizeof(*reply) + sizeof(*ver);
reply = kzalloc(if_version_length, GFP_KERNEL);
if (!reply)
@@ -352,7 +351,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
goto err;
}
- memcpy(ver, reply->data, sizeof(struct mei_nfc_if_version));
+ memcpy(ver, reply->data, sizeof(*ver));
dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
ver->fw_ivn, ver->vendor_id, ver->radio_type);
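These hunks standardize on sizeof(*var) and sizeof(var) instead of repeating the type name, so each length expression keeps tracking its variable if the type is ever changed. The idiom in isolation (struct and names hypothetical):

#include <linux/slab.h>

struct demo { int a; };

static struct demo *demo_alloc(void)
{
	/* sizeof(*d) follows d's type; sizeof(struct demo) would
	 * silently go stale after a type change.
	 */
	struct demo *d = kzalloc(sizeof(*d), GFP_KERNEL);

	return d;
}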
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index f476dbc7252b..a6dfc3ce1db2 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -931,7 +931,7 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
struct mei_cl_device *cldev;
struct mei_cl *cl;
- cldev = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
+ cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
if (!cldev)
return NULL;
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index b32c825a0945..2572887d99b6 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -369,7 +369,7 @@ static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
{
struct mei_cl_cb *cb;
- cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return NULL;
@@ -552,7 +552,7 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
*/
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
- memset(cl, 0, sizeof(struct mei_cl));
+ memset(cl, 0, sizeof(*cl));
init_waitqueue_head(&cl->wait);
init_waitqueue_head(&cl->rx_wait);
init_waitqueue_head(&cl->tx_wait);
@@ -575,7 +575,7 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
struct mei_cl *cl;
- cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
+ cl = kmalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
return NULL;
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index a44094cdbc36..308caee86920 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/export.h>
@@ -257,22 +257,21 @@ int mei_hbm_start_wait(struct mei_device *dev)
int mei_hbm_start_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
- struct hbm_host_version_request start_req;
- const size_t len = sizeof(struct hbm_host_version_request);
+ struct hbm_host_version_request req;
int ret;
mei_hbm_reset(dev);
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
/* host start message */
- memset(&start_req, 0, len);
- start_req.hbm_cmd = HOST_START_REQ_CMD;
- start_req.host_version.major_version = HBM_MAJOR_VERSION;
- start_req.host_version.minor_version = HBM_MINOR_VERSION;
+ memset(&req, 0, sizeof(req));
+ req.hbm_cmd = HOST_START_REQ_CMD;
+ req.host_version.major_version = HBM_MAJOR_VERSION;
+ req.host_version.minor_version = HBM_MINOR_VERSION;
dev->hbm_state = MEI_HBM_IDLE;
- ret = mei_hbm_write_message(dev, &mei_hdr, &start_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "version message write failed: ret = %d\n",
ret);
@@ -295,13 +294,12 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
struct hbm_dma_setup_request req;
- const size_t len = sizeof(struct hbm_dma_setup_request);
unsigned int i;
int ret;
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&req, 0, len);
+ memset(&req, 0, sizeof(req));
req.hbm_cmd = MEI_HBM_DMA_SETUP_REQ_CMD;
for (i = 0; i < DMA_DSCR_NUM; i++) {
phys_addr_t paddr;
@@ -337,21 +335,19 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev)
static int mei_hbm_enum_clients_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
- struct hbm_host_enum_request enum_req;
- const size_t len = sizeof(struct hbm_host_enum_request);
+ struct hbm_host_enum_request req;
int ret;
/* enumerate clients */
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&enum_req, 0, len);
- enum_req.hbm_cmd = HOST_ENUM_REQ_CMD;
- enum_req.flags |= dev->hbm_f_dc_supported ?
- MEI_HBM_ENUM_F_ALLOW_ADD : 0;
- enum_req.flags |= dev->hbm_f_ie_supported ?
+ memset(&req, 0, sizeof(req));
+ req.hbm_cmd = HOST_ENUM_REQ_CMD;
+ req.flags |= dev->hbm_f_dc_supported ? MEI_HBM_ENUM_F_ALLOW_ADD : 0;
+ req.flags |= dev->hbm_f_ie_supported ?
MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
- ret = mei_hbm_write_message(dev, &mei_hdr, &enum_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "enumeration request write failed: ret = %d.\n",
ret);
@@ -380,7 +376,7 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
mei_me_cl_rm_by_uuid(dev, uuid);
- me_cl = kzalloc(sizeof(struct mei_me_client), GFP_KERNEL);
+ me_cl = kzalloc(sizeof(*me_cl), GFP_KERNEL);
if (!me_cl)
return -ENOMEM;
@@ -408,14 +404,13 @@ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
{
struct mei_msg_hdr mei_hdr;
struct hbm_add_client_response resp;
- const size_t len = sizeof(struct hbm_add_client_response);
int ret;
dev_dbg(dev->dev, "adding client response\n");
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(resp));
- memset(&resp, 0, sizeof(struct hbm_add_client_response));
+ memset(&resp, 0, sizeof(resp));
resp.hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
resp.me_addr = addr;
resp.status = status;
@@ -469,11 +464,10 @@ int mei_hbm_cl_notify_req(struct mei_device *dev,
struct mei_msg_hdr mei_hdr;
struct hbm_notification_request req;
- const size_t len = sizeof(struct hbm_notification_request);
int ret;
- mei_hbm_hdr(&mei_hdr, len);
- mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
+ mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, sizeof(req));
req.start = start;
@@ -580,8 +574,7 @@ static void mei_hbm_cl_notify(struct mei_device *dev,
static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
{
struct mei_msg_hdr mei_hdr;
- struct hbm_props_request prop_req;
- const size_t len = sizeof(struct hbm_props_request);
+ struct hbm_props_request req;
unsigned long addr;
int ret;
@@ -591,18 +584,17 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
if (addr == MEI_CLIENTS_MAX) {
dev->hbm_state = MEI_HBM_STARTED;
mei_host_client_init(dev);
-
return 0;
}
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&prop_req, 0, sizeof(struct hbm_props_request));
+ memset(&req, 0, sizeof(req));
- prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
- prop_req.me_addr = addr;
+ req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ req.me_addr = addr;
- ret = mei_hbm_write_message(dev, &mei_hdr, &prop_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
dev_err(dev->dev, "properties request write failed: ret = %d\n",
ret);
@@ -628,15 +620,14 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
{
struct mei_msg_hdr mei_hdr;
struct hbm_power_gate req;
- const size_t len = sizeof(struct hbm_power_gate);
int ret;
if (!dev->hbm_f_pg_supported)
return -EOPNOTSUPP;
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&req, 0, len);
+ memset(&req, 0, sizeof(req));
req.hbm_cmd = pg_cmd;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
@@ -657,11 +648,10 @@ static int mei_hbm_stop_req(struct mei_device *dev)
{
struct mei_msg_hdr mei_hdr;
struct hbm_host_stop_request req;
- const size_t len = sizeof(struct hbm_host_stop_request);
- mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
- memset(&req, 0, len);
+ memset(&req, 0, sizeof(req));
req.hbm_cmd = HOST_STOP_REQ_CMD;
req.reason = DRIVER_STOP_REQUEST;
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 4c596c646ac0..d1d3e025ca0e 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -572,7 +572,7 @@ static int mei_hdcp_verify_mprime(struct device *dev,
HDCP_2_2_MPRIME_LEN);
drm_hdcp_cpu_to_be24(verify_mprime_in.seq_num_m, data->seq_num_m);
memcpy(verify_mprime_in.streams, data->streams,
- (data->k * sizeof(struct hdcp2_streamid_type)));
+ array_size(data->k, sizeof(*data->streams)));
verify_mprime_in.k = cpu_to_be16(data->k);
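array_size() multiplies like the removed data->k * sizeof(...) expression, but saturates to SIZE_MAX instead of wrapping on overflow, so an oversized count cannot silently become a short copy. In isolation (names hypothetical):

#include <linux/overflow.h>
#include <linux/string.h>

/* Overflow-safe length for copying n fixed-size records. */
static void copy_records(void *dst, const void *src, size_t n,
			 size_t rec_size)
{
	memcpy(dst, src, array_size(n, rec_size));
}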
@@ -852,7 +852,7 @@ static int mei_hdcp_remove(struct mei_cl_device *cldev)
#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
-static struct mei_cl_device_id mei_hdcp_tbl[] = {
+static const struct mei_cl_device_id mei_hdcp_tbl[] = {
{ .uuid = MEI_UUID_HDCP, .version = MEI_CL_VERSION_ANY },
{ }
};
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h
index 18ffc773fa18..834757f5e072 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.h
+++ b/drivers/misc/mei/hdcp/mei_hdcp.h
@@ -358,7 +358,7 @@ struct wired_cmd_repeater_auth_stream_req_in {
u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
u8 m_prime[HDCP_2_2_MPRIME_LEN];
__be16 k;
- struct hdcp2_streamid_type streams[1];
+ struct hdcp2_streamid_type streams[];
} __packed;
struct wired_cmd_repeater_auth_stream_req_out {
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 7becfc768bbc..9cf8d8f60cfe 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -59,6 +59,7 @@
#define MEI_DEV_ID_SPT 0x9D3A /* Sunrise Point */
#define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */
+#define MEI_DEV_ID_SPT_3 0x9D3E /* Sunrise Point 3 (iTouch) */
#define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */
#define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */
@@ -73,6 +74,7 @@
#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
+#define MEI_DEV_ID_KBP_3 0xA2BE /* Kaby Point 3 (iTouch) */
#define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */
#define MEI_DEV_ID_CNP_LP_3 0x9DE4 /* Cannon Point LP 3 (iTouch) */
@@ -106,6 +108,8 @@
/* Host Firmware Status Registers in PCI Config Space */
#define PCI_CFG_HFS_1 0x40
# define PCI_CFG_HFS_1_D0I3_MSK 0x80000000
+# define PCI_CFG_HFS_1_OPMODE_MSK 0xf0000 /* OP MODE Mask: SPS <= 4.0 */
+# define PCI_CFG_HFS_1_OPMODE_SPS 0xf0000 /* SPS SKU : SPS <= 4.0 */
#define PCI_CFG_HFS_2 0x48
#define PCI_CFG_HFS_3 0x60
# define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 7649710a2ab9..cda0829ac589 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -1353,11 +1353,24 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.read = mei_me_read_slots
};
-static bool mei_me_fw_type_nm(struct pci_dev *pdev)
+/**
+ * mei_me_fw_type_nm() - check for nm sku
+ *
+ * Read ME FW Status register to check for the Node Manager (NM) Firmware.
+ * The NM FW is only signaled in PCI function 0.
+ * __Note__: Deprecated by PCH8 and newer.
+ *
+ * @pdev: pci device
+ *
+ * Return: true in case of NM firmware
+ */
+static bool mei_me_fw_type_nm(const struct pci_dev *pdev)
{
u32 reg;
+ unsigned int devfn;
- pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
+ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+ pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_2, &reg);
trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
return (reg & 0x600) == 0x200;
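As the new kernel-doc notes, the NM bits are only valid in PCI function 0, so the quirk now builds the function-0 devfn for the same slot and reads config space through the bus instead of through pdev directly. That access pattern in isolation:

#include <linux/pci.h>

/* Read a config dword from function 0 of the queried device's slot. */
static u32 read_fn0_cfg_dword(const struct pci_dev *pdev, int where)
{
	unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
	u32 reg;

	pci_bus_read_config_dword(pdev->bus, devfn, where, &reg);
	return reg;
}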
@@ -1366,20 +1379,26 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
#define MEI_CFG_FW_NM \
.quirk_probe = mei_me_fw_type_nm
-static bool mei_me_fw_type_sps_4(struct pci_dev *pdev)
+/**
+ * mei_me_fw_sku_sps_4() - check for sps 4.0 sku
+ *
+ * Read ME FW Status register to check for SPS Firmware.
+ * The SPS FW is only signaled in the PCI function 0.
+ * __Note__: Deprecated by SPS 5.0 and newer.
+ *
+ * @pdev: pci device
+ *
+ * Return: true in case of SPS firmware
+ */
+static bool mei_me_fw_type_sps_4(const struct pci_dev *pdev)
{
u32 reg;
unsigned int devfn;
- /*
- * Read ME FW Status register to check for SPS Firmware
- * The SPS FW is only signaled in pci function 0
- */
devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
- /* if bits [19:16] = 15, running SPS Firmware */
- return (reg & 0xf0000) == 0xf0000;
+ return (reg & PCI_CFG_HFS_1_OPMODE_MSK) == PCI_CFG_HFS_1_OPMODE_SPS;
}
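The devfn arithmetic redirects the config read to function 0 of the
same device, where the SKU is signaled. A worked example with
illustrative numbers:

	/* PCI_SLOT(devfn) = (devfn >> 3) & 0x1f
	 * PCI_DEVFN(slot, fn) = (slot << 3) | fn
	 *
	 * Driver bound to device 0x16, function 4:
	 *   pdev->devfn              = (0x16 << 3) | 4 = 0xb4
	 *   PCI_SLOT(0xb4)           = 0x16
	 *   PCI_DEVFN(0x16, 0)       = 0xb0  -> same device, function 0
	 *
	 * The OPMODE test then checks bits [19:16] of HFS_1: all four set
	 * (0xf0000) marks the SPS SKU, per the macros added above.
	 */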
#define MEI_CFG_FW_SPS_4 \
@@ -1395,7 +1414,7 @@ static bool mei_me_fw_type_sps_4(struct pci_dev *pdev)
*
* Return: true in case of SPS firmware
*/
-static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+static bool mei_me_fw_type_sps(const struct pci_dev *pdev)
{
u32 reg;
u32 fw_type;
@@ -1411,6 +1430,9 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
}
+#define MEI_CFG_KIND_ITOUCH \
+ .kind = "itouch"
+
#define MEI_CFG_FW_SPS \
.quirk_probe = mei_me_fw_type_sps
@@ -1480,6 +1502,13 @@ static const struct mei_cfg mei_me_pch8_cfg = {
MEI_CFG_FW_VER_SUPP,
};
+/* PCH8 Lynx Point and newer devices - iTouch */
+static const struct mei_cfg mei_me_pch8_itouch_cfg = {
+ MEI_CFG_KIND_ITOUCH,
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+};
+
/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
MEI_CFG_PCH8_HFS,
@@ -1509,10 +1538,11 @@ static const struct mei_cfg mei_me_pch12_sps_cfg = {
MEI_CFG_FW_SPS,
};
-/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion
- * w/o DMA support
+/* Cannon Lake itouch with quirk for SPS 5.0 and newer Firmware exclusion
+ * w/o DMA support.
*/
-static const struct mei_cfg mei_me_pch12_nodma_sps_cfg = {
+static const struct mei_cfg mei_me_pch12_itouch_sps_cfg = {
+ MEI_CFG_KIND_ITOUCH,
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_SPS,
@@ -1547,11 +1577,12 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
+ [MEI_ME_PCH8_ITOUCH_CFG] = &mei_me_pch8_itouch_cfg,
[MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
[MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
- [MEI_ME_PCH12_SPS_NODMA_CFG] = &mei_me_pch12_nodma_sps_cfg,
+ [MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
[MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
};
@@ -1581,8 +1612,7 @@ struct mei_device *mei_me_dev_init(struct device *parent,
struct mei_me_hw *hw;
int i;
- dev = devm_kzalloc(parent, sizeof(struct mei_device) +
- sizeof(struct mei_me_hw), GFP_KERNEL);
+ dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
if (!dev)
return NULL;
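The sizeof(*dev) + sizeof(*hw) form is the standard kernel idiom:
sizing by the dereferenced pointer keeps the allocation correct if the
pointee types are ever renamed. The same reasoning drives the
sizeof(connect_data) change in main.c further down. In general form
(struct foo is a placeholder):

	struct foo *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);         /* preferred */
	p = kzalloc(sizeof(struct foo), GFP_KERNEL); /* brittle   */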
@@ -1596,6 +1626,8 @@ struct mei_device *mei_me_dev_init(struct device *parent,
dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
+ dev->kind = cfg->kind;
+
return dev;
}
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 6a8973649c49..00a7132ac7a2 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -19,13 +19,15 @@
*
* @fw_status: FW status
* @quirk_probe: device exclusion quirk
+ * @kind: MEI head kind
* @dma_size: device DMA buffers size
* @fw_ver_supported: is fw version retrievable from FW
* @hw_trc_supported: does the hw support trc register
*/
struct mei_cfg {
const struct mei_fw_status fw_status;
- bool (*quirk_probe)(struct pci_dev *pdev);
+ bool (*quirk_probe)(const struct pci_dev *pdev);
+ const char *kind;
size_t dma_size[DMA_DSCR_NUM];
u32 fw_ver_supported:1;
u32 hw_trc_supported:1;
@@ -76,6 +78,8 @@ struct mei_me_hw {
* with quirk for Node Manager exclusion.
* @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer
* client platforms.
+ * @MEI_ME_PCH8_ITOUCH_CFG: Platform Controller Hub Gen8 and newer
+ * client platforms (iTouch).
* @MEI_ME_PCH8_SPS_4_CFG: Platform Controller Hub Gen8 and newer
* server platforms with quirk for
* SPS firmware exclusion.
@@ -100,11 +104,12 @@ enum mei_cfg_idx {
MEI_ME_PCH7_CFG,
MEI_ME_PCH_CPT_PBG_CFG,
MEI_ME_PCH8_CFG,
+ MEI_ME_PCH8_ITOUCH_CFG,
MEI_ME_PCH8_SPS_4_CFG,
MEI_ME_PCH12_CFG,
MEI_ME_PCH12_SPS_4_CFG,
MEI_ME_PCH12_SPS_CFG,
- MEI_ME_PCH12_SPS_NODMA_CFG,
+ MEI_ME_PCH12_SPS_ITOUCH_CFG,
MEI_ME_PCH15_CFG,
MEI_ME_PCH15_SPS_CFG,
MEI_ME_NUM_CFG,
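The enum values double as indices into mei_cfg_list[]; each PCI table
entry (see pci-me.c below) carries one in driver_data so probe can
resolve the per-SKU config. A sketch of the assumed lookup; the
driver's actual helper may bounds-check first:

	const struct mei_cfg *cfg = mei_cfg_list[ent->driver_data];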
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 785b260b3ae9..a4e854b9b9e6 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -1201,8 +1201,7 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
struct mei_device *dev;
struct mei_txe_hw *hw;
- dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
- sizeof(struct mei_txe_hw), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
if (!dev)
return NULL;
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index b1a8d5ec88b3..26fa92cb7f7a 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -319,7 +319,7 @@ struct hbm_props_response {
u8 hbm_cmd;
u8 me_addr;
u8 status;
- u8 reserved[1];
+ u8 reserved;
struct mei_client_properties client_properties;
} __packed;
@@ -352,7 +352,7 @@ struct hbm_add_client_response {
u8 hbm_cmd;
u8 me_addr;
u8 status;
- u8 reserved[1];
+ u8 reserved;
} __packed;
/**
@@ -461,7 +461,7 @@ struct hbm_notification {
u8 hbm_cmd;
u8 me_addr;
u8 host_addr;
- u8 reserved[1];
+ u8 reserved;
} __packed;
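Replacing u8 reserved[1] with plain u8 reserved is layout-neutral:
both occupy exactly one byte in these packed structs, so the wire
format is unchanged. An illustrative compile-time check (not part of
the patch):

	BUILD_BUG_ON(sizeof(struct hbm_add_client_response) != 4);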
/**
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index f17297f2943d..86ef5c1a7928 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -476,7 +476,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
case IOCTL_MEI_CONNECT_CLIENT:
dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
if (copy_from_user(&connect_data, (char __user *)data,
- sizeof(struct mei_connect_client_data))) {
+ sizeof(connect_data))) {
dev_dbg(dev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
goto out;
@@ -488,7 +488,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
/* if all is ok, copying the data back to user. */
if (copy_to_user((char __user *)data, &connect_data,
- sizeof(struct mei_connect_client_data))) {
+ sizeof(connect_data))) {
dev_dbg(dev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
goto out;
@@ -885,6 +885,30 @@ void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
}
}
+/**
+ * kind_show - display device kind
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of the bytes printed into buf or error
+ */
+static ssize_t kind_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ ssize_t ret;
+
+ if (dev->kind)
+ ret = sprintf(buf, "%s\n", dev->kind);
+ else
+ ret = sprintf(buf, "%s\n", "mei");
+
+ return ret;
+}
+static DEVICE_ATTR_RO(kind);
+
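dev->kind is NULL for ordinary MEI heads, so the attribute falls back
to "mei". An equivalent one-liner using the GNU "a ?: b" shorthand that
kernel code commonly uses (sketch only):

	return sprintf(buf, "%s\n", dev->kind ?: "mei");

Userspace can then tell an iTouch head apart by reading the kind
attribute of the mei class device.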
static struct attribute *mei_attrs[] = {
&dev_attr_fw_status.attr,
&dev_attr_hbm_ver.attr,
@@ -893,6 +917,7 @@ static struct attribute *mei_attrs[] = {
&dev_attr_fw_ver.attr,
&dev_attr_dev_state.attr,
&dev_attr_trc.attr,
+ &dev_attr_kind.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 3a29db07211d..d3a4f54c0ae7 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -445,6 +445,8 @@ struct mei_fw_version {
* @device_list : mei client bus list
* @cl_bus_lock : client bus list lock
*
+ * @kind : kind of mei device
+ *
* @dbgfs_dir : debugfs mei root directory
*
* @ops: : hw specific operations
@@ -528,6 +530,8 @@ struct mei_device {
struct list_head device_list;
struct mutex cl_bus_lock;
+ const char *kind;
+
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 2a3f2fd5df50..1de9ef7a272b 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -68,6 +68,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},
@@ -81,17 +82,18 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_3, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_NODMA_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 8f201d019f5a..b9bb086785db 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -4,6 +4,7 @@ menu "Intel MIC & related support"
config INTEL_MIC_BUS
tristate "Intel MIC Bus Driver"
depends on 64BIT && PCI && X86
+ select DMA_OPS
help
This option is selected by any driver which registers a
device or driver on the MIC Bus, such as CONFIG_INTEL_MIC_HOST,
@@ -19,6 +20,7 @@ config INTEL_MIC_BUS
config SCIF_BUS
tristate "SCIF Bus Driver"
depends on 64BIT && PCI && X86
+ select DMA_OPS
help
This option is selected by any driver which registers a
device or driver on the SCIF Bus, such as CONFIG_INTEL_MIC_HOST
@@ -33,6 +35,7 @@ config SCIF_BUS
config VOP_BUS
tristate "VOP Bus Driver"
+ select DMA_OPS
help
This option is selected by any driver which registers a
device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST
@@ -49,6 +52,7 @@ config INTEL_MIC_HOST
tristate "Intel MIC Host Driver"
depends on 64BIT && PCI && X86
depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
+ select DMA_OPS
help
This enables Host Driver support for the Intel Many Integrated
Core (MIC) family of PCIe form factor coprocessor devices that
diff --git a/drivers/misc/mic/card/mic_debugfs.c b/drivers/misc/mic/card/mic_debugfs.c
index b58608829b18..4c326e8f4d99 100644
--- a/drivers/misc/mic/card/mic_debugfs.c
+++ b/drivers/misc/mic/card/mic_debugfs.c
@@ -24,7 +24,7 @@
/* Debugfs parent dir */
static struct dentry *mic_dbg;
-/**
+/*
* mic_intr_show - Send interrupts to host.
*/
static int mic_intr_show(struct seq_file *s, void *unused)
@@ -46,7 +46,7 @@ static int mic_intr_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(mic_intr);
-/**
+/*
* mic_create_card_debug_dir - Initialize MIC debugfs entries.
*/
void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
@@ -60,7 +60,7 @@ void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
&mic_intr_fops);
}
-/**
+/*
* mic_delete_card_debug_dir - Uninitialize MIC debugfs entries.
*/
void mic_delete_card_debug_dir(struct mic_driver *mdrv)
@@ -68,7 +68,7 @@ void mic_delete_card_debug_dir(struct mic_driver *mdrv)
debugfs_remove_recursive(mdrv->dbg_dir);
}
-/**
+/*
* mic_init_card_debugfs - Initialize global debugfs entry.
*/
void __init mic_init_card_debugfs(void)
@@ -76,7 +76,7 @@ void __init mic_init_card_debugfs(void)
mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
}
-/**
+/*
* mic_exit_card_debugfs - Uninitialize global debugfs entry
*/
void mic_exit_card_debugfs(void)
diff --git a/drivers/misc/mic/cosm/cosm_debugfs.c b/drivers/misc/mic/cosm/cosm_debugfs.c
index 68a731fd86de..cb55653cf1f9 100644
--- a/drivers/misc/mic/cosm/cosm_debugfs.c
+++ b/drivers/misc/mic/cosm/cosm_debugfs.c
@@ -15,7 +15,7 @@
/* Debugfs parent dir */
static struct dentry *cosm_dbg;
-/**
+/*
* log_buf_show - Display MIC kernel log buffer
*
* log_buf addr/len is read from System.map by user space
@@ -68,7 +68,7 @@ done:
DEFINE_SHOW_ATTRIBUTE(log_buf);
-/**
+/*
* force_reset_show - Force MIC reset
*
* Invokes the force_reset COSM bus op instead of the standard reset
diff --git a/drivers/misc/mic/cosm/cosm_main.c b/drivers/misc/mic/cosm/cosm_main.c
index f9133c4f6105..ebb0eac43754 100644
--- a/drivers/misc/mic/cosm/cosm_main.c
+++ b/drivers/misc/mic/cosm/cosm_main.c
@@ -26,6 +26,7 @@ static atomic_t g_num_dev;
/**
* cosm_hw_reset - Issue a HW reset for the MIC device
* @cdev: pointer to cosm_device instance
+ * @force: force a MIC to reset even if it is already reset and ready
*/
static void cosm_hw_reset(struct cosm_device *cdev, bool force)
{
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index ab0db7a2ac8c..ffda740e20d5 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -101,7 +101,7 @@ static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
DEFINE_SHOW_ATTRIBUTE(mic_msi_irq_info);
-/**
+/*
* mic_create_debug_dir - Initialize MIC debugfs entries.
*/
void mic_create_debug_dir(struct mic_device *mdev)
@@ -124,7 +124,7 @@ void mic_create_debug_dir(struct mic_device *mdev)
&mic_msi_irq_info_fops);
}
-/**
+/*
* mic_delete_debug_dir - Uninitialize MIC debugfs entries.
*/
void mic_delete_debug_dir(struct mic_device *mdev)
@@ -132,7 +132,7 @@ void mic_delete_debug_dir(struct mic_device *mdev)
debugfs_remove_recursive(mdev->dbg_dir);
}
-/**
+/*
* mic_init_debugfs - Initialize global debugfs entry.
*/
void __init mic_init_debugfs(void)
@@ -140,7 +140,7 @@ void __init mic_init_debugfs(void)
mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
}
-/**
+/*
* mic_exit_debugfs - Uninitialize global debugfs entry
*/
void mic_exit_debugfs(void)
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
index 433d35dc1721..85b3221b5d40 100644
--- a/drivers/misc/mic/host/mic_intr.c
+++ b/drivers/misc/mic/host/mic_intr.c
@@ -37,6 +37,8 @@ static irqreturn_t mic_thread_fn(int irq, void *dev)
/**
* mic_interrupt - Generic interrupt handler for
* MSI and INTx based interrupts.
+ * @irq: interrupt to handle (unused)
+ * @dev: pointer to the mic_device instance
*/
static irqreturn_t mic_interrupt(int irq, void *dev)
{
@@ -180,7 +182,7 @@ static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx)
* mic_setup_msix - Initializes MSIx interrupts.
*
* @mdev: pointer to mic_device instance
- *
+ * @pdev: PCI device structure
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index be0784fd1635..ea4608527ea0 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -164,7 +164,6 @@ static int mic_probe(struct pci_dev *pdev,
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev) {
rc = -ENOMEM;
- dev_err(&pdev->dev, "mdev kmalloc failed rc %d\n", rc);
goto mdev_alloc_fail;
}
mdev->id = ida_simple_get(&g_mic_ida, 0, MIC_MAX_NUM_DEVS, GFP_KERNEL);
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index d18cda966912..f5536c1ad607 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -17,6 +17,15 @@
#include "mic_x100.h"
#include "mic_smpt.h"
+static const u16 mic_x100_intr_init[] = {
+ MIC_X100_DOORBELL_IDX_START,
+ MIC_X100_DMA_IDX_START,
+ MIC_X100_ERR_IDX_START,
+ MIC_X100_NUM_DOORBELL,
+ MIC_X100_NUM_DMA,
+ MIC_X100_NUM_ERR,
+};
+
/**
* mic_x100_write_spad - write to the scratchpad register
* @mdev: pointer to mic_device instance
@@ -112,6 +121,7 @@ static void mic_x100_disable_interrupts(struct mic_device *mdev)
/**
* mic_x100_send_sbox_intr - Send an MIC_X100_SBOX interrupt to MIC.
* @mdev: pointer to mic_device instance
+ * @doorbell: doorbell number
*/
static void mic_x100_send_sbox_intr(struct mic_device *mdev,
int doorbell)
@@ -133,6 +143,7 @@ static void mic_x100_send_sbox_intr(struct mic_device *mdev,
/**
* mic_x100_send_rdmasr_intr - Send an RDMASR interrupt to MIC.
* @mdev: pointer to mic_device instance
+ * @doorbell: doorbell number
*/
static void mic_x100_send_rdmasr_intr(struct mic_device *mdev,
int doorbell)
@@ -494,6 +505,8 @@ static u32 mic_x100_get_postcode(struct mic_device *mdev)
/**
* mic_x100_smpt_set - Update an SMPT entry with a DMA address.
* @mdev: pointer to mic_device instance
+ * @dma_addr: DMA address to use
+ * @index: entry to write to
*
* RETURNS: none.
*/
diff --git a/drivers/misc/mic/host/mic_x100.h b/drivers/misc/mic/host/mic_x100.h
index 1f727a6f609c..aebcaed6fa72 100644
--- a/drivers/misc/mic/host/mic_x100.h
+++ b/drivers/misc/mic/host/mic_x100.h
@@ -67,15 +67,6 @@
#define MIC_X100_FW_SIZE 5
#define MIC_X100_POSTCODE 0x242c
-static const u16 mic_x100_intr_init[] = {
- MIC_X100_DOORBELL_IDX_START,
- MIC_X100_DMA_IDX_START,
- MIC_X100_ERR_IDX_START,
- MIC_X100_NUM_DOORBELL,
- MIC_X100_NUM_DMA,
- MIC_X100_NUM_ERR,
-};
-
/* Host->Card(bootstrap) Interrupt Vector */
#define MIC_X100_BSP_INTERRUPT_VECTOR 229
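Moving the table out of mic_x100.h matters because a static const
array defined in a header is instantiated separately in every
translation unit that includes it, wasting space and triggering
-Wunused-const-variable warnings where it is unreferenced. The general
cure, sketched below; this patch takes the simpler route of making the
table file-local in its only user, mic_x100.c:

	/* header: declare only */
	extern const u16 mic_x100_intr_init[6];

	/* exactly one .c file: define */
	const u16 mic_x100_intr_init[6] = { /* ... */ };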
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index 781217c030a6..9cc6b2a6cf22 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -713,7 +713,7 @@ int scif_connect(scif_epd_t epd, struct scif_port_id *dst)
}
EXPORT_SYMBOL_GPL(scif_connect);
-/**
+/*
* scif_accept() - Accept a connection request from the remote node
*
* The function accepts a connection request from the remote node. Successful
@@ -997,7 +997,6 @@ static int _scif_send(scif_epd_t epd, void *msg, int len, int flags)
static int _scif_recv(scif_epd_t epd, void *msg, int len, int flags)
{
- int read_size;
struct scif_endpt *ep = (struct scif_endpt *)epd;
struct scifmsg notif_msg;
int curr_recv_len = 0, remaining_len = len, read_count;
@@ -1017,8 +1016,7 @@ static int _scif_recv(scif_epd_t epd, void *msg, int len, int flags)
* important for the Non Blocking case.
*/
curr_recv_len = min(remaining_len, read_count);
- read_size = scif_rb_get_next(&qp->inbound_q,
- msg, curr_recv_len);
+ scif_rb_get_next(&qp->inbound_q, msg, curr_recv_len);
if (ep->state == SCIFEP_CONNECTED) {
/*
* Update the read pointer only if the endpoint
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index c7c873409184..401b98e5ad79 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -99,7 +99,7 @@ int scif_reserve_dma_chan(struct scif_endpt *ep)
}
#ifdef CONFIG_MMU_NOTIFIER
-/**
+/*
* scif_rma_destroy_tcw:
*
* This routine destroys temporary cached windows
@@ -332,6 +332,7 @@ static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
* @epd: End Point Descriptor.
* @addr: virtual address to/from which to copy
* @len: length of range to copy
+ * @prot: read/write protection
* @out_offset: computed offset returned by reference.
* @out_window: allocated registered window returned by reference.
*
diff --git a/drivers/misc/mic/scif/scif_epd.c b/drivers/misc/mic/scif/scif_epd.c
index 590baca9dc7b..426687f6696b 100644
--- a/drivers/misc/mic/scif/scif_epd.c
+++ b/drivers/misc/mic/scif/scif_epd.c
@@ -104,6 +104,7 @@ void scif_cleanup_zombie_epd(void)
/**
* scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* This message is initiated by the remote node to request a connection
@@ -155,6 +156,7 @@ conreq_sendrej:
/**
* scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* An accept() on the remote node has occurred and sent this message
@@ -181,6 +183,7 @@ void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The remote connection request has finished mapping the local memory.
@@ -203,6 +206,7 @@ void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The remote connection request failed to map the local memory it was sent.
@@ -221,6 +225,7 @@ void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The remote end has rejected the connection request. Set the end
@@ -240,6 +245,7 @@ void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_discnct() - Respond to SCIF_DISCNCT interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The remote node has indicated close() has been called on its end
@@ -301,6 +307,7 @@ discnct_ack:
/**
* scif_discnt_ack() - Respond to SCIF_DISCNT_ACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side has indicated it has no more references to local resources
@@ -317,6 +324,7 @@ void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side is confirming send or receive interrupt handling is complete.
@@ -333,6 +341,7 @@ void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side is confirming send or receive interrupt handling is complete.
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
index 657fd4a20656..4fedf6183951 100644
--- a/drivers/misc/mic/scif/scif_fence.c
+++ b/drivers/misc/mic/scif/scif_fence.c
@@ -11,6 +11,7 @@
/**
* scif_recv_mark: Handle SCIF_MARK request
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has requested a mark.
@@ -33,6 +34,7 @@ void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_mark_resp: Handle SCIF_MARK_(N)ACK messages.
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has responded to a SCIF_MARK message.
@@ -56,6 +58,7 @@ void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_wait: Handle SCIF_WAIT request
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has requested waiting on a fence.
@@ -93,6 +96,7 @@ void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_wait_resp: Handle SCIF_WAIT_(N)ACK messages.
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has responded to a SCIF_WAIT message.
@@ -114,6 +118,7 @@ void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_sig_local: Handle SCIF_SIG_LOCAL request
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has requested a signal on a local offset.
@@ -135,6 +140,7 @@ void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_sig_remote: Handle SCIF_SIGNAL_REMOTE request
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has requested a signal on a remote offset.
@@ -156,6 +162,7 @@ void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_sig_resp: Handle SCIF_SIG_(N)ACK messages.
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* The peer has responded to a signal request.
@@ -280,12 +287,12 @@ alloc_fail:
return err;
}
-/*
+/**
* scif_prog_signal:
- * @epd - Endpoint Descriptor
- * @offset - registered address to write @val to
- * @val - Value to be written at @offset
- * @type - Type of the window.
+ * @epd: Endpoint Descriptor
+ * @offset: registered address to write @val to
+ * @val: Value to be written at @offset
+ * @type: Type of the window.
*
* Arrange to write a value to the registered offset after ensuring that the
* offset provided is indeed valid.
@@ -501,12 +508,12 @@ retry:
/**
* scif_send_fence_signal:
- * @epd - endpoint descriptor
- * @loff - local offset
- * @lval - local value to write to loffset
- * @roff - remote offset
- * @rval - remote value to write to roffset
- * @flags - flags
+ * @epd: endpoint descriptor
+ * @loff: local offset
+ * @lval: local value to write to loffset
+ * @roff: remote offset
+ * @rval: remote value to write to roffset
+ * @flags: flags
*
* Sends a remote fence signal request
*/
@@ -577,10 +584,11 @@ static void scif_fence_mark_cb(void *arg)
atomic_dec(&ep->rma_info.fence_refcount);
}
-/*
+/**
* _scif_fence_mark:
+ * @epd: endpoint descriptor
+ * @mark: DMA mark to set-up
*
- * @epd - endpoint descriptor
* Set up a mark for this endpoint and return the value of the mark.
*/
int _scif_fence_mark(scif_epd_t epd, int *mark)
diff --git a/drivers/misc/mic/scif/scif_nm.c b/drivers/misc/mic/scif/scif_nm.c
index c537df84539a..c4d9422082b7 100644
--- a/drivers/misc/mic/scif/scif_nm.c
+++ b/drivers/misc/mic/scif/scif_nm.c
@@ -14,6 +14,8 @@
/**
* scif_invalidate_ep() - Set state for all connected endpoints
* to disconnected and wake up all send/recv waitqueues
+ *
+ * @node: Node to invalidate
*/
static void scif_invalidate_ep(int node)
{
@@ -99,11 +101,10 @@ void scif_send_acks(struct scif_dev *dev)
}
}
-/*
- * scif_cleanup_scifdev
- *
+/**
+ * scif_cleanup_scifdev - Uninitialize SCIF data structures for remote
+ * SCIF device.
* @dev: Remote SCIF device.
- * Uninitialize SCIF data structures for remote SCIF device.
*/
void scif_cleanup_scifdev(struct scif_dev *dev)
{
@@ -136,8 +137,8 @@ void scif_cleanup_scifdev(struct scif_dev *dev)
scif_cleanup_qp(dev);
}
-/*
- * scif_remove_node:
+/**
+ * scif_remove_node
*
* @node: Node to remove
*/
@@ -162,9 +163,9 @@ static int scif_send_rmnode_msg(int node, int remove_node)
}
/**
- * scif_node_disconnect:
+ * scif_node_disconnect
*
- * @node_id[in]: source node id.
+ * @node_id: source node id [in]
* @mgmt_initiated: Disconnection initiated from the mgmt node
*
* Disconnect a node from the scif network.
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index ea084626fe11..e0748be373f1 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -443,6 +443,7 @@ static void scif_deinit_p2p_info(struct scif_dev *scifdev,
/**
* scif_node_connect: Respond to SCIF_NODE_CONNECT interrupt message
+ * @scifdev: SCIF device
* @dst: Destination node
*
* Connect the src and dst node by setting up the p2p connection
@@ -719,7 +720,7 @@ scif_init(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_exit() - Respond to SCIF_EXIT interrupt message
* @scifdev: Remote SCIF device node
- * @msg: Interrupt message
+ * @unused: Interrupt message (unused)
*
* This function stops the SCIF interface for the node which sent
* the SCIF_EXIT message and starts waiting for that node to
@@ -740,7 +741,7 @@ scif_exit(struct scif_dev *scifdev, struct scifmsg *unused)
/**
* scif_exitack() - Respond to SCIF_EXIT_ACK interrupt message
* @scifdev: Remote SCIF device node
- * @msg: Interrupt message
+ * @unused: Interrupt message (unused)
*
*/
static __always_inline void
@@ -930,6 +931,7 @@ local_error:
/**
* scif_node_add_nack: Respond to SCIF_NODE_ADD_NACK interrupt message
+ * @scifdev: Remote SCIF device node
* @msg: Interrupt message
*
* SCIF_NODE_ADD failed, so inform the waiting wq.
@@ -946,8 +948,9 @@ scif_node_add_nack(struct scif_dev *scifdev, struct scifmsg *msg)
}
}
-/*
+/**
* scif_node_remove: Handle SCIF_NODE_REMOVE message
+ * @scifdev: Remote SCIF device node
* @msg: Interrupt message
*
* Handle node removal.
@@ -962,8 +965,9 @@ scif_node_remove(struct scif_dev *scifdev, struct scifmsg *msg)
scif_handle_remove_node(node);
}
-/*
+/**
* scif_node_remove_ack: Handle SCIF_NODE_REMOVE_ACK message
+ * @scifdev: Remote SCIF device node
* @msg: Interrupt message
*
* The peer has acked a SCIF_NODE_REMOVE message.
@@ -979,6 +983,7 @@ scif_node_remove_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_get_node_info: Respond to SCIF_GET_NODE_INFO interrupt message
+ * @scifdev: Remote SCIF device node
* @msg: Interrupt message
*
* Retrieve node info, i.e. maxid and total, from the mgmt node.
@@ -1058,6 +1063,7 @@ static void (*scif_intr_func[SCIF_MAX_MSG + 1])
scif_recv_sig_resp, /* SCIF_SIG_NACK */
};
+static int scif_max_msg_id = SCIF_MAX_MSG;
/**
* scif_nodeqp_msg_handler() - Common handler for node messages
* @scifdev: Remote device to respond to
@@ -1067,8 +1073,6 @@ static void (*scif_intr_func[SCIF_MAX_MSG + 1])
* This routine calls the appropriate routine to handle a Node Qp
* message receipt
*/
-static int scif_max_msg_id = SCIF_MAX_MSG;
-
static void
scif_nodeqp_msg_handler(struct scif_dev *scifdev,
struct scif_qp *qp, struct scifmsg *msg)
@@ -1117,7 +1121,7 @@ void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp)
/**
* scif_loopb_wq_handler - Loopback Workqueue Handler.
- * @work: loop back work
+ * @unused: loop back work (unused)
*
* This work queue routine is invoked by the loopback work queue handler.
* It grabs the recv lock, dequeues any available messages from the head
diff --git a/drivers/misc/mic/scif/scif_ports.c b/drivers/misc/mic/scif/scif_ports.c
index 547a71285069..4bdb5ef9a139 100644
--- a/drivers/misc/mic/scif/scif_ports.c
+++ b/drivers/misc/mic/scif/scif_ports.c
@@ -14,11 +14,11 @@
struct idr scif_ports;
-/*
+/**
* struct scif_port - SCIF port information
*
- * @ref_cnt - Reference count since there can be multiple endpoints
- * created via scif_accept(..) simultaneously using a port.
+ * @ref_cnt: Reference count since there can be multiple endpoints
+ * created via scif_accept(..) simultaneously using a port.
*/
struct scif_port {
int ref_cnt;
@@ -27,7 +27,8 @@ struct scif_port {
/**
* __scif_get_port - Reserve a specified port # for SCIF and add it
* to the global list.
- * @port : port # to be reserved.
+ * @start: lowest port # to be reserved (inclusive).
+ * @end: highest port # to be reserved (exclusive).
*
* @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
* On memory allocation failure, returns -ENOMEM.
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 406cd5abfa72..de8f61efaef5 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -458,7 +458,7 @@ static void scif_destroy_remote_lookup(struct scif_dev *remote_dev,
/**
* scif_create_remote_window:
- * @ep: end point
+ * @scifdev: SCIF device
* @nr_pages: number of pages in window
*
* Allocate and prepare a remote registration window.
@@ -500,7 +500,6 @@ error_ret:
/**
* scif_destroy_remote_window:
- * @ep: end point
* @window: remote registration window
*
* Deallocate resources for remote window.
@@ -1037,6 +1036,7 @@ void scif_free_window_offset(struct scif_endpt *ep,
/**
* scif_alloc_req: Respond to SCIF_ALLOC_REQ interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side is requesting a memory allocation.
@@ -1072,6 +1072,7 @@ error:
/**
* scif_alloc_gnt_rej: Respond to SCIF_ALLOC_GNT/REJ interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remote side responded to a memory allocation.
@@ -1096,6 +1097,7 @@ void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_free_virt: Respond to SCIF_FREE_VIRT interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Free up memory kmalloc'd earlier.
@@ -1134,6 +1136,7 @@ scif_fixup_aper_base(struct scif_dev *dev, struct scif_window *window)
/**
* scif_recv_reg: Respond to SCIF_REGISTER interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Update remote window list with a new registered window.
@@ -1170,6 +1173,7 @@ void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_unreg: Respond to SCIF_UNREGISTER interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Remove window from remote registration list;
@@ -1235,6 +1239,7 @@ error:
/**
* scif_recv_reg_ack: Respond to SCIF_REGISTER_ACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Wake up the window waiting to complete registration.
@@ -1253,6 +1258,7 @@ void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_reg_nack: Respond to SCIF_REGISTER_NACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Wake up the window waiting to inform it that registration
@@ -1272,6 +1278,7 @@ void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_unreg_ack: Respond to SCIF_UNREGISTER_ACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Wake up the window waiting to complete unregistration.
@@ -1290,6 +1297,7 @@ void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
/**
* scif_recv_unreg_nack: Respond to SCIF_UNREGISTER_NACK interrupt message
+ * @scifdev: SCIF device
* @msg: Interrupt message
*
* Wake up the window waiting to inform it that unregistration
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 85942f6717c5..55e7f21e51f4 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -438,7 +438,7 @@ error:
/*
* The config ops structure as defined by virtio config
*/
-static struct virtio_config_ops vop_vq_config_ops = {
+static const struct virtio_config_ops vop_vq_config_ops = {
.get_features = vop_get_features,
.finalize_features = vop_finalize_features,
.get = vop_get,
@@ -546,7 +546,7 @@ static int vop_match_desc(struct device *dev, void *data)
return vdev->desc == (void __iomem *)data;
}
-static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl *dc)
+static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl __iomem *dc)
{
return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev);
}
@@ -614,7 +614,6 @@ static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
struct mic_device_desc __iomem *d;
struct mic_device_ctrl __iomem *dc;
struct device *dev;
- int ret;
for (i = sizeof(struct mic_bootparam);
i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
@@ -644,7 +643,7 @@ static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
&dc->config_change);
put_device(dev);
_vop_handle_config_change(d, i, vpdev);
- ret = _vop_remove_device(d, i, vpdev);
+ _vop_remove_device(d, i, vpdev);
if (remove) {
iowrite8(0, &dc->config_change);
iowrite8(0, &dc->guest_ack);
@@ -763,7 +762,7 @@ static void vop_driver_remove(struct vop_device *vpdev)
kfree(vi);
}
-static struct vop_device_id id_table[] = {
+static const struct vop_device_id id_table[] = {
{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
{ 0 },
};
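Const-qualifying the ops and id tables lets the linker place them in
.rodata, so a stray write faults instead of silently redirecting a
function pointer. Shape of the change, with placeholder names:

	/* writable; a wild write can corrupt .get */
	static struct some_ops ops_rw = { .get = get_impl };

	/* read-only; writes trap at runtime */
	static const struct some_ops ops_ro = { .get = get_impl };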
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index c8e19bfb5ef9..e3b99a39d207 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -273,11 +273,11 @@ static int read_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn,
}
/**
- * Read the template version from the AFU
- * dev: the device for the AFU
- * fn: the AFU offsets
- * len: outputs the template length
- * version: outputs the major<<8,minor version
+ * read_template_version - Read the template version from the AFU
+ * @dev: the device for the AFU
+ * @fn: the AFU offsets
+ * @len: outputs the template length
+ * @version: outputs the version as (major << 8) | minor
*
* Returns 0 on success, negative on failure
*/
@@ -476,10 +476,10 @@ static int validate_afu(struct pci_dev *dev, struct ocxl_afu_config *afu)
}
/**
- * Populate AFU metadata regarding LPC memory
- * dev: the device for the AFU
- * fn: the AFU offsets
- * afu: the AFU struct to populate the LPC metadata into
+ * read_afu_lpc_memory_info - Populate AFU metadata regarding LPC memory
+ * @dev: the device for the AFU
+ * @fn: the AFU offsets
+ * @afu: the AFU struct to populate the LPC metadata into
*
* Returns 0 on success, negative on failure
*/
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 60828af7506a..8d2b7135738e 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -135,6 +135,7 @@ static DEFINE_MUTEX(pch_phub_mutex);
/**
* pch_phub_read_modify_write_reg() - Reading modifying and writing register
+ * @chip: Pointer to the PHUB register structure
* @reg_addr_offset: Register offset address value.
* @data: Writing value.
* @mask: Mask value.
@@ -147,9 +148,8 @@ static void pch_phub_read_modify_write_reg(struct pch_phub_reg *chip,
iowrite32(((ioread32(reg_addr) & ~mask)) | data, reg_addr);
}
-#ifdef CONFIG_PM
/* pch_phub_save_reg_conf - saves register configuration */
-static void pch_phub_save_reg_conf(struct pci_dev *pdev)
+static void __maybe_unused pch_phub_save_reg_conf(struct pci_dev *pdev)
{
unsigned int i;
struct pch_phub_reg *chip = pci_get_drvdata(pdev);
@@ -210,7 +210,7 @@ static void pch_phub_save_reg_conf(struct pci_dev *pdev)
}
/* pch_phub_restore_reg_conf - restore register configuration */
-static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
+static void __maybe_unused pch_phub_restore_reg_conf(struct pci_dev *pdev)
{
unsigned int i;
struct pch_phub_reg *chip = pci_get_drvdata(pdev);
@@ -270,10 +270,10 @@ static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET);
}
-#endif
/**
* pch_phub_read_serial_rom() - Reading Serial ROM
+ * @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM offset address to read.
* @data: Read buffer for specified Serial ROM value.
*/
@@ -288,6 +288,7 @@ static void pch_phub_read_serial_rom(struct pch_phub_reg *chip,
/**
* pch_phub_write_serial_rom() - Writing Serial ROM
+ * @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM offset address.
* @data: Serial ROM value to write.
*/
@@ -326,6 +327,7 @@ static int pch_phub_write_serial_rom(struct pch_phub_reg *chip,
/**
* pch_phub_read_serial_rom_val() - Read Serial ROM value
+ * @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM address offset value.
* @data: Serial ROM value to read.
*/
@@ -342,6 +344,7 @@ static void pch_phub_read_serial_rom_val(struct pch_phub_reg *chip,
/**
* pch_phub_write_serial_rom_val() - writing Serial ROM value
+ * @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM address offset value.
* @data: Serial ROM value.
*/
@@ -443,7 +446,7 @@ static int pch_phub_gbe_serial_rom_conf_mp(struct pch_phub_reg *chip)
/**
* pch_phub_read_gbe_mac_addr() - Read Gigabit Ethernet MAC address
- * @offset_address: Gigabit Ethernet MAC address offset value.
+ * @chip: Pointer to the PHUB register structure
* @data: Buffer of the Gigabit Ethernet MAC address value.
*/
static void pch_phub_read_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
@@ -455,7 +458,7 @@ static void pch_phub_read_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
/**
* pch_phub_write_gbe_mac_addr() - Write MAC address
- * @offset_address: Gigabit Ethernet MAC address offset value.
+ * @chip: Pointer to the PHUB register structure
* @data: Gigabit Ethernet MAC address value.
*/
static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
@@ -835,48 +838,19 @@ static void pch_phub_remove(struct pci_dev *pdev)
kfree(chip);
}
-#ifdef CONFIG_PM
-
-static int pch_phub_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pch_phub_suspend(struct device *dev_d)
{
- int ret;
-
- pch_phub_save_reg_conf(pdev);
- ret = pci_save_state(pdev);
- if (ret) {
- dev_err(&pdev->dev,
- " %s -pci_save_state returns %d\n", __func__, ret);
- return ret;
- }
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ device_wakeup_disable(dev_d);
return 0;
}
-static int pch_phub_resume(struct pci_dev *pdev)
+static int __maybe_unused pch_phub_resume(struct device *dev_d)
{
- int ret;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(&pdev->dev,
- "%s-pci_enable_device failed(ret=%d) ", __func__, ret);
- return ret;
- }
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pch_phub_restore_reg_conf(pdev);
+ device_wakeup_disable(dev_d);
return 0;
}
-#else
-#define pch_phub_suspend NULL
-#define pch_phub_resume NULL
-#endif /* CONFIG_PM */
static const struct pci_device_id pch_phub_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH1_PHUB), 1, },
@@ -888,13 +862,14 @@ static const struct pci_device_id pch_phub_pcidev_id[] = {
};
MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
+static SIMPLE_DEV_PM_OPS(pch_phub_pm_ops, pch_phub_suspend, pch_phub_resume);
+
static struct pci_driver pch_phub_driver = {
.name = "pch_phub",
.id_table = pch_phub_pcidev_id,
.probe = pch_phub_probe,
.remove = pch_phub_remove,
- .suspend = pch_phub_suspend,
- .resume = pch_phub_resume
+ .driver.pm = &pch_phub_pm_ops,
};
module_pci_driver(pch_phub_driver);
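Both this and the phantom conversion below follow the same recipe:
legacy PCI .suspend/.resume hooks become generic dev_pm_ops, the PCI
core now performs pci_save_state() and the power-state transitions
itself, and __maybe_unused replaces the #ifdef CONFIG_PM guards. The
resulting pattern, with placeholder names:

	static int __maybe_unused foo_suspend(struct device *dev)
	{
		/* device-specific quiesce only; no PCI plumbing */
		return 0;
	}

	static int __maybe_unused foo_resume(struct device *dev)
	{
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct pci_driver foo_driver = {
		.driver.pm = &foo_pm_ops,
		/* ... */
	};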
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 6a5ed0e25ff1..ce72e46a2e73 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -457,31 +457,26 @@ static void phantom_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifdef CONFIG_PM
-static int phantom_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused phantom_suspend(struct device *dev_d)
{
- struct phantom_device *dev = pci_get_drvdata(pdev);
+ struct phantom_device *dev = dev_get_drvdata(dev_d);
iowrite32(0, dev->caddr + PHN_IRQCTL);
ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */
- synchronize_irq(pdev->irq);
+ synchronize_irq(to_pci_dev(dev_d)->irq);
return 0;
}
-static int phantom_resume(struct pci_dev *pdev)
+static int __maybe_unused phantom_resume(struct device *dev_d)
{
- struct phantom_device *dev = pci_get_drvdata(pdev);
+ struct phantom_device *dev = dev_get_drvdata(dev_d);
iowrite32(0, dev->caddr + PHN_IRQCTL);
return 0;
}
-#else
-#define phantom_suspend NULL
-#define phantom_resume NULL
-#endif
static struct pci_device_id phantom_pci_tbl[] = {
{ .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050,
@@ -491,13 +486,14 @@ static struct pci_device_id phantom_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, phantom_pci_tbl);
+static SIMPLE_DEV_PM_OPS(phantom_pm_ops, phantom_suspend, phantom_resume);
+
static struct pci_driver phantom_pci_driver = {
.name = "phantom",
.id_table = phantom_pci_tbl,
.probe = phantom_probe,
.remove = phantom_remove,
- .suspend = phantom_suspend,
- .resume = phantom_resume
+ .driver.pm = &phantom_pm_ops,
};
static CLASS_ATTR_STRING(version, 0444, PHANTOM_VERSION);
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index b7f510676cd6..7236ae527b19 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -496,9 +496,8 @@ static void pti_tty_cleanup(struct tty_struct *tty)
* pti_tty_driver_write()- Write trace debugging data through the char
* interface to the PTI HW. Part of the misc device implementation.
*
- * @filp: Contains private data which is used to obtain
- * master, channel write ID.
- * @data: trace data to be written.
+ * @tty: tty struct containing pti information.
+ * @buf: trace data to be written.
* @len: # of bytes to write.
*
* Returns:
@@ -734,8 +733,8 @@ static struct console pti_console = {
* pti_port_activate()- Used to start/initialize any items upon
* first opening of tty_port().
*
- * @port- The tty port number of the PTI device.
- * @tty- The tty struct associated with this device.
+ * @port: The tty port number of the PTI device.
+ * @tty: The tty struct associated with this device.
*
* Returns:
* always returns 0
@@ -755,7 +754,7 @@ static int pti_port_activate(struct tty_port *port, struct tty_struct *tty)
* pti_port_shutdown()- Used to stop/shutdown any items upon the
* last tty port close.
*
- * @port- The tty port number of the PTI device.
+ * @port: The tty port number of the PTI device.
*
* Notes: The primary purpose of the PTI tty port 0 is to hook
* the syslog daemon to it; thus this port will be open for a
@@ -781,8 +780,8 @@ static const struct tty_port_operations tty_port_ops = {
* pti_pci_probe()- Used to detect pti on the pci bus and set
* things up in the driver.
*
- * @pdev- pci_dev struct values for pti.
- * @ent- pci_device_id struct for pti driver.
+ * @pdev: pci_dev struct values for pti.
+ * @ent: pci_device_id struct for pti driver.
*
* Returns:
* 0 for success
@@ -899,7 +898,6 @@ static struct pci_driver pti_pci_driver = {
};
/**
- *
* pti_init()- Overall entry/init call to the pti driver.
* It starts the registration process with the kernel.
*
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index b1521112dbbd..723825524ea0 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
+#include <linux/sync_core.h>
#include <linux/prefetch.h>
#include "gru.h"
#include "grutables.h"
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index f7224f90f413..1d75d5e540bc 100644
--- a/drivers/misc/sgi-gru/gruhandles.c
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -16,6 +16,7 @@
#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
#define CLKS2NSEC(c) ((c) *1000000000 / local_cpu_data->itc_freq)
#else
+#include <linux/sync_core.h>
#include <asm/tsc.h>
#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
#define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz)
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 0197441a1eae..f6e600bfac5d 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -16,6 +16,7 @@
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
+#include <linux/sync_core.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/export.h>
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 5fd94d836070..61b03fcefb13 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -223,7 +223,7 @@ xpc_disconnect(int ch_number)
}
EXPORT_SYMBOL_GPL(xpc_disconnect);
-int __init
+static int __init
xp_init(void)
{
enum xp_retval ret;
@@ -246,7 +246,7 @@ xp_init(void)
module_init(xp_init);
-void __exit
+static void __exit
xp_exit(void)
{
if (is_uv())
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
index cb57ac6ab4c3..6cc31789b38d 100644
--- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c
@@ -1,7 +1,7 @@
/*
* SRAM protect-exec region helper functions
*
- * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
* Dave Gerlach
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 14136d2cc8f9..f4ddd1e67015 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -18,7 +18,8 @@
extern void st_kim_recv(void *, const unsigned char *, long);
void st_int_recv(void *, const unsigned char *, long);
-/* function pointer pointing to either,
+/*
+ * function pointer pointing to either,
* st_kim_recv during registration to receive fw download responses
* st_int_recv after registration to receive proto stack responses
*/
@@ -60,7 +61,8 @@ int st_get_uart_wr_room(struct st_data_s *st_gdata)
return tty->ops->write_room(tty);
}
-/* can be called in from
+/*
+ * can be called in from
* -- KIM (during fw download)
* -- ST Core (during st_write)
*
@@ -100,7 +102,8 @@ static void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
kfree_skb(st_gdata->rx_skb);
return;
}
- /* this cannot fail
+ /*
+ * this cannot fail
* this shouldn't take long
* - should be just skb_queue_tail for the
* protocol stack driver
@@ -121,9 +124,8 @@ static void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
return;
}
-/**
- * st_reg_complete -
- * to call registration complete callbacks
+/*
+ * st_reg_complete - to call registration complete callbacks
* of all protocol stack drivers
* This function is being called with spin lock held, protocol drivers are
* only expected to complete their waits and do nothing more than that.
@@ -156,21 +158,24 @@ static inline int st_check_data_len(struct st_data_s *st_gdata,
pr_debug("len %d room %d", len, room);
if (!len) {
- /* Received packet has only packet header and
+ /*
+ * Received packet has only packet header and
* has zero length payload. So, ask ST CORE to
* forward the packet to protocol driver (BT/FM/GPS)
*/
st_send_frame(chnl_id, st_gdata);
} else if (len > room) {
- /* Received packet's payload length is larger.
+ /*
+ * Received packet's payload length is larger.
* We can't accommodate it in created skb.
*/
pr_err("Data length is too large len %d room %d", len,
room);
kfree_skb(st_gdata->rx_skb);
} else {
- /* Packet header has non-zero payload length and
+ /*
+ * Packet header has non-zero payload length and
* we have enough space in created skb. Lets read
* payload data */
st_gdata->rx_state = ST_W4_DATA;
@@ -178,8 +183,7 @@ static inline int st_check_data_len(struct st_data_s *st_gdata,
return len;
}
- /* Change ST state to continue to process next
- * packet */
+ /* Change ST state to continue to process next packet */
st_gdata->rx_state = ST_W4_PACKET_TYPE;
st_gdata->rx_skb = NULL;
st_gdata->rx_count = 0;
@@ -188,7 +192,7 @@ static inline int st_check_data_len(struct st_data_s *st_gdata,
return 0;
}
-/**
+/*
* st_wakeup_ack - internal function for action when wake-up ack
* received
*/
@@ -199,7 +203,8 @@ static inline void st_wakeup_ack(struct st_data_s *st_gdata,
unsigned long flags = 0;
spin_lock_irqsave(&st_gdata->lock, flags);
- /* de-Q from waitQ and Q in txQ now that the
+ /*
+ * de-Q from waitQ and Q in txQ now that the
* chip is awake
*/
while ((waiting_skb = skb_dequeue(&st_gdata->tx_waitq)))
@@ -213,7 +218,7 @@ static inline void st_wakeup_ack(struct st_data_s *st_gdata,
st_tx_wakeup(st_gdata);
}
-/**
+/*
* st_int_recv - ST's internal receive function.
* Decodes received RAW data and forwards to corresponding
* client drivers (Bluetooth, FM, GPS, etc.).
@@ -262,8 +267,10 @@ void st_int_recv(void *disc_data,
/* Waiting for complete packet ? */
case ST_W4_DATA:
pr_debug("Complete pkt received");
- /* Ask ST CORE to forward
- * the packet to protocol driver */
+ /*
+ * Ask ST CORE to forward
+ * the packet to protocol driver
+ */
st_send_frame(st_gdata->rx_chnl, st_gdata);
st_gdata->rx_state = ST_W4_PACKET_TYPE;
@@ -276,7 +283,7 @@ void st_int_recv(void *disc_data,
&st_gdata->rx_skb->data
[proto->offset_len_in_hdr];
pr_debug("plen pointing to %x\n", *plen);
- if (proto->len_size == 1)/* 1 byte len field */
+ if (proto->len_size == 1) /* 1 byte len field */
payload_len = *(unsigned char *)plen;
else if (proto->len_size == 2)
payload_len =
@@ -294,18 +301,23 @@ void st_int_recv(void *disc_data,
}
/* end of if rx_count */
- /* Check first byte of packet and identify module
- * owner (BT/FM/GPS) */
+
+ /*
+ * Check first byte of packet and identify module
+ * owner (BT/FM/GPS)
+ */
switch (*ptr) {
case LL_SLEEP_IND:
case LL_SLEEP_ACK:
case LL_WAKE_UP_IND:
pr_debug("PM packet");
- /* this takes appropriate action based on
+ /*
+ * this takes appropriate action based on
* sleep state received --
*/
st_ll_sleep_state(st_gdata, *ptr);
- /* if WAKEUP_IND collides copy from waitq to txq
+ /*
+ * if WAKEUP_IND collides copy from waitq to txq
* and assume chip awake
*/
spin_unlock_irqrestore(&st_gdata->lock, flags);
@@ -331,7 +343,8 @@ void st_int_recv(void *disc_data,
default:
type = *ptr;
- /* Default case means non-HCILL packets,
+ /*
+ * Default case means non-HCILL packets,
* possibilities are packets for:
* (a) valid protocol - Supported Protocols within
* the ST_MAX_CHANNELS.
@@ -377,7 +390,7 @@ done:
return;
}
-/**
+/*
* st_int_dequeue - internal de-Q function.
* If the previous data set was not written
* completely, return that skb which has the pending data.
@@ -396,7 +409,7 @@ static struct sk_buff *st_int_dequeue(struct st_data_s *st_gdata)
return skb_dequeue(&st_gdata->txq);
}
-/**
+/*
* st_int_enqueue - internal Q-ing function.
* Will either Q the skb to txq or the tx_waitq
* depending on the ST LL state.
@@ -561,7 +574,8 @@ long st_register(struct st_proto_s *new_proto)
/* release lock previously held - re-locked below */
spin_unlock_irqrestore(&st_gdata->lock, flags);
- /* this may take a while to complete
+ /*
+ * this may take a while to complete
* since it involves BT fw download
*/
err = st_kim_start(st_gdata->kim_data);
@@ -583,7 +597,8 @@ long st_register(struct st_proto_s *new_proto)
clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
st_recv = st_int_recv;
- /* this is where all pending registration
+ /*
+ * this is where all pending registration
* are signalled to be complete by calling callback functions
*/
if ((st_gdata->protos_registered != ST_EMPTY) &&
@@ -593,7 +608,8 @@ long st_register(struct st_proto_s *new_proto)
}
clear_bit(ST_REG_PENDING, &st_gdata->st_state);
- /* check for already registered once more,
+ /*
+ * check for already registered once more,
* since the above check is old
*/
if (st_gdata->is_registered[new_proto->chnl_id] == true) {
@@ -622,7 +638,8 @@ long st_register(struct st_proto_s *new_proto)
}
EXPORT_SYMBOL_GPL(st_register);
-/* to unregister a protocol -
+/*
+ * to unregister a protocol -
* to be called from protocol stack driver
*/
long st_unregister(struct st_proto_s *proto)
@@ -742,7 +759,8 @@ static void st_tty_close(struct tty_struct *tty)
pr_info("%s ", __func__);
- /* TODO:
+ /*
+ * TODO:
* if a protocol has been registered & line discipline
* un-installed for some reason - what should be done ?
*/
@@ -795,7 +813,8 @@ static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
pr_debug("done %s", __func__);
}
-/* wake-up function called in from the TTY layer
+/*
+ * wake-up function called in from the TTY layer
* inside the internal wakeup function will be called
*/
static void st_tty_wakeup(struct tty_struct *tty)
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index a36ed1ff5967..f2f6cab97c08 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -30,7 +30,7 @@ static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
-/**
+/*
* st_get_plat_device -
* function which returns the reference to the platform device
* requested by id. As of now only 1 such device exists (id=0)
@@ -43,7 +43,7 @@ static struct platform_device *st_get_plat_device(int id)
return st_kim_devices[id];
}
-/**
+/*
* validate_firmware_response -
* function to return whether the firmware response was proper
* in case of error don't complete so that waiting for proper
@@ -55,7 +55,8 @@ static void validate_firmware_response(struct kim_data_s *kim_gdata)
if (!skb)
return;
- /* these magic numbers are the position in the response buffer which
+ /*
+ * these magic numbers are the position in the response buffer which
* allows us to distinguish whether the response is for the read
* version info. command
*/
@@ -79,7 +80,8 @@ static void validate_firmware_response(struct kim_data_s *kim_gdata)
kfree_skb(skb);
}
-/* check for data len received inside kim_int_recv
+/*
+ * check for data len received inside kim_int_recv
* most often hit the last case to update state to waiting for data
*/
static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
@@ -91,14 +93,16 @@ static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
if (!len) {
validate_firmware_response(kim_gdata);
} else if (len > room) {
- /* Received packet's payload length is larger.
+ /*
+ * Received packet's payload length is larger.
* We can't accommodate it in created skb.
*/
pr_err("Data length is too large len %d room %d", len,
room);
kfree_skb(kim_gdata->rx_skb);
} else {
- /* Packet header has non-zero payload length and
+ /*
+ * Packet header has non-zero payload length and
* we have enough space in the created skb. Let's read
* payload data */
kim_gdata->rx_state = ST_W4_DATA;
@@ -106,8 +110,10 @@ static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
return len;
}
- /* Change ST LL state to continue to process next
- * packet */
+ /*
+ * Change ST LL state to continue to process next
+ * packet
+ */
kim_gdata->rx_state = ST_W4_PACKET_TYPE;
kim_gdata->rx_skb = NULL;
kim_gdata->rx_count = 0;
@@ -115,7 +121,7 @@ static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
return 0;
}
-/**
+/*
* kim_int_recv - receive function called during firmware download
* firmware download responses on different UART drivers
* have been observed to come in bursts of different
@@ -216,7 +222,8 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
return timeout ? -ERESTARTSYS : -ETIMEDOUT;
}
reinit_completion(&kim_gdata->kim_rcvd);
- /* the positions 12 & 13 in the response buffer provide with the
+ /*
+ * the positions 12 & 13 in the response buffer provide the
* chip, major & minor numbers
*/
@@ -263,7 +270,7 @@ static void skip_change_remote_baud(unsigned char **ptr, long *len)
}
}
-/**
+/*
* download_firmware -
* internal function which parses through the .bts firmware
* script file, interprets SEND and DELAY actions only as of now
@@ -295,7 +302,8 @@ static long download_firmware(struct kim_data_s *kim_gdata)
}
ptr = (void *)kim_gdata->fw_entry->data;
len = kim_gdata->fw_entry->size;
- /* bts_header to remove out magic number and
+ /*
+ * skip the bts_header to remove the magic number and
* version
*/
ptr += sizeof(struct bts_header);
@@ -313,8 +321,10 @@ static long download_firmware(struct kim_data_s *kim_gdata)
if (unlikely
(((struct hci_command *)action_ptr)->opcode ==
0xFF36)) {
- /* ignore remote change
- * baud rate HCI VS command */
+ /*
+ * ignore remote change
+ * baud rate HCI VS command
+ */
pr_warn("change remote baud"
" rate command in firmware");
skip_change_remote_baud(&ptr, &len);
@@ -346,7 +356,8 @@ static long download_firmware(struct kim_data_s *kim_gdata)
release_firmware(kim_gdata->fw_entry);
return -ETIMEDOUT;
}
- /* reinit completion before sending for the
+ /*
+ * reinit completion before sending for the
* relevant wait
*/
reinit_completion(&kim_gdata->kim_rcvd);
@@ -418,14 +429,16 @@ void st_kim_recv(void *disc_data, const unsigned char *data, long count)
struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
struct kim_data_s *kim_gdata = st_gdata->kim_data;
- /* proceed to gather all data and distinguish read fw version response
+ /*
+ * proceed to gather all data and distinguish read fw version response
* from other fw responses when data gathering is complete
*/
kim_int_recv(kim_gdata, data, count);
return;
}
-/* to signal completion of line discipline installation
+/*
+ * to signal completion of line discipline installation
* called from ST Core, upon tty_open
*/
void st_kim_complete(void *kim_data)
@@ -434,7 +447,7 @@ void st_kim_complete(void *kim_data)
complete(&kim_gdata->ldisc_installed);
}
-/**
+/*
* st_kim_start - called from ST Core upon 1st registration
* This involves toggling the chip enable gpio, reading
* the firmware version from chip, forming the fw file name
@@ -472,8 +485,10 @@ long st_kim_start(void *kim_data)
err = wait_for_completion_interruptible_timeout(
&kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME));
if (!err) {
- /* ldisc installation timeout,
- * flush uart, power cycle BT_EN */
+ /*
+ * ldisc installation timeout,
+ * flush uart, power cycle BT_EN
+ */
pr_err("ldisc installation timeout");
err = st_kim_stop(kim_gdata);
continue;
@@ -482,8 +497,10 @@ long st_kim_start(void *kim_data)
pr_info("line discipline installed");
err = download_firmware(kim_gdata);
if (err != 0) {
- /* ldisc installed but fw download failed,
- * flush uart & power cycle BT_EN */
+ /*
+ * ldisc installed but fw download failed,
+ * flush uart & power cycle BT_EN
+ */
pr_err("download firmware failed");
err = st_kim_stop(kim_gdata);
continue;
@@ -495,7 +512,7 @@ long st_kim_start(void *kim_data)
return err;
}
-/**
+/*
* st_kim_stop - stop communication with chip.
* This can be called from ST Core/KIM, on the-
* (a) last un-register when chip need not be powered there-after,
@@ -650,7 +667,7 @@ static const struct attribute_group uim_attr_grp = {
.attrs = uim_attrs,
};
-/**
+/*
* st_kim_ref - reference the core's data
* This references the per-ST platform device in the arch/xx/
* board-xx.c file.
@@ -729,8 +746,7 @@ static int kim_probe(struct platform_device *pdev)
pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
goto err_sysfs_group;
}
- /* get reference of pdev for request_firmware
- */
+ /* get reference of pdev for request_firmware */
kim_gdata->kim_pdev = pdev;
init_completion(&kim_gdata->kim_rcvd);
init_completion(&kim_gdata->ldisc_installed);
@@ -772,7 +788,8 @@ static int kim_remove(struct platform_device *pdev)
kim_gdata = platform_get_drvdata(pdev);
- /* Free the Bluetooth/FM/GPIO
+ /*
+ * Free the Bluetooth/FM/GPIO
* nShutdown gpio from the system
*/
gpio_free(pdata->nshutdown_gpio);
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index e6b40aa8fb42..228f2eb1d476 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -207,10 +207,9 @@ static void tifm_7xx1_switch_media(struct work_struct *work)
spin_unlock_irqrestore(&fm->lock, flags);
}
-#ifdef CONFIG_PM
-
-static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
+static int __maybe_unused tifm_7xx1_suspend(struct device *dev_d)
{
+ struct pci_dev *dev = to_pci_dev(dev_d);
struct tifm_adapter *fm = pci_get_drvdata(dev);
int cnt;
@@ -221,15 +220,13 @@ static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
tifm_7xx1_sock_power_off(fm->sockets[cnt]->addr);
}
- pci_save_state(dev);
- pci_enable_wake(dev, pci_choose_state(dev, state), 0);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
+ device_wakeup_disable(dev_d);
return 0;
}
-static int tifm_7xx1_resume(struct pci_dev *dev)
+static int __maybe_unused tifm_7xx1_resume(struct device *dev_d)
{
+ struct pci_dev *dev = to_pci_dev(dev_d);
struct tifm_adapter *fm = pci_get_drvdata(dev);
int rc;
unsigned long timeout;
@@ -242,11 +239,6 @@ static int tifm_7xx1_resume(struct pci_dev *dev)
if (WARN_ON(fm->num_sockets > ARRAY_SIZE(new_ids)))
return -ENXIO;
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
pci_set_master(dev);
dev_dbg(&dev->dev, "resuming host\n");
@@ -297,13 +289,6 @@ static int tifm_7xx1_resume(struct pci_dev *dev)
return 0;
}
-#else
-
-#define tifm_7xx1_suspend NULL
-#define tifm_7xx1_resume NULL
-
-#endif /* CONFIG_PM */
-
static int tifm_7xx1_dummy_has_ms_pif(struct tifm_adapter *fm,
struct tifm_dev *sock)
{
@@ -424,13 +409,14 @@ static const struct pci_device_id tifm_7xx1_pci_tbl[] = {
{ }
};
+static SIMPLE_DEV_PM_OPS(tifm_7xx1_pm_ops, tifm_7xx1_suspend, tifm_7xx1_resume);
+
static struct pci_driver tifm_7xx1_driver = {
.name = DRIVER_NAME,
.id_table = tifm_7xx1_pci_tbl,
.probe = tifm_7xx1_probe,
.remove = tifm_7xx1_remove,
- .suspend = tifm_7xx1_suspend,
- .resume = tifm_7xx1_resume,
+ .driver.pm = &tifm_7xx1_pm_ops,
};
module_pci_driver(tifm_7xx1_driver);
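
The tifm_7xx1 hunk above converts the legacy pci_driver .suspend/.resume callbacks to generic dev_pm_ops: the PCI core now saves/restores config space and handles the D-state transitions itself, so the driver callbacks shrink to device-specific work and the #ifdef CONFIG_PM block disappears. A hedged sketch of the resulting pattern, using a hypothetical driver "foo" (only the PM-relevant fields are shown):

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_wakeup.h>

/*
 * __maybe_unused avoids "defined but not used" warnings when
 * CONFIG_PM is disabled, replacing the old #ifdef/#define-NULL dance.
 */
static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce device-specific state here */
	device_wakeup_disable(dev);
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* config space is already restored by the PCI core */
	pci_set_master(pdev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name		= "foo",
	.driver.pm	= &foo_pm_ops,
	/* .id_table, .probe and .remove elided from this sketch */
};
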
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 107028e77ca3..aa91f69a5fa9 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -179,14 +179,6 @@ static int uacce_fops_release(struct inode *inode, struct file *filep)
return 0;
}
-static vm_fault_t uacce_vma_fault(struct vm_fault *vmf)
-{
- if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
- return VM_FAULT_SIGBUS;
-
- return 0;
-}
-
static void uacce_vma_close(struct vm_area_struct *vma)
{
struct uacce_queue *q = vma->vm_private_data;
@@ -199,7 +191,6 @@ static void uacce_vma_close(struct vm_area_struct *vma)
}
static const struct vm_operations_struct uacce_vm_ops = {
- .fault = uacce_vma_fault,
.close = uacce_vma_close,
};
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 7896952de1ac..fa313b634135 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -312,10 +312,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
mutex_lock(&block_mutex);
if (md) {
- if (md->usage == 2)
- check_disk_change(bdev);
ret = 0;
-
if ((mode & FMODE_WRITE) && md->read_only) {
mmc_blk_put(md);
ret = -EROFS;
@@ -1446,7 +1443,7 @@ static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
*/
if (mq->in_recovery)
mmc_blk_cqe_complete_rq(mq, req);
- else
+ else if (likely(!blk_should_fake_timeout(req->q)))
blk_mq_complete_request(req);
}
@@ -1926,7 +1923,7 @@ static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
*/
if (mq->in_recovery)
mmc_blk_cqe_complete_rq(mq, req);
- else
+ else if (likely(!blk_should_fake_timeout(req->q)))
blk_mq_complete_request(req);
}
@@ -1936,7 +1933,7 @@ void mmc_blk_mq_complete(struct request *req)
if (mq->use_cqe)
mmc_blk_cqe_complete_rq(mq, req);
- else
+ else if (likely(!blk_should_fake_timeout(req->q)))
mmc_blk_mq_complete_rq(mq, req);
}
@@ -1988,7 +1985,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
*/
if (mq->in_recovery)
mmc_blk_mq_complete_rq(mq, req);
- else
+ else if (likely(!blk_should_fake_timeout(req->q)))
blk_mq_complete_request(req);
mmc_blk_mq_dec_in_flight(mq, req);
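
All four mmc/core/block.c hunks apply the same guard. Since blk-mq moved its timeout fault injection out of blk_mq_complete_request(), a driver is expected to skip the completion when blk_should_fake_timeout() returns true, so the block layer's timeout handler can exercise the error path instead. A hedged sketch of the resulting completion helper for a hypothetical blk-mq driver:

#include <linux/blk-mq.h>

/*
 * Called from the hypothetical driver's hardware-completion path.
 * When CONFIG_FAIL_IO_TIMEOUT requests a faked timeout,
 * blk_should_fake_timeout() returns true and the completion is
 * deliberately dropped; with the option off it is a static inline
 * that always returns false, so the fast path costs nothing.
 */
static void foo_complete_rq(struct request *req)
{
	if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
}
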
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index cba7a6fcd178..447552ac25c4 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1108,24 +1108,18 @@ static int jz4740_mmc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-
-static int jz4740_mmc_suspend(struct device *dev)
+static int __maybe_unused jz4740_mmc_suspend(struct device *dev)
{
return pinctrl_pm_select_sleep_state(dev);
}
-static int jz4740_mmc_resume(struct device *dev)
+static int __maybe_unused jz4740_mmc_resume(struct device *dev)
{
return pinctrl_select_default_state(dev);
}
static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
jz4740_mmc_resume);
-#define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops)
-#else
-#define JZ4740_MMC_PM_OPS NULL
-#endif
static struct platform_driver jz4740_mmc_driver = {
.probe = jz4740_mmc_probe,
@@ -1133,7 +1127,7 @@ static struct platform_driver jz4740_mmc_driver = {
.driver = {
.name = "jz4740-mmc",
.of_match_table = of_match_ptr(jz4740_mmc_of_match),
- .pm = JZ4740_MMC_PM_OPS,
+ .pm = pm_ptr(&jz4740_mmc_pm_ops),
},
};
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 5a71f6678fd3..bce910de8b4d 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -1336,6 +1336,8 @@ static void init_extra_caps(struct realtek_pci_sdmmc *host)
mmc->caps |= MMC_CAP_1_8V_DDR;
if (pcr->extra_caps & EXTRA_CAPS_MMC_8BIT)
mmc->caps |= MMC_CAP_8_BIT_DATA;
+ if (pcr->extra_caps & EXTRA_CAPS_NO_MMC)
+ mmc->caps2 |= MMC_CAP2_NO_MMC;
}
static void realtek_init_host(struct realtek_pci_sdmmc *host)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 37b1158c1c0c..1ee866a38794 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -493,7 +493,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
{
unsigned long flags;
size_t blksize, len, chunk;
- u32 uninitialized_var(scratch);
+ u32 scratch;
u8 *buf;
DBG("PIO reading\n");
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index c5935b2f9cd1..b40f46a43fc6 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -355,9 +355,6 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint32_t retlen;
int ret = 0;
- if (!(file->f_mode & FMODE_WRITE))
- return -EPERM;
-
if (length > 4096)
return -EINVAL;
@@ -643,6 +640,48 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
pr_debug("MTD_ioctl\n");
+ /*
+ * Check the file mode to require "dangerous" commands to have write
+ * permissions.
+ */
+ switch (cmd) {
+ /* "safe" commands */
+ case MEMGETREGIONCOUNT:
+ case MEMGETREGIONINFO:
+ case MEMGETINFO:
+ case MEMREADOOB:
+ case MEMREADOOB64:
+ case MEMLOCK:
+ case MEMUNLOCK:
+ case MEMISLOCKED:
+ case MEMGETOOBSEL:
+ case MEMGETBADBLOCK:
+ case MEMSETBADBLOCK:
+ case OTPSELECT:
+ case OTPGETREGIONCOUNT:
+ case OTPGETREGIONINFO:
+ case OTPLOCK:
+ case ECCGETLAYOUT:
+ case ECCGETSTATS:
+ case MTDFILEMODE:
+ case BLKPG:
+ case BLKRRPART:
+ break;
+
+ /* "dangerous" commands */
+ case MEMERASE:
+ case MEMERASE64:
+ case MEMWRITEOOB:
+ case MEMWRITEOOB64:
+ case MEMWRITE:
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EPERM;
+ break;
+
+ default:
+ return -ENOTTY;
+ }
+
switch (cmd) {
case MEMGETREGIONCOUNT:
if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
@@ -690,9 +729,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct erase_info *erase;
- if(!(file->f_mode & FMODE_WRITE))
- return -EPERM;
-
erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
if (!erase)
ret = -ENOMEM;
@@ -985,9 +1021,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
ret = 0;
break;
}
-
- default:
- ret = -ENOTTY;
}
return ret;
@@ -1031,6 +1064,11 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
struct mtd_oob_buf32 buf;
struct mtd_oob_buf32 __user *buf_user = argp;
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EPERM;
+ break;
+ }
+
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
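
The mtdchar rework above replaces FMODE_WRITE checks scattered through individual cases with a single classification switch that runs before dispatch: safe commands fall through, destructive ones demand a writable file mode, and unknown commands fail immediately with -ENOTTY (which is why the trailing default: case could be dropped). A hedged sketch of the two-switch shape for a hypothetical character device; FOO_GET_INFO and FOO_ERASE are made-up commands:

#include <linux/fs.h>
#include <linux/ioctl.h>

#define FOO_GET_INFO	_IOR('f', 0x01, int)	/* hypothetical, read-only */
#define FOO_ERASE	_IOW('f', 0x02, int)	/* hypothetical, destructive */

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* pass 1: classify, enforce permissions, reject unknown commands */
	switch (cmd) {
	case FOO_GET_INFO:			/* "safe" */
		break;
	case FOO_ERASE:				/* "dangerous" */
		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;
		break;
	default:
		return -ENOTTY;
	}

	/* pass 2: actually service the command */
	switch (cmd) {
	case FOO_GET_INFO:
		return 0;	/* copy_to_user() of device info would go here */
	case FOO_ERASE:
		return 0;	/* the erase itself would go here */
	}
	return -ENOTTY;		/* unreachable after pass 1 */
}

Keeping classification separate from dispatch leaves the permission policy auditable in one place; a new command only needs an entry in both switches.
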
diff --git a/drivers/mtd/nand/raw/nand_ecc.c b/drivers/mtd/nand/raw/nand_ecc.c
index 09fdced659f5..b6a46b1b7781 100644
--- a/drivers/mtd/nand/raw/nand_ecc.c
+++ b/drivers/mtd/nand/raw/nand_ecc.c
@@ -131,7 +131,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
- uint32_t uninitialized_var(rp17); /* to make compiler happy */
+ uint32_t rp17;
uint32_t par; /* the cumulative parity for all data */
uint32_t tmppar; /* the cumulative parity for this iteration;
for rp12, rp14 and rp16 at the end of the
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index f86dff311464..d0dd0c446e4d 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -291,7 +291,7 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
int tacls, twrph0, twrph1;
unsigned long clkrate = clk_get_rate(info->clk);
- unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
+ unsigned long set, cfg, mask;
unsigned long flags;
/* calculate the timing information for the controller */
diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c
index 752b6cf005f7..980e332bdac4 100644
--- a/drivers/mtd/parsers/afs.c
+++ b/drivers/mtd/parsers/afs.c
@@ -126,8 +126,8 @@ static int afs_parse_v1_partition(struct mtd_info *mtd,
* Static checks cannot see that we bail out if we have an error
* reading the footer.
*/
- u_int uninitialized_var(iis_ptr);
- u_int uninitialized_var(img_ptr);
+ u_int iis_ptr;
+ u_int img_ptr;
u_int ptr;
size_t sz;
int ret;
diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig
index d89a5ea9446a..5c0e0ec2e6d1 100644
--- a/drivers/mtd/spi-nor/controllers/Kconfig
+++ b/drivers/mtd/spi-nor/controllers/Kconfig
@@ -9,17 +9,6 @@ config SPI_ASPEED_SMC
and support for the SPI flash memory controller (SPI) for
the host firmware. The implementation only supports SPI NOR.
-config SPI_CADENCE_QUADSPI
- tristate "Cadence Quad SPI controller"
- depends on OF && (ARM || ARM64 || COMPILE_TEST)
- help
- Enable support for the Cadence Quad SPI Flash controller.
-
- Cadence QSPI is a specialized controller for connecting an SPI
- Flash over 1/2/4-bit wide bus. Enable this option if you have a
- device with a Cadence QSPI controller and want to access the
- Flash as an MTD device.
-
config SPI_HISI_SFC
tristate "Hisilicon FMC SPI NOR Flash Controller(SFC)"
depends on ARCH_HISI || COMPILE_TEST
diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile
index 46e6fbe586e3..e7abba491d98 100644
--- a/drivers/mtd/spi-nor/controllers/Makefile
+++ b/drivers/mtd/spi-nor/controllers/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
-obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 5133e1be5331..0edecfdbd01f 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -599,7 +599,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
int err, pnum, scrub = 0, vol_id = vol->vol_id;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
- uint32_t uninitialized_var(crc);
+ uint32_t crc;
err = leb_read_lock(ubi, vol_id, lnum);
if (err)
diff --git a/drivers/mux/adgs1408.c b/drivers/mux/adgs1408.c
index 89096f10f4c4..12466b06692c 100644
--- a/drivers/mux/adgs1408.c
+++ b/drivers/mux/adgs1408.c
@@ -6,9 +6,9 @@
*/
#include <linux/err.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mux/driver.h>
-#include <linux/of_platform.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
@@ -59,7 +59,7 @@ static int adgs1408_probe(struct spi_device *spi)
s32 idle_state;
int ret;
- chip_id = (enum adgs1408_chip_id)of_device_get_match_data(dev);
+ chip_id = (enum adgs1408_chip_id)device_get_match_data(dev);
if (!chip_id)
chip_id = spi_get_device_id(spi)->driver_data;
@@ -119,7 +119,7 @@ MODULE_DEVICE_TABLE(of, adgs1408_of_match);
static struct spi_driver adgs1408_driver = {
.driver = {
.name = "adgs1408",
- .of_match_table = of_match_ptr(adgs1408_of_match),
+ .of_match_table = adgs1408_of_match,
},
.probe = adgs1408_probe,
.id_table = adgs1408_spi_id,
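
The adgs1408 hunk swaps the OF-only of_device_get_match_data()/of_match_ptr() pair for the firmware-agnostic property API, so the same probe path serves devicetree and ACPI enumeration and the match table no longer needs conditional compilation. A hedged sketch of the pattern for a hypothetical SPI driver:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/spi/spi.h>

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo", .data = (void *)1 },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static const struct spi_device_id foo_spi_id[] = {
	{ "foo", 1 },
	{ }
};
MODULE_DEVICE_TABLE(spi, foo_spi_id);

static int foo_probe(struct spi_device *spi)
{
	unsigned long variant;

	/* works for DT and ACPI alike */
	variant = (unsigned long)device_get_match_data(&spi->dev);
	if (!variant)	/* fall back to the legacy SPI id table */
		variant = spi_get_device_id(spi)->driver_data;

	return 0;
}

static struct spi_driver foo_driver = {
	.driver = {
		.name		= "foo",
		.of_match_table	= foo_of_match,	/* no of_match_ptr() */
	},
	.probe = foo_probe,
	.id_table = foo_spi_id,
};
module_spi_driver(foo_driver);

MODULE_LICENSE("GPL");
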
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 3dd46cd55114..88e7900853db 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -407,19 +407,34 @@ free_dst:
return err;
}
+static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
+{
+ if (bareudp->ethertype == proto)
+ return true;
+
+ if (!bareudp->multi_proto_mode)
+ return false;
+
+ if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
+ proto == htons(ETH_P_MPLS_MC))
+ return true;
+
+ if (bareudp->ethertype == htons(ETH_P_IP) &&
+ proto == htons(ETH_P_IPV6))
+ return true;
+
+ return false;
+}
+
static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bareudp_dev *bareudp = netdev_priv(dev);
struct ip_tunnel_info *info = NULL;
int err;
- if (skb->protocol != bareudp->ethertype) {
- if (!bareudp->multi_proto_mode ||
- (skb->protocol != htons(ETH_P_MPLS_MC) &&
- skb->protocol != htons(ETH_P_IPV6))) {
- err = -EINVAL;
- goto tx_error;
- }
+ if (!bareudp_proto_valid(bareudp, skb->protocol)) {
+ err = -EINVAL;
+ goto tx_error;
}
info = skb_tunnel_info(skb);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index a761092e6ac9..f929db893957 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1451,7 +1451,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
/* process all communication messages */
while (true) {
- struct ican3_msg uninitialized_var(msg);
+ struct ican3_msg msg;
ret = ican3_recv_msg(mod, &msg);
if (ret)
break;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e1c236cab2a7..c8cc14eadbb4 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -1455,7 +1455,7 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp)
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
- u32 uninitialized_var(bmcr);
+ u32 bmcr;
int err;
if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
@@ -1499,7 +1499,7 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
- u32 uninitialized_var(bmcr);
+ u32 bmcr;
int err;
if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 8d13ea370db1..66e67b24a887 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2446,6 +2446,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(port->reset)) {
dev_err(dev, "no reset\n");
+ clk_disable_unprepare(port->pclk);
return PTR_ERR(port->reset);
}
reset_control_reset(port->reset);
@@ -2501,8 +2502,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
IRQF_SHARED,
port_names[port->id],
port);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(port->pclk);
return ret;
+ }
ret = register_netdev(netdev);
if (!ret) {
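
Both gemini hunks fix the same leak class: once clk_prepare_enable() has succeeded in probe, every later error return must disable the clock again. A hedged sketch of the usual unwind shape; the clock name is illustrative and foo_setup() stands in for whichever later step can fail:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int foo_setup(struct platform_device *pdev)
{
	return 0;	/* hypothetical later probe step that may fail */
}

static int foo_probe(struct platform_device *pdev)
{
	struct clk *pclk;
	int ret;

	pclk = devm_clk_get(&pdev->dev, "PCLK");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	ret = clk_prepare_enable(pclk);
	if (ret)
		return ret;

	/* from here on, every error exit must undo the enable */
	ret = foo_setup(pdev);
	if (ret)
		goto err_clk;

	return 0;

err_clk:
	clk_disable_unprepare(pclk);
	return ret;
}
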
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 33c481d11116..71ed4c54f6d5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int k, sizeoflast;
dma_addr_t dma;
- if (type == DESC_TYPE_SKB) {
- struct sk_buff *skb = (struct sk_buff *)priv;
- int ret;
-
- ret = hns3_fill_skb_desc(ring, skb, desc);
- if (unlikely(ret < 0))
- return ret;
-
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- } else if (type == DESC_TYPE_FRAGLIST_SKB) {
+ if (type == DESC_TYPE_FRAGLIST_SKB ||
+ type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
next_to_use_head = ring->next_to_use;
+ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
+ if (unlikely(ret < 0))
+ goto fill_err;
+
ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
if (unlikely(ret < 0))
goto fill_err;
@@ -4140,8 +4136,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
return;
if (linkup) {
- netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
+ netif_carrier_on(netdev);
if (netif_msg_link(handle))
netdev_info(netdev, "link up\n");
} else {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index bb4a6327035d..36575e72a915 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -5806,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
/* to avoid rule conflicts, when the user configures rules by ethtool,
* we need to clear all arfs rules
*/
+ spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);
- spin_lock_bh(&hdev->fd_rule_lock);
ret = hclge_fd_config_rule(hdev, rule);
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5851,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
return ret;
}
+/* must be called with fd_rule_lock held */
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bool clear_list)
{
@@ -5863,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
if (!hnae3_dev_fd_supported(hdev))
return;
- spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ -5880,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bitmap_zero(hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
}
-
- spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -6263,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_fd_rule_tuples new_tuples;
+ struct hclge_fd_rule_tuples new_tuples = {};
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
u16 tmp_queue_id;
@@ -6273,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;
- memset(&new_tuples, 0, sizeof(new_tuples));
- hclge_fd_get_flow_tuples(fkeys, &new_tuples);
-
- spin_lock_bh(&hdev->fd_rule_lock);
-
/* when there is already an fd rule added by the user,
* arfs should not work
*/
+ spin_lock_bh(&hdev->fd_rule_lock);
if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
spin_unlock_bh(&hdev->fd_rule_lock);
return -EOPNOTSUPP;
}
+ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
/* check is there flow director filter existed for this flow,
* if not, create a new filter for it;
* if filter exist with different queue id, modify the filter;
@@ -6368,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
#endif
}
+/* must be called with fd_rule_lock held */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
@@ -6420,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
hdev->fd_en = enable;
clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
- if (!enable)
+
+ if (!enable) {
+ spin_lock_bh(&hdev->fd_rule_lock);
hclge_del_all_fd_entries(handle, clear);
- else
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ } else {
hclge_restore_fd_entries(handle);
+ }
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
@@ -6886,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
int i;
set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
+ spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);
+ spin_unlock_bh(&hdev->fd_rule_lock);
/* If it is not PF reset, the firmware will disable the MAC,
* so it only need to stop phy here.
@@ -9040,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
bool writen_to_tbl = false;
int ret = 0;
- /* When device is resetting, firmware is unable to handle
- * mailbox. Just record the vlan id, and remove it after
+ /* When device is resetting or reset failed, firmware is unable to
+ * handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, vport->vlan_del_fail_bmap);
return -EBUSY;
}
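
The hclge hunks move fd_rule_lock acquisition out of the helpers and into their callers (hclge_add_fd_entry(), hclge_enable_fd(), hclge_ae_stop()), so hclge_clear_arfs_rules() and hclge_del_all_fd_entries() can no longer deadlock by taking a lock their caller already holds; the "called with fd_rule_lock held" comments record the new contract. The kernel's usual way to make such a contract machine-checkable is lockdep_assert_held(); a hedged sketch with hypothetical names:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_rule_lock);

/* contract: caller holds foo_rule_lock */
static void foo_clear_rules(void)
{
	lockdep_assert_held(&foo_rule_lock);
	/* walk and delete the rule list here */
}

static void foo_replace_rules(void)
{
	spin_lock_bh(&foo_rule_lock);
	foo_clear_rules();	/* safe: no recursive acquisition */
	/* install the replacement rules */
	spin_unlock_bh(&foo_rule_lock);
}
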
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a10b022d1951..9162856de1b1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
- /* When device is resetting, firmware is unable to handle
- * mailbox. Just record the vlan id, and remove it after
+ /* When device is resetting or reset failed, firmware is unable to
+ * handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
- if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
return -EBUSY;
}
@@ -3439,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
{
struct hnae3_handle *nic = &hdev->nic;
struct hclge_vf_to_pf_msg send_msg;
+ int ret;
rtnl_lock();
- hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
- rtnl_unlock();
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
+ dev_warn(&hdev->pdev->dev,
+ "is resetting when updating port based vlan info\n");
+ rtnl_unlock();
+ return;
+ }
+
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return;
+ }
/* send msg to PF and wait update port based vlan info */
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_PORT_BASE_VLAN_CFG);
memcpy(send_msg.data, port_base_vlan_info, data_size);
- hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
-
- if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
- nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
- else
- nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ if (!ret) {
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = state;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ }
- rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0fd7eae25fe9..5afb3c9c52d2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -3206,7 +3206,7 @@ req_rx_irq_failed:
req_tx_irq_failed:
for (j = 0; j < i; j++) {
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
- irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ irq_dispose_mapping(adapter->tx_scrq[j]->irq);
}
release_sub_crqs(adapter, 1);
return rc;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f999cca37a8a..489bb5b59475 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
ret_val = e1000_disable_ulp_lpt_lp(hw, true);
- if (ret_val) {
+ if (ret_val)
e_warn("Failed to disable ULP\n");
- goto out;
- }
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8bb3db2cbd41..6e5861bfb0fa 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6224,9 +6224,18 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
+ rtnl_lock();
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IGB_DOWN, &adapter->state) ||
+ test_bit(__IGB_RESETTING, &adapter->state)) {
+ rtnl_unlock();
+ return;
+ }
+
igb_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 64786568af0d..75a8c407e815 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work)
if (!netif_running(pf->netdev))
return;
+ rtnl_lock();
otx2_stop(pf->netdev);
pf->reset_count++;
otx2_open(pf->netdev);
netif_trans_update(pf->netdev);
+ rtnl_unlock();
}
static const struct net_device_ops otx2_netdev_ops = {
@@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
+ cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index f4227517dc8e..92a3db69a6cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
+ cancel_work_sync(&vf->reset_task);
+ unregister_netdev(netdev);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
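
Both otx2 remove() hunks add cancel_work_sync() ahead of teardown: without it, a queued or in-flight reset worker could keep touching driver state while remove() frees it. A hedged sketch of the ordering; foo_priv and the drvdata layout are assumptions:

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/workqueue.h>

struct foo_priv {			/* hypothetical driver state */
	struct net_device *netdev;
	struct work_struct reset_task;
};

static void foo_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct foo_priv *priv = netdev_priv(netdev);

	/* wait for any in-flight reset work before tearing down */
	cancel_work_sync(&priv->reset_task);
	unregister_netdev(netdev);
	/* only now free the resources the worker used to touch */
}
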
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index f6a1f8666f95..a1c45b39a230 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
return 0;
}
-static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
+ phy_interface_t interface, int speed)
{
u32 val;
int ret;
+ if (interface == PHY_INTERFACE_MODE_TRGMII) {
+ mtk_w32(eth, TRGMII_MODE, INTF_MODE);
+ val = 500000000;
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+ return;
+ }
+
val = (speed == SPEED_1000) ?
INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
mtk_w32(eth, val, INTF_MODE);
@@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
state->interface))
goto err_phy;
} else {
- if (state->interface !=
- PHY_INTERFACE_MODE_TRGMII)
- mtk_gmac0_rgmii_adjust(mac->hw,
- state->speed);
+ mtk_gmac0_rgmii_adjust(mac->hw,
+ state->interface,
+ state->speed);
/* mt7623_pad_clk_setup */
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
@@ -2882,6 +2891,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->irq = eth->irq[0];
eth->netdev[id]->dev.of_node = np;
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+
return 0;
free_netdev:
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 3d9aa7da95e9..2d3e45780719 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4356,12 +4356,14 @@ end:
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ mlx4_pci_disable_device(dev);
}
static const struct pci_error_handlers mlx4_err_handler = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index bdb71332cbf2..3e44e4d820c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -183,13 +183,16 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *priv;
/* A given netdev is not a representor or not a slave of LAG configuration */
if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
return false;
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+
/* Egress acl forward to vport is supported only non-uplink representor */
return rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index eefeb1cdc2ee..245a99f69641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -551,19 +551,31 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
}
}
- tun_dst = tun_rx_dst(enc_opts.key.len);
+ if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, 0, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ } else {
+ netdev_dbg(priv->netdev,
+ "Couldn't restore tunnel, unsupported addr_type: %d\n",
+ key.enc_control.addr_type);
+ return false;
+ }
+
if (!tun_dst) {
- WARN_ON_ONCE(true);
+ netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
return false;
}
- ip_tunnel_key_init(&tun_dst->u.tun_info.key,
- key.enc_ipv4.src, key.enc_ipv4.dst,
- key.enc_ip.tos, key.enc_ip.ttl,
- 0, /* label */
- key.enc_tp.src, key.enc_tp.dst,
- key32_to_tunnel_id(key.enc_key_id.keyid),
- TUNNEL_KEY);
+ tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
if (enc_opts.key.len)
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
index 951ea26d96bc..e472ed0eacfb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB);
}
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
index 58b13192df23..2805416c32a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
@@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
}
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index 37b176801bcc..038a0f1cecec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
be32_to_cpu(enc_keyid.key->keyid));
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 081f15074cac..3b892ec301b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -419,7 +419,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
&rq->wq_ctrl);
if (err)
- return err;
+ goto err_rq_wq_destroy;
rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
@@ -470,7 +470,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
- return err;
+ goto err_rq_wq_destroy;
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
@@ -3069,6 +3069,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}
+static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
+ enum mlx5_port_status state)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int vport_admin_state;
+
+ mlx5_set_port_admin_status(mdev, state);
+
+ if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
+ return;
+
+ if (state == MLX5_PORT_UP)
+ vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ else
+ vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
+ mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
+}
+
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3101,7 +3120,7 @@ int mlx5e_open(struct net_device *netdev)
mutex_lock(&priv->state_lock);
err = mlx5e_open_locked(netdev);
if (!err)
- mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
+ mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
return err;
@@ -3135,7 +3154,7 @@ int mlx5e_close(struct net_device *netdev)
return -ENODEV;
mutex_lock(&priv->state_lock);
- mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
+ mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
err = mlx5e_close_locked(netdev);
mutex_unlock(&priv->state_lock);
@@ -5182,7 +5201,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
- mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+ mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
mlx5e_set_netdev_mtu_boundaries(priv);
mlx5e_set_dev_port_mtu(priv);
@@ -5390,6 +5409,8 @@ err_cleanup_tx:
profile->cleanup_tx(priv);
out:
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ cancel_work_sync(&priv->update_stats_work);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 006807e04eda..9519a61bd8ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -936,6 +936,7 @@ err_close_drop_rq:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
+ mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
@@ -1080,6 +1081,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5e_rep_tc_enable(priv);
+ mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
+ 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
mlx5_lag_add(mdev, netdev);
priv->events_nb.notifier_call = uplink_rep_async_event;
mlx5_notifier_register(mdev, &priv->events_nb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index cc8412151ca0..fcedb5bdca9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2356,6 +2356,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 1116ab9bea6c..43005caff09e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1608,7 +1608,7 @@ abort:
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
-
+ esw_destroy_tsar(esw);
return err;
}
@@ -1653,8 +1653,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
esw_offloads_disable(esw);
- esw_destroy_tsar(esw);
-
old_mode = esw->mode;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1664,6 +1662,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
+ esw_destroy_tsar(esw);
+
if (clear_vf)
mlx5_eswitch_clear_vf_vports_info(esw);
}
@@ -1826,6 +1826,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
u16 vport, int link_state)
{
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
+ int other_vport = 1;
int err = 0;
if (!ESW_ALLOWED(esw))
@@ -1833,15 +1835,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
if (IS_ERR(evport))
return PTR_ERR(evport);
+ if (vport == MLX5_VPORT_UPLINK) {
+ opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
+ other_vport = 0;
+ vport = 0;
+ }
mutex_lock(&esw->state_lock);
- err = mlx5_modify_vport_admin_state(esw->dev,
- MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- vport, 1, link_state);
+ err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
if (err) {
- mlx5_core_warn(esw->dev,
- "Failed to set vport %d link state, err = %d",
- vport, err);
+ mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
+ vport, opmod, err);
goto unlock;
}
@@ -1883,8 +1887,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
int err = 0;
- if (!ESW_ALLOWED(esw))
- return -EPERM;
if (IS_ERR(evport))
return PTR_ERR(evport);
if (vlan > 4095 || qos > 7)
@@ -1912,6 +1914,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u8 set_flags = 0;
int err;
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+
if (vlan || qos)
set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a5175e98c0b3..5785596f13f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -680,6 +680,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
+static inline
+int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 060354bb211a..ed75353c56b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -236,6 +236,15 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
return &esw->offloads.vport_reps[idx];
}
+static void
+mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr)
+{
+ if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
+ attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+}
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
@@ -259,9 +268,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
@@ -279,10 +285,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
-
- if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
- attr->in_rep->vport == MLX5_VPORT_UPLINK)
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
struct mlx5_flow_handle *
@@ -396,6 +398,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
goto err_esw_get;
}
+ mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
+
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
&flow_act, dest, i);
@@ -462,6 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
i++;
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+ mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 13e2fb79c21a..2569bb6228b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -797,7 +797,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
return ft;
}
-/* If reverse if false then return the first flow table in next priority of
+/* If reverse is false then return the first flow table in next priority of
* prio in the tree, else return the last flow table in the previous priority
* of prio in the tree.
*/
@@ -829,34 +829,16 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
return find_closest_ft(prio, true);
}
-static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root,
- struct mlx5_flow_namespace *ns)
-{
- struct mlx5_flow_namespace *root_ns = &root->ns;
- struct fs_prio *iter_prio;
- struct fs_prio *prio;
-
- fs_get_obj(prio, ns->node.parent);
- list_for_each_entry(iter_prio, &root_ns->node.children, node.list) {
- if (iter_prio == prio &&
- !list_is_last(&prio->node.children, &iter_prio->node.list))
- return list_next_entry(iter_prio, node.list);
- }
- return NULL;
-}
-
static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
struct mlx5_flow_act *flow_act)
{
- struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct fs_prio *prio;
+ bool next_ns;
- if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS)
- prio = find_fwd_ns_prio(root, ft->ns);
- else
- fs_get_obj(prio, ft->node.parent);
+ next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+ fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
- return (prio) ? find_next_chained_ft(prio) : NULL;
+ return find_next_chained_ft(prio);
}
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index ef0706d15a5b..2d55b7c22c03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -273,17 +273,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
if (rq->extts.index >= clock->ptp_info.n_pins)
return -EINVAL;
+ pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+
if (on) {
- pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
- if (pin < 0)
- return -EBUSY;
pin_mode = MLX5_PIN_MODE_IN;
pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
field_select = MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
MLX5_MTPPS_FS_ENABLE;
} else {
- pin = rq->extts.index;
field_select = MLX5_MTPPS_FS_ENABLE;
}
@@ -331,12 +331,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
- if (on) {
- pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
- rq->perout.index);
- if (pin < 0)
- return -EBUSY;
+ pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
ts.tv_sec = rq->perout.period.sec;
@@ -362,7 +362,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_MTPPS_FS_ENABLE |
MLX5_MTPPS_FS_TIME_STAMP;
} else {
- pin = rq->perout.index;
field_select = MLX5_MTPPS_FS_ENABLE;
}
@@ -409,10 +408,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
return 0;
}
+enum {
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
+};
+
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
- return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+
+ switch (func) {
+ case PTP_PF_NONE:
+ return 0;
+ case PTP_PF_EXTTS:
+ return !(clock->pps_info.pin_caps[pin] &
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
+ case PTP_PF_PEROUT:
+ return !(clock->pps_info.pin_caps[pin] &
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
}
static const struct ptp_clock_info mlx5_ptp_clock_info = {
@@ -432,6 +452,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.verify = NULL,
};
+static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
+ u32 *mtpps, u32 mtpps_size)
+{
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+
+ MLX5_SET(mtpps_reg, in, pin, pin);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
+ mtpps_size, MLX5_REG_MTPPS, 0, 0);
+}
+
+static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+{
+ struct mlx5_core_dev *mdev = clock->mdev;
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+ u8 mode;
+ int err;
+
+ err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
+ if (err || !MLX5_GET(mtpps_reg, out, enable))
+ return PTP_PF_NONE;
+
+ mode = MLX5_GET(mtpps_reg, out, pin_mode);
+
+ if (mode == MLX5_PIN_MODE_IN)
+ return PTP_PF_EXTTS;
+ else if (mode == MLX5_PIN_MODE_OUT)
+ return PTP_PF_PEROUT;
+
+ return PTP_PF_NONE;
+}
+
static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
int i;
@@ -451,8 +503,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
clock->ptp_info.pin_config[i].index = i;
- clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
- clock->ptp_info.pin_config[i].chan = i;
+ clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+ clock->ptp_info.pin_config[i].chan = 0;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 5ddd18639a1e..c410a0ce35c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -529,8 +529,8 @@ static int req_pages_handler(struct notifier_block *nb,
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
- u16 uninitialized_var(func_id);
- s32 uninitialized_var(npages);
+ u16 func_id;
+ s32 npages;
int err;
err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index d6d6fe64887b..71b6185b4904 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1814,7 +1814,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
bulk_list, cb, cb_priv, tid);
if (err) {
- kfree(trans);
+ kfree_rcu(trans, rcu);
return err;
}
return 0;
@@ -2051,11 +2051,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
break;
}
}
- rcu_read_unlock();
- if (!found)
+ if (!found) {
+ rcu_read_unlock();
goto drop;
+ }
rxl->func(skb, local_port, rxl_item->priv);
+ rcu_read_unlock();
return;
drop:
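
The two mlxsw core hunks are textbook RCU fixes. First, an object that RCU readers may still reference has to be freed through kfree_rcu(), which defers the free past a grace period; the plain kfree() on the error path left a use-after-free window. Second, the rcu_read_lock() section must cover the last use of the looked-up entry, here the rxl->func() callback, not just the list walk. A hedged sketch of both rules with hypothetical types:

#include <linux/rculist.h>
#include <linux/slab.h>

struct foo_listener {
	int local_port;
	void (*func)(int local_port);
	struct list_head list;
	struct rcu_head rcu;		/* required by kfree_rcu() */
};

static LIST_HEAD(foo_listeners);

static void foo_listener_del(struct foo_listener *l)
{
	list_del_rcu(&l->list);
	kfree_rcu(l, rcu);	/* free only after current readers finish */
}

static void foo_dispatch(int local_port)
{
	struct foo_listener *l;

	rcu_read_lock();
	list_for_each_entry_rcu(l, &foo_listeners, list) {
		if (l->local_port == local_port) {
			l->func(local_port);	/* still under the read lock */
			break;
		}
	}
	rcu_read_unlock();
}
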
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index fcb88d4271bf..8ac987c8c8bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5536,6 +5536,7 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 019ed503aadf..0521e9d48c45 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -5001,15 +5001,6 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
- /* Packets with link-local destination IP arriving to the router
- * are trapped to the CPU, so no need to program specific routes
- * for them. Only allow prefix routes (usually one fe80::/64) so
- * that packets are trapped for the right reason.
- */
- if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) &&
- (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)))
- return true;
-
/* Multicast routes aren't supported, so ignore them. Neighbour
* Discovery packets are specifically trapped.
*/
@@ -8078,16 +8069,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
- router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
- err = register_inetaddr_notifier(&router->inetaddr_nb);
- if (err)
- goto err_register_inetaddr_notifier;
-
- router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
- err = register_inet6addr_notifier(&router->inet6addr_nb);
- if (err)
- goto err_register_inet6addr_notifier;
-
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -8128,12 +8109,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_neigh_init;
- mlxsw_sp->router->netevent_nb.notifier_call =
- mlxsw_sp_router_netevent_event;
- err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
- if (err)
- goto err_register_netevent_notifier;
-
err = mlxsw_sp_mp_hash_init(mlxsw_sp);
if (err)
goto err_mp_hash_init;
@@ -8142,6 +8117,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
+ router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
+ err = register_inetaddr_notifier(&router->inetaddr_nb);
+ if (err)
+ goto err_register_inetaddr_notifier;
+
+ router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
+ err = register_inet6addr_notifier(&router->inet6addr_nb);
+ if (err)
+ goto err_register_inet6addr_notifier;
+
+ mlxsw_sp->router->netevent_nb.notifier_call =
+ mlxsw_sp_router_netevent_event;
+ err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb,
@@ -8152,10 +8143,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_register_fib_notifier:
-err_dscp_init:
-err_mp_hash_init:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
+ unregister_inet6addr_notifier(&router->inet6addr_nb);
+err_register_inet6addr_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
+err_register_inetaddr_notifier:
+ mlxsw_core_flush_owq();
+err_dscp_init:
+err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
@@ -8174,10 +8170,6 @@ err_ipips_init:
err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
- unregister_inet6addr_notifier(&router->inet6addr_nb);
-err_register_inet6addr_notifier:
- unregister_inetaddr_notifier(&router->inetaddr_nb);
-err_register_inetaddr_notifier:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
return err;
@@ -8188,6 +8180,9 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb);
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+ mlxsw_core_flush_owq();
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_mr_fini(mlxsw_sp);
@@ -8197,8 +8192,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_ipips_fini(mlxsw_sp);
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
- unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
- unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
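
Across these router hunks the inetaddr/inet6addr/netevent notifiers move to the very end of init (and to the front of fini, with mlxsw_core_flush_owq() draining work queued by late notifications), so a callback can never run against a half-initialized router and the error labels unwind in exact reverse order. A compilable toy of the register-last/unregister-first discipline (all step functions are stand-ins):

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int register_callbacks(void) { return 0; }	/* last: may fire at once */
static void unregister_callbacks(void) { }
static void undo_b(void) { }
static void undo_a(void) { }

static int object_init(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		goto err_b;
	/* only now is it safe for callbacks to observe the object */
	err = register_callbacks();
	if (err)
		goto err_register;
	return 0;

err_register:
	undo_b();		/* unwind in reverse of init order */
err_b:
	undo_a();
	return err;
}

static void object_fini(void)
{
	unregister_callbacks();	/* first: stop new callbacks arriving */
	undo_b();
	undo_a();
}

int main(void)
{
	int err = object_init();

	if (!err)
		object_fini();
	return err;
}
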
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 157a42c63066..1e38dfe7cf64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -328,6 +328,9 @@ mlxsw_sp_trap_policer_items_arr[] = {
{
.policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128),
},
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512),
+ },
};
static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
@@ -422,6 +425,11 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
.priority = 2,
},
{
+ .group = DEVLINK_TRAP_GROUP_GENERIC(EXTERNAL_DELIVERY, 19),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
+ .priority = 1,
+ },
+ {
.group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15),
.hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
.priority = 2,
@@ -882,11 +890,11 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
},
},
{
- .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, LOCAL_DELIVERY,
+ .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, EXTERNAL_DELIVERY,
TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(RTR_INGRESS0, IP2ME, TRAP_TO_CPU,
- false),
+ MLXSW_SP_RXL_MARK(RTR_INGRESS0, EXTERNAL_ROUTE,
+ TRAP_TO_CPU, false),
},
},
{
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 9cfe1fd98c30..f17da67a4622 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -748,21 +748,21 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
- /* Next ts */
- ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
if (unlikely(!skb_match))
continue;
- /* Get the h/w timestamp */
- ocelot_get_hwtimestamp(ocelot, &ts);
-
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_tstamp_tx(skb_match, &shhwtstamps);
dev_kfree_skb_any(skb_match);
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
}
}
EXPORT_SYMBOL(ocelot_get_txtstamp);
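
Before this change the SYS_PTP_NXT write, which advances the hardware timestamp FIFO, was issued before the timestamp was read, so the value retrieved could belong to the next entry; the reordered loop reads the entry first and advances only once it has been consumed. A toy model of the read-then-advance ordering (the FIFO contents are invented):

#include <stdio.h>

#define FIFO_LEN 4

static int fifo[FIFO_LEN] = { 10, 11, 12, 13 };
static int head;

int main(void)
{
	while (head < FIFO_LEN) {
		int ts = fifo[head];	/* read the head entry first */

		printf("consumed ts %d\n", ts);
		head++;			/* advance only after the entry is used */
	}
	return 0;
}
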
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 67e62603fe3b..15b8b1bf8163 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -7276,7 +7276,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
int ring_no = ring_data->ring_no;
u16 l3_csum, l4_csum;
unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
- struct lro *uninitialized_var(lro);
+ struct lro *lro;
u8 err_mask;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index d2708a57f2ff..4075f5e59955 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1299,19 +1299,21 @@ static int nixge_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
err = nixge_of_get_resources(pdev);
if (err)
- return err;
+ goto free_netdev;
__nixge_hw_set_mac_address(ndev);
priv->tx_irq = platform_get_irq_byname(pdev, "tx");
if (priv->tx_irq < 0) {
netdev_err(ndev, "could not find 'tx' irq");
- return priv->tx_irq;
+ err = priv->tx_irq;
+ goto free_netdev;
}
priv->rx_irq = platform_get_irq_byname(pdev, "rx");
if (priv->rx_irq < 0) {
netdev_err(ndev, "could not find 'rx' irq");
- return priv->rx_irq;
+ err = priv->rx_irq;
+ goto free_netdev;
}
priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 5fd31ba56937..e55d41546cff 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -2001,7 +2001,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
netif_device_detach(lif->netdev);
err = ionic_stop(lif->netdev);
if (err)
- return err;
+ goto reset_out;
}
if (cb)
@@ -2011,6 +2011,8 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
err = ionic_open(lif->netdev);
netif_device_attach(lif->netdev);
}
+
+reset_out:
mutex_unlock(&lif->queue_lock);
return err;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 0fade19e00d4..0d0e38debbc2 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3769,7 +3769,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
struct net_device *ndev = NULL;
struct ql3_adapter *qdev = NULL;
static int cards_found;
- int uninitialized_var(pci_using_dac), err;
+ int pci_using_dac, err;
err = pci_enable_device(pdev);
if (err) {
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index debd3c3fa6fb..015fdb851cdb 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2271,7 +2271,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
drops = 0;
while (1) {
struct cas_rx_comp *rxc = rxcs + entry;
- struct sk_buff *uninitialized_var(skb);
+ struct sk_buff *skb;
int type, len;
u64 words[4];
int i, dring;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 9a5004f674c7..1b697e4cd7dc 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -429,7 +429,7 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
struct niu_link_config *lp = &np->link_config;
u16 pll_cfg, pll_sts;
int max_retry = 100;
- u64 uninitialized_var(sig), mask, val;
+ u64 sig, mask, val;
u32 tx_cfg, rx_cfg;
unsigned long i;
int err;
@@ -526,7 +526,7 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
struct niu_link_config *lp = &np->link_config;
u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
int max_retry = 100;
- u64 uninitialized_var(sig), mask, val;
+ u64 sig, mask, val;
unsigned long i;
int err;
@@ -714,7 +714,7 @@ static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
static int esr_reset(struct niu *np)
{
- u32 uninitialized_var(reset);
+ u32 reset;
int err;
err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 5f123a8cf68e..d2fdb5430d27 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2261,12 +2261,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
minor = get_free_serial_index();
if (minor < 0)
- goto exit;
+ goto exit2;
/* register our minor number */
serial->parent->dev = tty_port_register_device_attr(&serial->port,
tty_drv, minor, &serial->parent->interface->dev,
serial->parent, hso_serial_dev_groups);
+ if (IS_ERR(serial->parent->dev))
+ goto exit2;
/* fill in specific data for later use */
serial->minor = minor;
@@ -2311,6 +2313,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
return 0;
exit:
hso_serial_tty_unregister(serial);
+exit2:
hso_serial_common_free(serial);
return -1;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index eccbf4cd7149..442507f25aad 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -377,10 +377,6 @@ struct lan78xx_net {
struct tasklet_struct bh;
struct delayed_work wq;
- struct usb_host_endpoint *ep_blkin;
- struct usb_host_endpoint *ep_blkout;
- struct usb_host_endpoint *ep_intr;
-
int msg_enable;
struct urb *urb_intr;
@@ -2860,78 +2856,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
-static int
-lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
-{
- int tmp;
- struct usb_host_interface *alt = NULL;
- struct usb_host_endpoint *in = NULL, *out = NULL;
- struct usb_host_endpoint *status = NULL;
-
- for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
- unsigned ep;
-
- in = NULL;
- out = NULL;
- status = NULL;
- alt = intf->altsetting + tmp;
-
- for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
- struct usb_host_endpoint *e;
- int intr = 0;
-
- e = alt->endpoint + ep;
- switch (e->desc.bmAttributes) {
- case USB_ENDPOINT_XFER_INT:
- if (!usb_endpoint_dir_in(&e->desc))
- continue;
- intr = 1;
- /* FALLTHROUGH */
- case USB_ENDPOINT_XFER_BULK:
- break;
- default:
- continue;
- }
- if (usb_endpoint_dir_in(&e->desc)) {
- if (!intr && !in)
- in = e;
- else if (intr && !status)
- status = e;
- } else {
- if (!out)
- out = e;
- }
- }
- if (in && out)
- break;
- }
- if (!alt || !in || !out)
- return -EINVAL;
-
- dev->pipe_in = usb_rcvbulkpipe(dev->udev,
- in->desc.bEndpointAddress &
- USB_ENDPOINT_NUMBER_MASK);
- dev->pipe_out = usb_sndbulkpipe(dev->udev,
- out->desc.bEndpointAddress &
- USB_ENDPOINT_NUMBER_MASK);
- dev->ep_intr = status;
-
- return 0;
-}
-
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
struct lan78xx_priv *pdata = NULL;
int ret;
int i;
- ret = lan78xx_get_endpoints(dev, intf);
- if (ret) {
- netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
- ret);
- return ret;
- }
-
dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
pdata = (struct lan78xx_priv *)(dev->data[0]);
@@ -3700,6 +3630,7 @@ static void lan78xx_stat_monitor(struct timer_list *t)
static int lan78xx_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
struct lan78xx_net *dev;
struct net_device *netdev;
struct usb_device *udev;
@@ -3748,6 +3679,34 @@ static int lan78xx_probe(struct usb_interface *intf,
mutex_init(&dev->stats.access_lock);
+ if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+ ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
+ if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+ ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
+ if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ ep_intr = &intf->cur_altsetting->endpoint[2];
+ if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ dev->pipe_intr = usb_rcvintpipe(dev->udev,
+ usb_endpoint_num(&ep_intr->desc));
+
ret = lan78xx_bind(dev, intf);
if (ret < 0)
goto out2;
@@ -3759,18 +3718,7 @@ static int lan78xx_probe(struct usb_interface *intf,
netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
- dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
- dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
- dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
-
- dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
- dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
-
- dev->pipe_intr = usb_rcvintpipe(dev->udev,
- dev->ep_intr->desc.bEndpointAddress &
- USB_ENDPOINT_NUMBER_MASK);
- period = dev->ep_intr->desc.bInterval;
-
+ period = ep_intr->desc.bInterval;
maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
buf = kmalloc(maxp, GFP_KERNEL);
if (buf) {
@@ -3783,6 +3731,7 @@ static int lan78xx_probe(struct usb_interface *intf,
usb_fill_int_urb(dev->urb_intr, dev->udev,
dev->pipe_intr, buf, maxp,
intr_complete, dev, period);
+ dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
}
}
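
probe() now checks bNumEndpoints and verifies each endpoint's type and direction before building pipes, rather than indexing cur_altsetting->endpoint[] on faith, and URB_FREE_BUFFER plugs the interrupt-buffer leak on teardown. A user-space sketch of the validation shape (the struct and the expected bulk-in/bulk-out/int-in layout are simplified assumptions):

#include <stdbool.h>
#include <stdio.h>

enum ep_type { EP_BULK, EP_INT, EP_ISOC };
struct endpoint { enum ep_type type; bool dir_in; };

/* hypothetical validator mirroring the probe-time checks above */
static bool endpoints_ok(const struct endpoint *ep, int num)
{
	if (num < 3)
		return false;			/* too few endpoints: reject */
	if (ep[0].type != EP_BULK || !ep[0].dir_in)
		return false;			/* expect bulk-in first */
	if (ep[1].type != EP_BULK || ep[1].dir_in)
		return false;			/* then bulk-out */
	if (ep[2].type != EP_INT || !ep[2].dir_in)
		return false;			/* then interrupt-in */
	return true;
}

int main(void)
{
	struct endpoint eps[] = {
		{ EP_BULK, true }, { EP_BULK, false }, { EP_INT, true },
	};

	printf("%s\n", endpoints_ok(eps, 3) ? "ok" : "reject");
	return 0;
}
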
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 89d85dcb200e..a7c3939264b0 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1376,6 +1376,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct vxlan_fdb *f;
+ rcu_read_lock();
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
struct vxlan_rdst *rd;
@@ -1387,8 +1388,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, NULL);
- if (err < 0)
+ if (err < 0) {
+ rcu_read_unlock();
goto out;
+ }
skip_nh:
*idx += 1;
continue;
@@ -1403,12 +1406,15 @@ skip_nh:
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, rd);
- if (err < 0)
+ if (err < 0) {
+ rcu_read_unlock();
goto out;
+ }
skip:
*idx += 1;
}
}
+ rcu_read_unlock();
}
out:
return err;
@@ -3070,8 +3076,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
continue;
/* the all_zeros_mac entry is deleted at vxlan_uninit */
- if (!is_zero_ether_addr(f->eth_addr))
- vxlan_fdb_destroy(vxlan, f, true, true);
+ if (is_zero_ether_addr(f->eth_addr) &&
+ f->vni == vxlan->cfg.vni)
+ continue;
+ vxlan_fdb_destroy(vxlan, f, true, true);
}
spin_unlock_bh(&vxlan->hash_lock[h]);
}
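
Two fixes here: the FDB dump now holds rcu_read_lock() across each bucket walk and releases it on the early error returns, and the flush predicate is inverted so that only the device's own all-zeros entry (matching cfg.vni) survives, instead of every all-zeros MAC regardless of VNI. A tiny model of the corrected keep/flush predicate (field names simplified):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fdb { unsigned char mac[6]; unsigned vni; };

/* hypothetical mirror of the flush fix: keep only the device's own
 * all-zeros entry; everything else is flushed */
static bool should_keep(const struct fdb *f, unsigned own_vni)
{
	static const unsigned char zero[6];

	return memcmp(f->mac, zero, 6) == 0 && f->vni == own_vni;
}

int main(void)
{
	struct fdb a = { {0}, 10 }, b = { {0}, 20 };

	printf("a: %s\n", should_keep(&a, 10) ? "keep" : "flush");
	printf("b: %s\n", should_keep(&b, 10) ? "keep" : "flush");	/* other VNI */
	return 0;
}
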
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 7916efce7188..f5198a391417 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -569,8 +569,8 @@ static void do_bottom_half_rx(struct fst_card_info *card);
static void fst_process_tx_work_q(unsigned long work_q);
static void fst_process_int_work_q(unsigned long work_q);
-static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);
-static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0);
+static DECLARE_TASKLET_OLD(fst_tx_task, fst_process_tx_work_q);
+static DECLARE_TASKLET_OLD(fst_int_task, fst_process_int_work_q);
static struct fst_card_info *fst_card_array[FST_MAX_CARDS];
static spinlock_t fst_work_q_lock;
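
As far as this conversion series goes, DECLARE_TASKLET_OLD(name, func) is equivalent to the previous three-argument DECLARE_TASKLET(name, func, 0): it keeps the legacy unsigned-long callback signature with the data argument fixed at zero, marking drivers for later conversion to the callback form that receives the tasklet itself. Kernel-style sketch of the two spellings (not compiled here):

/* before: data argument spelled out, always 0 in this driver */
static void fst_process_tx_work_q(unsigned long work_q);
static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);

/* after: identical semantics, data implicitly 0 */
static DECLARE_TASKLET_OLD(fst_tx_task, fst_process_tx_work_q);
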
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 7ad3d24195ba..138930c66ad2 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -702,7 +702,7 @@ EXPORT_SYMBOL(z8530_nop);
irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
struct z8530_dev *dev=dev_id;
- u8 uninitialized_var(intr);
+ u8 intr;
static volatile int locker=0;
int work=0;
struct z8530_irqhandler *irqs;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 22b6937ac225..340ce327ac14 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -2240,7 +2240,7 @@ static int ath10k_init_uart(struct ath10k *ar)
static int ath10k_init_hw_params(struct ath10k *ar)
{
- const struct ath10k_hw_params *uninitialized_var(hw_params);
+ const struct ath10k_hw_params *hw_params;
int i;
for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index aa1c71a76ef7..811fad6d60c0 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1575,7 +1575,7 @@ static int ath6kl_init_upload(struct ath6kl *ar)
int ath6kl_init_hw_params(struct ath6kl *ar)
{
- const struct ath6kl_hw *uninitialized_var(hw);
+ const struct ath6kl_hw *hw;
int i;
for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 289a2444d534..4d72cd7daaa2 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -230,7 +230,7 @@ static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 cl
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
u32 val;
if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c
index dc1819ca52ac..89a25aefb327 100644
--- a/drivers/net/wireless/broadcom/b43/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43/debugfs.c
@@ -493,7 +493,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
struct b43_wldev *dev;
struct b43_debugfs_fops *dfops;
struct b43_dfs_file *dfile;
- ssize_t uninitialized_var(ret);
+ ssize_t ret;
char *buf;
const size_t bufsize = 1024 * 16; /* 16 kiB buffer */
const size_t buforder = get_order(bufsize);
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 9733c64bf978..ca671fc13116 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -37,7 +37,7 @@
static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
enum b43_addrtype addrtype)
{
- u32 uninitialized_var(addr);
+ u32 addr;
switch (addrtype) {
case B43_DMA_ADDR_LOW:
diff --git a/drivers/net/wireless/broadcom/b43/lo.c b/drivers/net/wireless/broadcom/b43/lo.c
index 5d97cf06eceb..338b6545a1e7 100644
--- a/drivers/net/wireless/broadcom/b43/lo.c
+++ b/drivers/net/wireless/broadcom/b43/lo.c
@@ -729,7 +729,7 @@ struct b43_lo_calib *b43_calibrate_lo_setting(struct b43_wldev *dev,
};
int max_rx_gain;
struct b43_lo_calib *cal;
- struct lo_g_saved_values uninitialized_var(saved_regs);
+ struct lo_g_saved_values saved_regs;
/* Values from the "TXCTL Register and Value Table" */
u16 txctl_reg;
u16 txctl_value;
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index c33b4235839d..39de18d3ce91 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -4222,7 +4222,7 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
u32 rfpwr_offset;
u8 pga_gain, pad_gain;
int i;
- const s16 *uninitialized_var(rf_pwr_offset_table);
+ const s16 *rf_pwr_offset_table = NULL;
table = b43_nphy_get_tx_gain_table(dev);
if (!table)
@@ -5643,7 +5643,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
u8 rfctl[2];
u8 afectl_core;
u16 tmp[6];
- u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna;
+ u16 cur_hpf1, cur_hpf2, cur_lna;
u32 real, imag;
enum nl80211_band band;
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index 55babc6d1091..7651b1bdb592 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -422,10 +422,10 @@ int b43_generate_txhdr(struct b43_wldev *dev,
if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
(rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) {
unsigned int len;
- struct ieee80211_hdr *uninitialized_var(hdr);
+ struct ieee80211_hdr *hdr;
int rts_rate, rts_rate_fb;
int rts_rate_ofdm, rts_rate_fb_ofdm;
- struct b43_plcp_hdr6 *uninitialized_var(plcp);
+ struct b43_plcp_hdr6 *plcp;
struct ieee80211_rate *rts_cts_rate;
rts_cts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info);
@@ -436,7 +436,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb);
if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
- struct ieee80211_cts *uninitialized_var(cts);
+ struct ieee80211_cts *cts;
switch (dev->fw.hdr_format) {
case B43_FW_HDR_598:
@@ -458,7 +458,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
mac_ctl |= B43_TXH_MAC_SENDCTS;
len = sizeof(struct ieee80211_cts);
} else {
- struct ieee80211_rts *uninitialized_var(rts);
+ struct ieee80211_rts *rts;
switch (dev->fw.hdr_format) {
case B43_FW_HDR_598:
@@ -637,8 +637,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
const struct b43_rxhdr_fw4 *rxhdr = _rxhdr;
__le16 fctl;
u16 phystat0, phystat3;
- u16 uninitialized_var(chanstat), uninitialized_var(mactime);
- u32 uninitialized_var(macstat);
+ u16 chanstat, mactime;
+ u32 macstat;
u16 chanid;
int padding, rate_idx;
diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
index fa133dfb2ecb..e7e4293c01f2 100644
--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
@@ -190,7 +190,7 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf,
struct b43legacy_wldev *dev;
struct b43legacy_debugfs_fops *dfops;
struct b43legacy_dfs_file *dfile;
- ssize_t uninitialized_var(ret);
+ ssize_t ret;
char *buf;
const size_t bufsize = 1024 * 16; /* 16 KiB buffer */
const size_t buforder = get_order(bufsize);
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 5208a39fd6f7..220c11d34c23 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -2580,7 +2580,7 @@ static void b43legacy_put_phy_into_reset(struct b43legacy_wldev *dev)
static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
unsigned int new_mode)
{
- struct b43legacy_wldev *uninitialized_var(up_dev);
+ struct b43legacy_wldev *up_dev;
struct b43legacy_wldev *down_dev;
int err;
bool gmode = false;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 2ac494f5ae22..fd63eba47ba2 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -2100,7 +2100,7 @@ il3945_txpower_set_from_eeprom(struct il_priv *il)
/* set tx power value for all OFDM rates */
for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
- s32 uninitialized_var(power_idx);
+ s32 power_idx;
int rc;
/* use channel group's clip-power table,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index da6d4202611c..a159d1d18c2c 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -2769,7 +2769,7 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
struct ieee80211_tx_info *info;
struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
u32 status = le32_to_cpu(tx_resp->u.status);
- int uninitialized_var(tid);
+ int tid;
int sta_id;
int freed;
u8 *qc = NULL;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index f070f25bb735..0ae9cfc65272 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -592,7 +592,7 @@ static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw,
bool wmm_enable,
u8 queue_sel)
{
- u16 uninitialized_var(value);
+ u16 value;
switch (queue_sel) {
case TX_SELE_HQ:
@@ -606,7 +606,7 @@ static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw,
break;
default:
WARN_ON(1); /* Shall not reach here! */
- break;
+ return;
}
_rtl92c_init_chipn_reg_priority(hw, value, value, value, value,
value, value);
@@ -618,8 +618,8 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
u8 queue_sel)
{
u16 beq, bkq, viq, voq, mgtq, hiq;
- u16 uninitialized_var(valuehi);
- u16 uninitialized_var(valuelow);
+ u16 valuehi;
+ u16 valuelow;
switch (queue_sel) {
case (TX_SELE_HQ | TX_SELE_LQ):
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 39030a324d7f..1f718381a045 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -162,7 +162,7 @@ static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
return err;
}
-static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t nd_blk_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip;
struct nd_namespace_blk *nsblk = bio->bi_disk->private_data;
@@ -225,6 +225,7 @@ static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
static const struct block_device_operations nd_blk_fops = {
.owner = THIS_MODULE,
+ .submit_bio = nd_blk_submit_bio,
.revalidate_disk = nvdimm_revalidate_disk,
};
@@ -250,7 +251,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
available_disk_size = internal_nlba * nsblk_sector_size(nsblk);
- q = blk_alloc_queue(nd_blk_make_request, NUMA_NO_NODE);
+ q = blk_alloc_queue(NUMA_NO_NODE);
if (!q)
return -ENOMEM;
if (devm_add_action_or_reset(dev, nd_blk_release_queue, q))
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 48e9d169b6f9..412d21d8f643 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1439,7 +1439,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
return ret;
}
-static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t btt_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct btt *btt = bio->bi_disk->private_data;
@@ -1512,6 +1512,7 @@ static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
static const struct block_device_operations btt_fops = {
.owner = THIS_MODULE,
+ .submit_bio = btt_submit_bio,
.rw_page = btt_rw_page,
.getgeo = btt_getgeo,
.revalidate_disk = nvdimm_revalidate_disk,
@@ -1523,7 +1524,7 @@ static int btt_blk_init(struct btt *btt)
struct nd_namespace_common *ndns = nd_btt->ndns;
/* create a new disk and request queue for btt */
- btt->btt_queue = blk_alloc_queue(btt_make_request, NUMA_NO_NODE);
+ btt->btt_queue = blk_alloc_queue(NUMA_NO_NODE);
if (!btt->btt_queue)
return -ENOMEM;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index d25e66fd942d..94790e6e0e4c 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -189,7 +189,7 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem,
return rc;
}
-static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t pmem_submit_bio(struct bio *bio)
{
int ret = 0;
blk_status_t rc = 0;
@@ -281,6 +281,7 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
static const struct block_device_operations pmem_fops = {
.owner = THIS_MODULE,
+ .submit_bio = pmem_submit_bio,
.rw_page = pmem_rw_page,
.revalidate_disk = nvdimm_revalidate_disk,
};
@@ -423,7 +424,7 @@ static int pmem_attach_disk(struct device *dev,
return -EBUSY;
}
- q = blk_alloc_queue(pmem_make_request, dev_to_node(dev));
+ q = blk_alloc_queue(dev_to_node(dev));
if (!q)
return -ENOMEM;
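
These three nvdimm hunks track the v5.9 block-layer change: bio-based drivers now supply ->submit_bio in block_device_operations, and blk_alloc_queue() no longer takes the make_request callback; the driver context comes from bio->bi_disk->private_data. A kernel-style skeleton under those assumptions ('mydrv' names are hypothetical):

#include <linux/blkdev.h>
#include <linux/module.h>

struct mydrv { int dummy; };

static blk_qc_t mydrv_submit_bio(struct bio *bio)
{
	struct mydrv *d = bio->bi_disk->private_data;

	(void)d;		/* ... service the bio's segments here ... */
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static const struct block_device_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= mydrv_submit_bio,
};

/* queue allocation drops the callback argument:
 *	q = blk_alloc_queue(NUMA_NO_NODE);
 */
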
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index fc7b26be692d..d7f6a87687b8 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -13,6 +13,7 @@ nvme-core-y := core.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
+nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index add040168e67..88cff309d8e4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -89,7 +89,7 @@ static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;
-static int nvme_revalidate_disk(struct gendisk *disk);
+static int _nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid);
@@ -100,7 +100,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
* Revalidating a dead namespace sets capacity to 0. This will end
* buffered writers dirtying pages that can't be synced.
*/
- if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return;
blk_set_queue_dying(ns->queue);
/* Forcibly unquiesce queues to avoid blocking dispatch */
@@ -287,6 +287,10 @@ void nvme_complete_rq(struct request *req)
nvme_retry_req(req);
return;
}
+ } else if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ req_op(req) == REQ_OP_ZONE_APPEND) {
+ req->__sector = nvme_lba_to_sect(req->q->queuedata,
+ le64_to_cpu(nvme_req(req)->result.u64));
}
nvme_trace_bio_complete(req, status);
@@ -304,7 +308,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
return true;
nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
- blk_mq_force_complete_rq(req);
+ blk_mq_complete_request(req);
return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
@@ -362,6 +366,16 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
break;
}
break;
+ case NVME_CTRL_DELETING_NOIO:
+ switch (old_state) {
+ case NVME_CTRL_DELETING:
+ case NVME_CTRL_DEAD:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
case NVME_CTRL_DEAD:
switch (old_state) {
case NVME_CTRL_DELETING:
@@ -399,6 +413,7 @@ static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
case NVME_CTRL_CONNECTING:
return false;
case NVME_CTRL_DELETING:
+ case NVME_CTRL_DELETING_NOIO:
case NVME_CTRL_DEAD:
return true;
default:
@@ -450,10 +465,11 @@ static void nvme_free_ns(struct kref *kref)
kfree(ns);
}
-static void nvme_put_ns(struct nvme_ns *ns)
+void nvme_put_ns(struct nvme_ns *ns)
{
kref_put(&ns->kref, nvme_free_ns);
}
+EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
static inline void nvme_clear_nvme_request(struct request *req)
{
@@ -555,7 +571,7 @@ static int nvme_configure_directives(struct nvme_ctrl *ctrl)
goto out_disable_stream;
}
- ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
+ ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
return 0;
@@ -589,6 +605,14 @@ static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}
+static void nvme_setup_passthrough(struct request *req,
+ struct nvme_command *cmd)
+{
+ memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
+ /* passthru commands should let the driver set the SGL flags */
+ cmd->common.flags &= ~NVME_CMD_SGL_ALL;
+}
+
static inline void nvme_setup_flush(struct nvme_ns *ns,
struct nvme_command *cmnd)
{
@@ -673,7 +697,8 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
}
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
- struct request *req, struct nvme_command *cmnd)
+ struct request *req, struct nvme_command *cmnd,
+ enum nvme_opcode op)
{
struct nvme_ctrl *ctrl = ns->ctrl;
u16 control = 0;
@@ -687,7 +712,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
- cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+ cmnd->rw.opcode = op;
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
@@ -716,6 +741,8 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
case NVME_NS_DPS_PI_TYPE2:
control |= NVME_RW_PRINFO_PRCHK_GUARD |
NVME_RW_PRINFO_PRCHK_REF;
+ if (op == nvme_cmd_zone_append)
+ control |= NVME_RW_APPEND_PIREMAP;
cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
break;
}
@@ -751,11 +778,24 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
switch (req_op(req)) {
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
- memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
+ nvme_setup_passthrough(req, cmd);
break;
case REQ_OP_FLUSH:
nvme_setup_flush(ns, cmd);
break;
+ case REQ_OP_ZONE_RESET_ALL:
+ case REQ_OP_ZONE_RESET:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
+ break;
+ case REQ_OP_ZONE_OPEN:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
+ break;
+ case REQ_OP_ZONE_FINISH:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
+ break;
case REQ_OP_WRITE_ZEROES:
ret = nvme_setup_write_zeroes(ns, req, cmd);
break;
@@ -763,8 +803,13 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
ret = nvme_setup_discard(ns, req, cmd);
break;
case REQ_OP_READ:
+ ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
+ break;
case REQ_OP_WRITE:
- ret = nvme_setup_rw(ns, req, cmd);
+ ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
+ break;
+ case REQ_OP_ZONE_APPEND:
+ ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
break;
default:
WARN_ON_ONCE(1);
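
nvme_setup_rw() now takes the opcode explicitly so REQ_OP_ZONE_APPEND can reuse the read/write setup path (plus the PIREMAP bit for PI types 1/2), while zone reset/open/close/finish route to zone management send. A compilable mirror of the dispatch (the strings name the real opcodes; the enum is a stand-in for the kernel's REQ_OP_* values):

#include <stdio.h>

enum rq_op { OP_READ, OP_WRITE, OP_ZONE_APPEND, OP_ZONE_RESET,
	     OP_ZONE_OPEN, OP_ZONE_CLOSE, OP_ZONE_FINISH };

static const char *nvme_opcode_for(enum rq_op op)
{
	switch (op) {
	case OP_READ:		return "nvme_cmd_read";
	case OP_WRITE:		return "nvme_cmd_write";
	case OP_ZONE_APPEND:	return "nvme_cmd_zone_append";	/* rw path */
	case OP_ZONE_RESET:	return "zone mgmt send: RESET";
	case OP_ZONE_OPEN:	return "zone mgmt send: OPEN";
	case OP_ZONE_CLOSE:	return "zone mgmt send: CLOSE";
	case OP_ZONE_FINISH:	return "zone mgmt send: FINISH";
	}
	return "unsupported";
}

int main(void)
{
	printf("%s\n", nvme_opcode_for(OP_ZONE_APPEND));
	return 0;
}
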
@@ -884,6 +929,120 @@ out:
return ERR_PTR(ret);
}
+static u32 nvme_known_admin_effects(u8 opcode)
+{
+ switch (opcode) {
+ case nvme_admin_format_nvm:
+ return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
+ NVME_CMD_EFFECTS_CSE_MASK;
+ case nvme_admin_sanitize_nvm:
+ return NVME_CMD_EFFECTS_CSE_MASK;
+ default:
+ break;
+ }
+ return 0;
+}
+
+u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
+{
+ u32 effects = 0;
+
+ if (ns) {
+ if (ns->head->effects)
+ effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
+ if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
+ dev_warn(ctrl->device,
+ "IO command:%02x has unhandled effects:%08x\n",
+ opcode, effects);
+ return 0;
+ }
+
+ if (ctrl->effects)
+ effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+ effects |= nvme_known_admin_effects(opcode);
+
+ return effects;
+}
+EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
+
+static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ u8 opcode)
+{
+ u32 effects = nvme_command_effects(ctrl, ns, opcode);
+
+ /*
+ * For simplicity, IO to all namespaces is quiesced even if the command
+ * effects say only one namespace is affected.
+ */
+ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+ mutex_lock(&ctrl->scan_lock);
+ mutex_lock(&ctrl->subsys->lock);
+ nvme_mpath_start_freeze(ctrl->subsys);
+ nvme_mpath_wait_freeze(ctrl->subsys);
+ nvme_start_freeze(ctrl);
+ nvme_wait_freeze(ctrl);
+ }
+ return effects;
+}
+
+static void nvme_update_formats(struct nvme_ctrl *ctrl, u32 *effects)
+{
+ struct nvme_ns *ns;
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ if (_nvme_revalidate_disk(ns->disk))
+ nvme_set_queue_dying(ns);
+ else if (blk_queue_is_zoned(ns->disk->queue)) {
+ /*
+ * IO commands are required to fully revalidate a zoned
+ * device. Force the command effects to trigger rescan
+ * work so report zones can run in a context with
+ * unfrozen IO queues.
+ */
+ *effects |= NVME_CMD_EFFECTS_NCC;
+ }
+ up_read(&ctrl->namespaces_rwsem);
+}
+
+static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
+{
+ /*
+ * Revalidate LBA changes prior to unfreezing. This is necessary to
+ * prevent memory corruption if a logical block size was changed by
+ * this command.
+ */
+ if (effects & NVME_CMD_EFFECTS_LBCC)
+ nvme_update_formats(ctrl, &effects);
+ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+ nvme_unfreeze(ctrl);
+ nvme_mpath_unfreeze(ctrl->subsys);
+ mutex_unlock(&ctrl->subsys->lock);
+ nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
+ mutex_unlock(&ctrl->scan_lock);
+ }
+ if (effects & NVME_CMD_EFFECTS_CCC)
+ nvme_init_identify(ctrl);
+ if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
+ nvme_queue_scan(ctrl);
+ flush_work(&ctrl->scan_work);
+ }
+}
+
+void nvme_execute_passthru_rq(struct request *rq)
+{
+ struct nvme_command *cmd = nvme_req(rq)->cmd;
+ struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
+ struct nvme_ns *ns = rq->q->queuedata;
+ struct gendisk *disk = ns ? ns->disk : NULL;
+ u32 effects;
+
+ effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
+ blk_execute_rq(rq->q, disk, rq, 0);
+ nvme_passthru_end(ctrl, effects);
+}
+EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
+
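
The effects bookkeeping moves out of the ioctl handlers into nvme_execute_passthru_rq(), exported so the new NVMe target passthru code can share it: the start half freezes IO when the declared command effects can change block contents or restrict execution, and the end half revalidates formats before unfreezing. A toy bracket showing the order of operations (effect bits abbreviated):

#include <stdio.h>

#define EFFECT_LBCC 0x1		/* logical block content change */
#define EFFECT_CSE  0x2		/* command submission/execution restriction */

static void freeze(void)	{ puts("freeze IO");   }
static void unfreeze(void)	{ puts("unfreeze IO"); }
static void execute(void)	{ puts("execute cmd"); }
static void revalidate(void)	{ puts("revalidate namespaces"); }

static void run_passthru(unsigned effects)
{
	int quiesce = effects & (EFFECT_LBCC | EFFECT_CSE);

	if (quiesce)
		freeze();
	execute();
	if (effects & EFFECT_LBCC)
		revalidate();	/* before unfreezing, as in the patch */
	if (quiesce)
		unfreeze();
}

int main(void)
{
	run_passthru(EFFECT_LBCC);
	return 0;
}
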
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
@@ -922,7 +1081,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
}
}
- blk_execute_rq(req->q, disk, req, 0);
+ nvme_execute_passthru_rq(req);
if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
ret = -EINTR;
else
@@ -1056,8 +1215,13 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
return error;
}
+static bool nvme_multi_css(struct nvme_ctrl *ctrl)
+{
+ return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
+}
+
static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
- struct nvme_ns_id_desc *cur)
+ struct nvme_ns_id_desc *cur, bool *csi_seen)
{
const char *warn_str = "ctrl returned bogus length:";
void *data = cur;
@@ -1087,6 +1251,15 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
}
uuid_copy(&ids->uuid, data + sizeof(*cur));
return NVME_NIDT_UUID_LEN;
+ case NVME_NIDT_CSI:
+ if (cur->nidl != NVME_NIDT_CSI_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
+ *csi_seen = true;
+ return NVME_NIDT_CSI_LEN;
default:
/* Skip unknown types */
return cur->nidl;
@@ -1097,10 +1270,12 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_ns_ids *ids)
{
struct nvme_command c = { };
- int status;
+ bool csi_seen = false;
+ int status, pos, len;
void *data;
- int pos;
- int len;
+
+ if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
+ return 0;
c.identify.opcode = nvme_admin_identify;
c.identify.nsid = cpu_to_le32(nsid);
@@ -1115,18 +1290,6 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (status) {
dev_warn(ctrl->device,
"Identify Descriptors failed (%d)\n", status);
- /*
- * Don't treat non-retryable errors as fatal, as we potentially
- * already have a NGUID or EUI-64. If we failed with DNR set,
- * we want to silently ignore the error as we can still
- * identify the device, but if the status has DNR set, we want
- * to propagate the error back specifically for the disk
- * revalidation flow to make sure we don't abandon the
- * device just because of a temporal retry-able error (such
- * as path of transport errors).
- */
- if (status > 0 && (status & NVME_SC_DNR))
- status = 0;
goto free_data;
}
@@ -1136,12 +1299,19 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (cur->nidl == 0)
break;
- len = nvme_process_ns_desc(ctrl, ids, cur);
+ len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
if (len < 0)
- goto free_data;
+ break;
len += sizeof(*cur);
}
+
+ if (nvme_multi_css(ctrl) && !csi_seen) {
+ dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
+ nsid);
+ status = -EINVAL;
+ }
+
free_data:
kfree(data);
return status;
@@ -1330,96 +1500,12 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
-static u32 nvme_known_admin_effects(u8 opcode)
-{
- switch (opcode) {
- case nvme_admin_format_nvm:
- return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
- NVME_CMD_EFFECTS_CSE_MASK;
- case nvme_admin_sanitize_nvm:
- return NVME_CMD_EFFECTS_CSE_MASK;
- default:
- break;
- }
- return 0;
-}
-
-static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- u8 opcode)
-{
- u32 effects = 0;
-
- if (ns) {
- if (ctrl->effects)
- effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
- if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
- dev_warn(ctrl->device,
- "IO command:%02x has unhandled effects:%08x\n",
- opcode, effects);
- return 0;
- }
-
- if (ctrl->effects)
- effects = le32_to_cpu(ctrl->effects->acs[opcode]);
- effects |= nvme_known_admin_effects(opcode);
-
- /*
- * For simplicity, IO to all namespaces is quiesced even if the command
- * effects say only one namespace is affected.
- */
- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
- mutex_lock(&ctrl->scan_lock);
- mutex_lock(&ctrl->subsys->lock);
- nvme_mpath_start_freeze(ctrl->subsys);
- nvme_mpath_wait_freeze(ctrl->subsys);
- nvme_start_freeze(ctrl);
- nvme_wait_freeze(ctrl);
- }
- return effects;
-}
-
-static void nvme_update_formats(struct nvme_ctrl *ctrl)
-{
- struct nvme_ns *ns;
-
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
- if (ns->disk && nvme_revalidate_disk(ns->disk))
- nvme_set_queue_dying(ns);
- up_read(&ctrl->namespaces_rwsem);
-}
-
-static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
-{
- /*
- * Revalidate LBA changes prior to unfreezing. This is necessary to
- * prevent memory corruption if a logical block size was changed by
- * this command.
- */
- if (effects & NVME_CMD_EFFECTS_LBCC)
- nvme_update_formats(ctrl);
- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
- nvme_unfreeze(ctrl);
- nvme_mpath_unfreeze(ctrl->subsys);
- mutex_unlock(&ctrl->subsys->lock);
- nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
- mutex_unlock(&ctrl->scan_lock);
- }
- if (effects & NVME_CMD_EFFECTS_CCC)
- nvme_init_identify(ctrl);
- if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
- nvme_queue_scan(ctrl);
- flush_work(&ctrl->scan_work);
- }
-}
-
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd __user *ucmd)
{
struct nvme_passthru_cmd cmd;
struct nvme_command c;
unsigned timeout = 0;
- u32 effects;
u64 result;
int status;
@@ -1446,12 +1532,10 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
- effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &result, timeout);
- nvme_passthru_end(ctrl, effects);
if (status >= 0) {
if (put_user(result, &ucmd->result))
@@ -1467,7 +1551,6 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd64 cmd;
struct nvme_command c;
unsigned timeout = 0;
- u32 effects;
int status;
if (!capable(CAP_SYS_ADMIN))
@@ -1493,12 +1576,10 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
- effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
nvme_to_user_ptr(cmd.addr), cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout);
- nvme_passthru_end(ctrl, effects);
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
@@ -1512,7 +1593,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
* Issue ioctl requests on the first available path. Note that unlike normal
* block layer requests we will not retry failed request on another controller.
*/
-static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
+struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
struct nvme_ns_head **head, int *srcu_idx)
{
#ifdef CONFIG_NVME_MULTIPATH
@@ -1532,7 +1613,7 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
return disk->private_data;
}
-static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
+void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
{
if (head)
srcu_read_unlock(&head->srcu, idx);
@@ -1798,7 +1879,7 @@ static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
if (ctrl->vs >= NVME_VS(1, 2, 0))
memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
- if (ctrl->vs >= NVME_VS(1, 3, 0))
+ if (ctrl->vs >= NVME_VS(1, 3, 0) || nvme_multi_css(ctrl))
return nvme_identify_ns_descs(ctrl, nsid, ids);
return 0;
}
@@ -1814,7 +1895,8 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
return uuid_equal(&a->uuid, &b->uuid) &&
memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
- memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
+ memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
+ a->csi == b->csi;
}
static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -1924,18 +2006,38 @@ static void nvme_update_disk_info(struct gendisk *disk,
static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
+ unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
struct nvme_ns *ns = disk->private_data;
struct nvme_ctrl *ctrl = ns->ctrl;
+ int ret;
u32 iob;
/*
* If identify namespace failed, use default 512 byte block size so
* block layer can use before failing read/write for 0 capacity.
*/
- ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
+ ns->lba_shift = id->lbaf[lbaf].ds;
if (ns->lba_shift == 0)
ns->lba_shift = 9;
+ switch (ns->head->ids.csi) {
+ case NVME_CSI_NVM:
+ break;
+ case NVME_CSI_ZNS:
+ ret = nvme_update_zone_info(disk, ns, lbaf);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "failed to add zoned namespace:%u ret:%d\n",
+ ns->head->ns_id, ret);
+ return ret;
+ }
+ break;
+ default:
+ dev_warn(ctrl->device, "unknown csi:%u ns:%u\n",
+ ns->head->ids.csi, ns->head->ns_id);
+ return -ENODEV;
+ }
+
if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
is_power_of_2(ctrl->max_hw_sectors))
iob = ctrl->max_hw_sectors;
@@ -1943,7 +2045,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
ns->features = 0;
- ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
+ ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
/* the PI implementation requires metadata equal t10 pi tuple size */
if (ns->ms == sizeof(struct t10_pi_tuple))
ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
@@ -1979,14 +2081,15 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
#ifdef CONFIG_NVME_MULTIPATH
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
- blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
nvme_mpath_update_disk_size(ns->head->disk);
}
#endif
return 0;
}
-static int nvme_revalidate_disk(struct gendisk *disk)
+static int _nvme_revalidate_disk(struct gendisk *disk)
{
struct nvme_ns *ns = disk->private_data;
struct nvme_ctrl *ctrl = ns->ctrl;
@@ -2034,6 +2137,28 @@ out:
return ret;
}
+static int nvme_revalidate_disk(struct gendisk *disk)
+{
+ int ret;
+
+ ret = _nvme_revalidate_disk(disk);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (blk_queue_is_zoned(disk->queue)) {
+ struct nvme_ns *ns = disk->private_data;
+ struct nvme_ctrl *ctrl = ns->ctrl;
+
+ ret = blk_revalidate_disk_zones(disk, NULL);
+ if (!ret)
+ blk_queue_max_zone_append_sectors(disk->queue,
+ ctrl->max_zone_append);
+ }
+#endif
+ return ret;
+}
+
static char nvme_pr_type(enum pr_type type)
{
switch (type) {
@@ -2164,6 +2289,7 @@ static const struct block_device_operations nvme_fops = {
.release = nvme_release,
.getgeo = nvme_getgeo,
.revalidate_disk= nvme_revalidate_disk,
+ .report_zones = nvme_report_zones,
.pr_ops = &nvme_pr_ops,
};
@@ -2184,11 +2310,13 @@ static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
const struct block_device_operations nvme_ns_head_ops = {
.owner = THIS_MODULE,
+ .submit_bio = nvme_ns_head_submit_bio,
.open = nvme_ns_head_open,
.release = nvme_ns_head_release,
.ioctl = nvme_ioctl,
.compat_ioctl = nvme_compat_ioctl,
.getgeo = nvme_getgeo,
+ .report_zones = nvme_report_zones,
.pr_ops = &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */
@@ -2246,12 +2374,7 @@ EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
- /*
- * Default to a 4K page size, with the intention to update this
- * path in the future to accomodate architectures with differing
- * kernel and IO page sizes.
- */
- unsigned dev_page_min, page_shift = 12;
+ unsigned dev_page_min;
int ret;
ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
@@ -2261,17 +2384,18 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
}
dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
- if (page_shift < dev_page_min) {
+ if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
dev_err(ctrl->device,
"Minimum device page size %u too large for host (%u)\n",
- 1 << dev_page_min, 1 << page_shift);
+ 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
return -ENODEV;
}
- ctrl->page_size = 1 << page_shift;
-
- ctrl->ctrl_config = NVME_CC_CSS_NVM;
- ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
+ if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
+ ctrl->ctrl_config = NVME_CC_CSS_CSI;
+ else
+ ctrl->ctrl_config = NVME_CC_CSS_NVM;
+ ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
ctrl->ctrl_config |= NVME_CC_ENABLE;
@@ -2321,13 +2445,13 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
if (ctrl->max_hw_sectors) {
u32 max_segments =
- (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
+ (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
max_segments = min_not_zero(max_segments, ctrl->max_segments);
blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
- blk_queue_virt_boundary(q, ctrl->page_size - 1);
+ blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
blk_queue_dma_alignment(q, 7);
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
vwc = true;
@@ -2818,7 +2942,7 @@ out_unlock:
return ret;
}
-int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
void *log, size_t size, u64 offset)
{
struct nvme_command c = { };
@@ -2832,27 +2956,55 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
+ c.get_log_page.csi = csi;
return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
-static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
+static struct nvme_cel *nvme_find_cel(struct nvme_ctrl *ctrl, u8 csi)
{
+ struct nvme_cel *cel, *ret = NULL;
+
+ spin_lock(&ctrl->lock);
+ list_for_each_entry(cel, &ctrl->cels, entry) {
+ if (cel->csi == csi) {
+ ret = cel;
+ break;
+ }
+ }
+ spin_unlock(&ctrl->lock);
+
+ return ret;
+}
+
+static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
+ struct nvme_effects_log **log)
+{
+ struct nvme_cel *cel = nvme_find_cel(ctrl, csi);
int ret;
- if (!ctrl->effects)
- ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
+ if (cel)
+ goto out;
- if (!ctrl->effects)
- return 0;
+ cel = kzalloc(sizeof(*cel), GFP_KERNEL);
+ if (!cel)
+ return -ENOMEM;
- ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
- ctrl->effects, sizeof(*ctrl->effects), 0);
+ ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, csi,
+ &cel->log, sizeof(cel->log), 0);
if (ret) {
- kfree(ctrl->effects);
- ctrl->effects = NULL;
+ kfree(cel);
+ return ret;
}
- return ret;
+
+ cel->csi = csi;
+
+ spin_lock(&ctrl->lock);
+ list_add_tail(&cel->entry, &ctrl->cels);
+ spin_unlock(&ctrl->lock);
+out:
+ *log = &cel->log;
+ return 0;
}
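
With multiple command sets, the commands-supported-and-effects log becomes per-CSI, so the controller keeps a small list of fetched logs and returns a cached entry when one exists. A user-space toy of the lookup-or-load shape (the list walk would sit under ctrl->lock in the kernel; names here are invented):

#include <stdio.h>
#include <stdlib.h>

struct cel { unsigned char csi; char log[16]; struct cel *next; };

static struct cel *cels;

static struct cel *find_cel(unsigned char csi)
{
	struct cel *c;

	for (c = cels; c; c = c->next)
		if (c->csi == csi)
			return c;
	return NULL;
}

static const char *get_effects_log(unsigned char csi)
{
	struct cel *c = find_cel(csi);

	if (c)
		return c->log;		/* cache hit: no device round-trip */
	c = calloc(1, sizeof(*c));
	if (!c)
		return NULL;
	c->csi = csi;
	snprintf(c->log, sizeof(c->log), "log-for-%u", csi);	/* pretend fetch */
	c->next = cels;
	cels = c;
	return c->log;
}

int main(void)
{
	puts(get_effects_log(0));
	puts(get_effects_log(0));	/* second call served from the cache */
	return 0;
}
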
/*
@@ -2873,7 +3025,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
return ret;
}
page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
- ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
+ ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
if (ctrl->vs >= NVME_VS(1, 1, 0))
ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
@@ -2885,7 +3037,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
- ret = nvme_get_effects_log(ctrl);
+ ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
if (ret < 0)
goto out_free;
}
@@ -2947,7 +3099,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (id->rtd3e) {
/* us -> s */
- u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;
+ u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
shutdown_timeout, 60);
@@ -3353,6 +3505,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
[NVME_CTRL_RESETTING] = "resetting",
[NVME_CTRL_CONNECTING] = "connecting",
[NVME_CTRL_DELETING] = "deleting",
+ [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
[NVME_CTRL_DEAD] = "dead",
};
@@ -3405,6 +3558,66 @@ static ssize_t nvme_sysfs_show_address(struct device *dev,
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
+static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (ctrl->opts->max_reconnects == -1)
+ return sprintf(buf, "off\n");
+ return sprintf(buf, "%d\n",
+ opts->max_reconnects * opts->reconnect_delay);
+}
+
+static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ int ctrl_loss_tmo, err;
+
+ err = kstrtoint(buf, 10, &ctrl_loss_tmo);
+ if (err)
+ return -EINVAL;
+
+ else if (ctrl_loss_tmo < 0)
+ opts->max_reconnects = -1;
+ else
+ opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
+ opts->reconnect_delay);
+ return count;
+}
+static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
+ nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
+
+static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->opts->reconnect_delay == -1)
+ return sprintf(buf, "off\n");
+ return sprintf(buf, "%d\n", ctrl->opts->reconnect_delay);
+}
+
+static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ unsigned int v;
+ int err;
+
+ err = kstrtou32(buf, 10, &v);
+ if (err)
+ return err;
+
+ ctrl->opts->reconnect_delay = v;
+ return count;
+}
+static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
+ nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
+
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -3422,6 +3635,8 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_sqsize.attr,
&dev_attr_hostnqn.attr,
&dev_attr_hostid.attr,
+ &dev_attr_ctrl_loss_tmo.attr,
+ &dev_attr_reconnect_delay.attr,
NULL
};
@@ -3518,6 +3733,13 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
goto out_cleanup_srcu;
}
+ if (head->ids.csi) {
+ ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
+ if (ret)
+ goto out_cleanup_srcu;
+ } else
+ head->effects = ctrl->effects;
+
ret = nvme_mpath_alloc_disk(ctrl, head);
if (ret)
goto out_cleanup_srcu;
@@ -3599,7 +3821,7 @@ static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
return nsa->head->ns_id - nsb->head->ns_id;
}
-static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns, *ret = NULL;
@@ -3617,6 +3839,7 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
up_read(&ctrl->namespaces_rwsem);
return ret;
}
+EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
@@ -3734,7 +3957,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_mpath_clear_current_path(ns);
synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
- if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
+ if (ns->disk->flags & GENHD_FL_UP) {
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
if (blk_get_integrity(ns->disk))
@@ -3765,7 +3988,7 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
- if (ns->disk && revalidate_disk(ns->disk))
+ if (revalidate_disk(ns->disk))
nvme_ns_remove(ns);
nvme_put_ns(ns);
} else
@@ -3858,8 +4081,8 @@ static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
* raced with us in reading the log page, which could cause us to miss
* updates.
*/
- error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
- log_size, 0);
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
+ NVME_CSI_NVM, log, log_size, 0);
if (error)
dev_warn(ctrl->device,
"reading changed ns log failed: %d\n", error);
@@ -3920,6 +4143,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
if (ctrl->state == NVME_CTRL_DEAD)
nvme_kill_queues(ctrl);
+ /* this is a no-op when called from the controller reset handler */
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
+
down_write(&ctrl->namespaces_rwsem);
list_splice_init(&ctrl->namespaces, &ns_list);
up_write(&ctrl->namespaces_rwsem);
@@ -4003,8 +4229,8 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
if (!log)
return;
- if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
- sizeof(*log), 0))
+ if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
+ log, sizeof(*log), 0))
dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
kfree(log);
}
@@ -4114,8 +4340,7 @@ EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
- if (ctrl->kato)
- nvme_start_keep_alive(ctrl);
+ nvme_start_keep_alive(ctrl);
nvme_enable_aen(ctrl);
@@ -4141,11 +4366,16 @@ static void nvme_free_ctrl(struct device *dev)
struct nvme_ctrl *ctrl =
container_of(dev, struct nvme_ctrl, ctrl_device);
struct nvme_subsystem *subsys = ctrl->subsys;
+ struct nvme_cel *cel, *next;
if (subsys && ctrl->instance != subsys->instance)
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
- kfree(ctrl->effects);
+ list_for_each_entry_safe(cel, next, &ctrl->cels, entry) {
+ list_del(&cel->entry);
+ kfree(cel);
+ }
+
nvme_mpath_uninit(ctrl);
__free_page(ctrl->discard_page);
@@ -4176,6 +4406,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
+ INIT_LIST_HEAD(&ctrl->cels);
init_rwsem(&ctrl->namespaces_rwsem);
ctrl->dev = dev;
ctrl->ops = ops;
@@ -4354,6 +4585,29 @@ void nvme_sync_queues(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);
+struct nvme_ctrl *nvme_ctrl_get_by_path(const char *path)
+{
+ struct nvme_ctrl *ctrl;
+ struct file *f;
+
+ f = filp_open(path, O_RDWR, 0);
+ if (IS_ERR(f))
+ return ERR_CAST(f);
+
+ if (f->f_op != &nvme_dev_fops) {
+ ctrl = ERR_PTR(-EINVAL);
+ goto out_close;
+ }
+
+ ctrl = f->private_data;
+ nvme_get_ctrl(ctrl);
+
+out_close:
+ filp_close(f, NULL);
+ return ctrl;
+}
+EXPORT_SYMBOL_NS_GPL(nvme_ctrl_get_by_path, NVME_TARGET_PASSTHRU);
+
/*
* Check we didn't inadvertently grow the command structure sizes:
*/
@@ -4372,6 +4626,8 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 2a6c8190eeb7..4ec4829d6233 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -547,7 +547,7 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq)
{
- if (ctrl->state != NVME_CTRL_DELETING &&
+ if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
ctrl->state != NVME_CTRL_DEAD &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a0ec40ab62ee..a9c1e3b4585e 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -182,7 +182,8 @@ bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
- if (likely(ctrl->state == NVME_CTRL_LIVE))
+ if (likely(ctrl->state == NVME_CTRL_LIVE ||
+ ctrl->state == NVME_CTRL_DELETING))
return true;
return __nvmf_check_ready(ctrl, rq, queue_live);
}
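
Taken together, the two fabrics changes split controller deletion into two phases: in NVME_CTRL_DELETING commands still flow (checked as ready alongside LIVE) so queued I/O can complete over a live connection, and only in NVME_CTRL_DELETING_NOIO or DEAD are non-ready commands failed instead of requeued. A condensed model of the two gates (state names mirror the driver; the multipath/noretry checks are omitted and the block status codes are reduced to strings):

        #include <stdbool.h>
        #include <stdio.h>

        enum state { LIVE, RESETTING, CONNECTING, DELETING, DELETING_NOIO, DEAD };

        /* Mirrors the fast path of nvmf_check_ready(). */
        static bool check_ready(enum state s)
        {
                return s == LIVE || s == DELETING;
        }

        /* Mirrors the decision in nvmf_fail_nonready_command(). */
        static const char *fail_nonready(enum state s)
        {
                if (s != DELETING_NOIO && s != DEAD)
                        return "BLK_STS_RESOURCE (requeue)";
                return "failed with a host path error";
        }

        int main(void)
        {
                printf("DELETING:      ready=%d, nonready -> %s\n",
                       check_ready(DELETING), fail_nonready(DELETING));
                printf("DELETING_NOIO: ready=%d, nonready -> %s\n",
                       check_ready(DELETING_NOIO), fail_nonready(DELETING_NOIO));
                return 0;
        }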
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index e999a8c4b7e8..eae43bb444e0 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -227,6 +227,7 @@ static DECLARE_COMPLETION(nvme_fc_unload_proceed);
*/
static struct device *fc_udev_device;
+static void nvme_fc_complete_rq(struct request *rq);
/* *********************** FC-NVME Port Management ************************ */
@@ -825,6 +826,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
break;
case NVME_CTRL_DELETING:
+ case NVME_CTRL_DELETING_NOIO:
default:
/* no action to take - let it delete */
break;
@@ -2033,7 +2035,8 @@ done:
}
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
- nvme_end_request(rq, status, result);
+ if (!nvme_end_request(rq, status, result))
+ nvme_fc_complete_rq(rq);
check_error:
if (terminate_assoc)
@@ -2999,8 +3002,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_disconnect_admin_queue;
- ctrl->ctrl.max_hw_sectors =
- (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
+ ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
+ ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
+ (ilog2(SZ_4K) - 9);
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
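
The new max_hw_sectors cap above assumes one 4K page per SGL segment: ilog2(SZ_4K) - 9 == 3, so each segment contributes eight 512-byte sectors. A quick arithmetic check (the segment count is an invented lport limit):

        #include <stdio.h>

        int main(void)
        {
                unsigned int max_segments = 256;        /* hypothetical lport limit */
                unsigned int shift = 12 - 9;            /* ilog2(SZ_4K) - 9 */
                unsigned int max_hw_sectors = max_segments << shift;

                /* 256 segments * 8 sectors = 2048 sectors = 1 MiB per command */
                printf("%u sectors (%u KiB)\n", max_hw_sectors, max_hw_sectors / 2);
                return 0;
        }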
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index 2e6477ed420f..412a6c97c0d8 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -62,7 +62,7 @@ static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
int ret;
ret = nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
- &data->log, sizeof(data->log), 0);
+ NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
return ret <= 0 ? ret : -EIO;
}
@@ -241,7 +241,8 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
err = nvme_hwmon_get_smart_log(data);
if (err) {
- dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+ dev_warn(ctrl->device,
+ "Failed to read smart log (error %d)\n", err);
devm_kfree(dev, data);
return;
}
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 69608755d415..8e562d0f2c30 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -593,8 +593,8 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
dev_meta_off = dev_meta;
ret = nvme_get_log(ctrl, ns->head->ns_id,
- NVME_NVM_LOG_REPORT_CHUNK, 0, dev_meta, len,
- offset);
+ NVME_NVM_LOG_REPORT_CHUNK, 0, NVME_CSI_NVM,
+ dev_meta, len, offset);
if (ret) {
dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
break;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 66509472fe06..3ded54d2c9c6 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -167,9 +167,18 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
- return ns->ctrl->state != NVME_CTRL_LIVE ||
- test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
- test_bit(NVME_NS_REMOVING, &ns->flags);
+ /*
+ * We don't treat NVME_CTRL_DELETING as a disabled path, since I/O should
+ * still be able to complete assuming that the controller is connected.
+ * Otherwise it will fail immediately and return to the requeue list.
+ */
+ if (ns->ctrl->state != NVME_CTRL_LIVE &&
+ ns->ctrl->state != NVME_CTRL_DELETING)
+ return true;
+ if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
+ test_bit(NVME_NS_REMOVING, &ns->flags))
+ return true;
+ return false;
}
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
@@ -246,6 +255,12 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
fallback = ns;
}
+ /* No optimized path found, re-check the current path */
+ if (!nvme_path_is_disabled(old) &&
+ old->ana_state == NVME_ANA_OPTIMIZED) {
+ found = old;
+ goto out;
+ }
if (!fallback)
return NULL;
found = fallback;
@@ -266,10 +281,13 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
struct nvme_ns *ns;
ns = srcu_dereference(head->current_path[node], &head->srcu);
- if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR && ns)
- ns = nvme_round_robin_path(head, node, ns);
- if (unlikely(!ns || !nvme_path_is_optimized(ns)))
- ns = __nvme_find_path(head, node);
+ if (unlikely(!ns))
+ return __nvme_find_path(head, node);
+
+ if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
+ return nvme_round_robin_path(head, node, ns);
+ if (unlikely(!nvme_path_is_optimized(ns)))
+ return __nvme_find_path(head, node);
return ns;
}
@@ -291,8 +309,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
return false;
}
-static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
- struct bio *bio)
+blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
{
struct nvme_ns_head *head = bio->bi_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
@@ -301,12 +318,11 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
int srcu_idx;
/*
- * The namespace might be going away and the bio might
- * be moved to a different queue via blk_steal_bios(),
- * so we need to use the bio_split pool from the original
- * queue to allocate the bvecs from.
+ * The namespace might be going away and the bio might be moved to a
+ * different queue via blk_steal_bios(), so we need to use the bio_split
+ * pool from the original queue to allocate the bvecs from.
*/
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
@@ -316,7 +332,7 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
trace_block_bio_remap(bio->bi_disk->queue, bio,
disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
- ret = direct_make_request(bio);
+ ret = submit_bio_noacct(bio);
} else if (nvme_available_path(head)) {
dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
@@ -353,7 +369,7 @@ static void nvme_requeue_work(struct work_struct *work)
* path.
*/
bio->bi_disk = head->disk;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
@@ -375,7 +391,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
return 0;
- q = blk_alloc_queue(nvme_ns_head_make_request, ctrl->numa_node);
+ q = blk_alloc_queue(ctrl->numa_node);
if (!q)
goto out;
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
@@ -529,7 +545,7 @@ static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
int error;
mutex_lock(&ctrl->ana_lock);
- error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
ctrl->ana_log_buf, ctrl->ana_log_size, 0);
if (error) {
dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
@@ -565,6 +581,9 @@ static void nvme_ana_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return;
+
nvme_read_ana_log(ctrl);
}
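
The round-robin fix above matters when every sibling path is non-optimized: before settling for the fallback, the current path is re-checked so an optimized path is never abandoned for a non-optimized one. A toy model of that selection loop (path names and ANA states are invented; the driver iterates a siblings list rather than an array):

        #include <stdbool.h>
        #include <stdio.h>

        enum ana { OPTIMIZED, NONOPTIMIZED, INACCESSIBLE };

        struct path { const char *name; enum ana state; bool disabled; };

        /*
         * Minimal model of nvme_round_robin_path(): scan the siblings after
         * "old" for an optimized path, remember a non-optimized fallback, and
         * re-check "old" itself before settling for the fallback.
         */
        static struct path *rr_path(struct path *p, int n, int old)
        {
                struct path *fallback = NULL;
                int i;

                for (i = (old + 1) % n; i != old; i = (i + 1) % n) {
                        if (p[i].disabled)
                                continue;
                        if (p[i].state == OPTIMIZED)
                                return &p[i];
                        if (!fallback && p[i].state == NONOPTIMIZED)
                                fallback = &p[i];
                }
                if (!p[old].disabled && p[old].state == OPTIMIZED)
                        return &p[old];         /* the new re-check of the old path */
                return fallback;
        }

        int main(void)
        {
                struct path p[] = {
                        { "nvme0c0n1", OPTIMIZED,    false },
                        { "nvme0c1n1", NONOPTIMIZED, false },
                };

                /* The scan from path 0 finds only a fallback, but the re-check
                 * keeps I/O on the still-optimized current path. */
                printf("chose %s\n", rr_path(p, 2, 0)->name);
                return 0;
        }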
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1de3f9b827aa..ebb8c3ed3885 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -37,6 +37,14 @@ extern unsigned int admin_timeout;
#define NVME_INLINE_METADATA_SG_CNT 1
#endif
+/*
+ * Default to a 4K page size, with the intention to update this
+ * path in the future to accommodate architectures with differing
+ * kernel and IO page sizes.
+ */
+#define NVME_CTRL_PAGE_SHIFT 12
+#define NVME_CTRL_PAGE_SIZE (1 << NVME_CTRL_PAGE_SHIFT)
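
Because the constant is fixed at 4K, the PRP math elsewhere in the series becomes independent of the host's PAGE_SIZE. A small illustration of the controller-page accounting (the transfer length is arbitrary):

        #include <stdio.h>

        #define NVME_CTRL_PAGE_SHIFT 12
        #define NVME_CTRL_PAGE_SIZE  (1 << NVME_CTRL_PAGE_SHIFT)

        int main(void)
        {
                unsigned int len = 64 * 1024;   /* hypothetical transfer length */
                unsigned int pages =
                        (len + NVME_CTRL_PAGE_SIZE - 1) >> NVME_CTRL_PAGE_SHIFT;

                /* 64 KiB is 16 controller pages even on a 64K-page host */
                printf("%u controller pages\n", pages);
                return 0;
        }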
+
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
@@ -129,6 +137,13 @@ enum nvme_quirks {
* Don't change the value of the temperature threshold feature
*/
NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),
+
+ /*
+ * The controller doesn't handle the Identify Namespace
+ * Identification Descriptor list subcommand despite claiming
+ * NVMe 1.3 compliance.
+ */
+ NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),
};
/*
@@ -173,12 +188,32 @@ static inline u16 nvme_req_qid(struct request *req)
*/
#define NVME_QUIRK_DELAY_AMOUNT 2300
+/*
+ * enum nvme_ctrl_state: Controller state
+ *
+ * @NVME_CTRL_NEW: New controller just allocated, initial state
+ * @NVME_CTRL_LIVE: Controller is connected and I/O capable
+ * @NVME_CTRL_RESETTING: Controller is resetting (or scheduled reset)
+ * @NVME_CTRL_CONNECTING: Controller is disconnected, now connecting the
+ * transport
+ * @NVME_CTRL_DELETING: Controller is deleting (or scheduled deletion)
+ * @NVME_CTRL_DELETING_NOIO: Controller is deleting and I/O is
+ * disabled/failed immediately rather than
+ * requeued. This state comes after all async
+ * event processing took place and before ns
+ * removal, while controller deletion progresses
+ * @NVME_CTRL_DEAD: Controller is non-present/unresponsive during
+ * shutdown or removal. In this case we forcibly
+ * kill all inflight I/O as they have no chance to
+ * complete
+ */
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
NVME_CTRL_RESETTING,
NVME_CTRL_CONNECTING,
NVME_CTRL_DELETING,
+ NVME_CTRL_DELETING_NOIO,
NVME_CTRL_DEAD,
};
@@ -191,6 +226,12 @@ struct nvme_fault_inject {
#endif
};
+struct nvme_cel {
+ struct list_head entry;
+ struct nvme_effects_log log;
+ u8 csi;
+};
+
struct nvme_ctrl {
bool comp_seen;
enum nvme_ctrl_state state;
@@ -228,10 +269,12 @@ struct nvme_ctrl {
u32 queue_count;
u64 cap;
- u32 page_size;
u32 max_hw_sectors;
u32 max_segments;
u32 max_integrity_segments;
+#ifdef CONFIG_BLK_DEV_ZONED
+ u32 max_zone_append;
+#endif
u16 crdt[3];
u16 oncs;
u16 oacs;
@@ -257,6 +300,7 @@ struct nvme_ctrl {
unsigned long quirks;
struct nvme_id_power_state psd[32];
struct nvme_effects_log *effects;
+ struct list_head cels;
struct work_struct scan_work;
struct work_struct async_event_work;
struct delayed_work ka_work;
@@ -339,6 +383,7 @@ struct nvme_ns_ids {
u8 eui64[8];
u8 nguid[16];
uuid_t uuid;
+ u8 csi;
};
/*
@@ -358,6 +403,7 @@ struct nvme_ns_head {
struct kref ref;
bool shared;
int instance;
+ struct nvme_effects_log *effects;
#ifdef CONFIG_NVME_MULTIPATH
struct gendisk *disk;
struct bio_list requeue_list;
@@ -395,6 +441,9 @@ struct nvme_ns {
u16 sgs;
u32 sws;
u8 pi_type;
+#ifdef CONFIG_BLK_DEV_ZONED
+ u64 zsze;
+#endif
unsigned long features;
unsigned long flags;
#define NVME_NS_REMOVING 0
@@ -474,7 +523,7 @@ static inline u32 nvme_bytes_to_numd(size_t len)
return (len >> 2) - 1;
}
-static inline void nvme_end_request(struct request *req, __le16 status,
+static inline bool nvme_end_request(struct request *req, __le16 status,
union nvme_result result)
{
struct nvme_request *rq = nvme_req(req);
@@ -483,7 +532,9 @@ static inline void nvme_end_request(struct request *req, __le16 status,
rq->result = result;
/* inject error when permitted by fault injection framework */
nvme_should_fail(req);
- blk_mq_complete_request(req);
+ if (unlikely(blk_should_fake_timeout(req->q)))
+ return true;
+ return blk_mq_complete_request_remote(req);
}
static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
@@ -558,8 +609,11 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
-int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
void *log, size_t size, u64 offset);
+struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
+ struct nvme_ns_head **head, int *srcu_idx);
+void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);
extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;
@@ -586,6 +640,7 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+blk_qc_t nvme_ns_head_submit_bio(struct bio *bio);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
@@ -694,6 +749,36 @@ static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
}
#endif /* CONFIG_NVME_MULTIPATH */
+#ifdef CONFIG_BLK_DEV_ZONED
+int nvme_update_zone_info(struct gendisk *disk, struct nvme_ns *ns,
+ unsigned lbaf);
+
+int nvme_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+
+blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmnd,
+ enum nvme_zone_mgmt_action action);
+#else
+#define nvme_report_zones NULL
+
+static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
+ struct request *req, struct nvme_command *cmnd,
+ enum nvme_zone_mgmt_action action)
+{
+ return BLK_STS_NOTSUPP;
+}
+
+static inline int nvme_update_zone_info(struct gendisk *disk,
+ struct nvme_ns *ns,
+ unsigned lbaf)
+{
+ dev_warn(ns->ctrl->device,
+ "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
+ return -EPROTONOSUPPORT;
+}
+#endif
+
#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
@@ -725,4 +810,11 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl);
static inline void nvme_hwmon_init(struct nvme_ctrl *ctrl) { }
#endif
+u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ u8 opcode);
+void nvme_execute_passthru_rq(struct request *rq);
+struct nvme_ctrl *nvme_ctrl_get_by_path(const char *path);
+struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
+void nvme_put_ns(struct nvme_ns *ns);
+
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b1d18f0633c7..ba725ae47305 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011-2014, Intel Corporation.
*/
+#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/async.h>
#include <linux/blkdev.h>
@@ -61,10 +62,10 @@ MODULE_PARM_DESC(sgl_threshold,
static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
.set = io_queue_depth_set,
- .get = param_get_int,
+ .get = param_get_uint,
};
-static int io_queue_depth = 1024;
+static unsigned int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
@@ -94,6 +95,10 @@ static unsigned int poll_queues;
module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
+static bool noacpi;
+module_param(noacpi, bool, 0444);
+MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
+
struct nvme_dev;
struct nvme_queue;
@@ -115,7 +120,7 @@ struct nvme_dev {
unsigned max_qid;
unsigned io_queues[HCTX_MAX_TYPES];
unsigned int num_vecs;
- int q_depth;
+ u16 q_depth;
int io_sqes;
u32 db_stride;
void __iomem *bar;
@@ -151,13 +156,14 @@ struct nvme_dev {
static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
- int n = 0, ret;
+ int ret;
+ u16 n;
- ret = kstrtoint(val, 10, &n);
+ ret = kstrtou16(val, 10, &n);
if (ret != 0 || n < 2)
return -EINVAL;
- return param_set_int(val, kp);
+ return param_set_ushort(val, kp);
}
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
@@ -345,10 +351,10 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
* as it only leads to a small amount of wasted memory for the lifetime of
* the I/O.
*/
-static int nvme_npages(unsigned size, struct nvme_dev *dev)
+static int nvme_pci_npages_prp(void)
{
- unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
- dev->ctrl.page_size);
+ unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE);
return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}
@@ -356,22 +362,18 @@ static int nvme_npages(unsigned size, struct nvme_dev *dev)
* Calculates the number of pages needed for the SGL segments. For example a 4k
* page can accommodate 256 SGL descriptors.
*/
-static int nvme_pci_npages_sgl(unsigned int num_seg)
+static int nvme_pci_npages_sgl(void)
{
- return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
+ return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
+ PAGE_SIZE);
}
-static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
- unsigned int size, unsigned int nseg, bool use_sgl)
+static size_t nvme_pci_iod_alloc_size(void)
{
- size_t alloc_size;
+ size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
- if (use_sgl)
- alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
- else
- alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);
-
- return alloc_size + sizeof(struct scatterlist) * nseg;
+ return sizeof(__le64 *) * npages +
+ sizeof(struct scatterlist) * NVME_MAX_SEGS;
}
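
With the limits fixed, the worst-case per-request allocation can be computed once. Plugging in the driver's constants at this point in the series (NVME_MAX_KB_SZ = 4096 and NVME_MAX_SEGS = 127, defined earlier in pci.c; the scatterlist size below is an x86-64 assumption) shows why the result still fits in one page, which the WARN_ON_ONCE() in nvme_probe() asserts:

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
        #define PAGE_SIZE               4096    /* assuming a 4K-page host */
        #define NVME_CTRL_PAGE_SIZE     4096
        #define NVME_MAX_KB_SZ          4096    /* 4 MiB max transfer */
        #define NVME_MAX_SEGS           127     /* max scatterlist entries */

        int main(void)
        {
                unsigned int nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ * 1024 +
                                                  NVME_CTRL_PAGE_SIZE,
                                                  NVME_CTRL_PAGE_SIZE);
                unsigned int prp_pages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
                unsigned int sgl_pages = DIV_ROUND_UP(NVME_MAX_SEGS * 16, PAGE_SIZE);
                unsigned int npages = prp_pages > sgl_pages ? prp_pages : sgl_pages;
                /* 32 bytes is a typical sizeof(struct scatterlist) on x86-64 */
                size_t alloc = sizeof(void *) * npages + 32 * NVME_MAX_SEGS;

                /* 3 PRP-list pages beat 1 SGL page; 4088 bytes fit in a page */
                printf("npages=%u, worst-case iod=%zu bytes\n", npages, alloc);
                return 0;
        }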
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -500,9 +502,6 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
int nseg = blk_rq_nr_phys_segments(req);
unsigned int avg_seg_size;
- if (nseg == 0)
- return false;
-
avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
@@ -517,7 +516,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
+ const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
int i;
@@ -584,34 +583,33 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
struct scatterlist *sg = iod->sg;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
- u32 page_size = dev->ctrl.page_size;
- int offset = dma_addr & (page_size - 1);
+ int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
__le64 *prp_list;
void **list = nvme_pci_iod_list(req);
dma_addr_t prp_dma;
int nprps, i;
- length -= (page_size - offset);
+ length -= (NVME_CTRL_PAGE_SIZE - offset);
if (length <= 0) {
iod->first_dma = 0;
goto done;
}
- dma_len -= (page_size - offset);
+ dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
if (dma_len) {
- dma_addr += (page_size - offset);
+ dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
} else {
sg = sg_next(sg);
dma_addr = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
}
- if (length <= page_size) {
+ if (length <= NVME_CTRL_PAGE_SIZE) {
iod->first_dma = dma_addr;
goto done;
}
- nprps = DIV_ROUND_UP(length, page_size);
+ nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
if (nprps <= (256 / 8)) {
pool = dev->prp_small_pool;
iod->npages = 0;
@@ -630,7 +628,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
iod->first_dma = prp_dma;
i = 0;
for (;;) {
- if (i == page_size >> 3) {
+ if (i == NVME_CTRL_PAGE_SIZE >> 3) {
__le64 *old_prp_list = prp_list;
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
@@ -641,9 +639,9 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
i = 1;
}
prp_list[i++] = cpu_to_le64(dma_addr);
- dma_len -= page_size;
- dma_addr += page_size;
- length -= page_size;
+ dma_len -= NVME_CTRL_PAGE_SIZE;
+ dma_addr += NVME_CTRL_PAGE_SIZE;
+ length -= NVME_CTRL_PAGE_SIZE;
if (length <= 0)
break;
if (dma_len > 0)
@@ -753,8 +751,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
struct bio_vec *bv)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
- unsigned int first_prp_len = dev->ctrl.page_size - offset;
+ unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
+ unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
if (dma_mapping_error(dev->dev, iod->first_dma))
@@ -764,7 +762,7 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
if (bv->bv_len > first_prp_len)
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
- return 0;
+ return BLK_STS_OK;
}
static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
@@ -782,7 +780,7 @@ static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
- return 0;
+ return BLK_STS_OK;
}
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -796,7 +794,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct bio_vec bv = req_bvec(req);
if (!is_pci_p2pdma_page(bv.bv_page)) {
- if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2)
+ if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
return nvme_setup_prp_simple(dev, req,
&cmnd->rw, &bv);
@@ -846,7 +844,7 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
if (dma_mapping_error(dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
- return 0;
+ return BLK_STS_OK;
}
/*
@@ -963,7 +961,8 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
- nvme_end_request(req, cqe->status, cqe->result);
+ if (!nvme_end_request(req, cqe->status, cqe->result))
+ nvme_pci_complete_rq(req);
}
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
@@ -1018,6 +1017,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
static irqreturn_t nvme_irq_check(int irq, void *data)
{
struct nvme_queue *nvmeq = data;
+
if (nvme_cqe_pending(nvmeq))
return IRQ_WAKE_THREAD;
return IRQ_NONE;
@@ -1153,7 +1153,6 @@ static void abort_endio(struct request *req, blk_status_t error)
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{
-
/* If true, indicates loss of adapter communication, possibly by a
* NVMe Subsystem reset.
*/
@@ -1260,9 +1259,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
}
/*
- * Shutdown the controller immediately and schedule a reset if the
- * command was already aborted once before and still hasn't been
- * returned to the driver, or if this is the admin queue.
+ * Shutdown the controller immediately and schedule a reset if the
+ * command was already aborted once before and still hasn't been
+ * returned to the driver, or if this is the admin queue.
*/
if (!nvmeq->qid || iod->aborted) {
dev_warn(dev->ctrl.device,
@@ -1397,11 +1396,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
{
int q_depth = dev->q_depth;
unsigned q_size_aligned = roundup(q_depth * entry_size,
- dev->ctrl.page_size);
+ NVME_CTRL_PAGE_SIZE);
if (q_size_aligned * nr_io_queues > dev->cmb_size) {
u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
- mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
+
+ mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
q_depth = div_u64(mem_per_q, entry_size);
/*
@@ -1816,6 +1816,7 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
+ u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
u64 dma_addr = dev->host_mem_descs_dma;
struct nvme_command c;
int ret;
@@ -1824,8 +1825,7 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
c.features.opcode = nvme_admin_set_features;
c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
c.features.dword11 = cpu_to_le32(bits);
- c.features.dword12 = cpu_to_le32(dev->host_mem_size >>
- ilog2(dev->ctrl.page_size));
+ c.features.dword12 = cpu_to_le32(host_mem_size);
c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr));
c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr));
c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);
@@ -1845,7 +1845,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
for (i = 0; i < dev->nr_host_mem_descs; i++) {
struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
- size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
+ size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;
dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
le64_to_cpu(desc->addr),
@@ -1897,7 +1897,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
break;
descs[i].addr = cpu_to_le64(dma_addr);
- descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
+ descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE);
i++;
}
@@ -1913,7 +1913,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
out_free_bufs:
while (--i >= 0) {
- size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
+ size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
dma_free_attrs(dev->dev, size, bufs[i],
le64_to_cpu(descs[i].addr),
@@ -1931,12 +1931,12 @@ out:
static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
- u32 chunk_size;
+ u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
+ u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
+ u64 chunk_size;
/* start big and work our way down */
- for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
- chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
- chunk_size /= 2) {
+ for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
if (!min || dev->host_mem_size >= min)
return 0;
@@ -2002,7 +2002,7 @@ static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
/*
- * If there is no interupt available for queues, ensure that
+ * If there is no interrupt available for queues, ensure that
* the default queue is set to 1. The affinity set size is
* also set to one, but the irq core ignores it for this case.
*
@@ -2260,8 +2260,8 @@ static void nvme_dev_add(struct nvme_dev *dev)
dev->tagset.nr_maps++;
dev->tagset.timeout = NVME_IO_TIMEOUT;
dev->tagset.numa_node = dev->ctrl.numa_node;
- dev->tagset.queue_depth =
- min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
+ dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
+ BLK_MQ_MAX_DEPTH) - 1;
dev->tagset.cmd_size = sizeof(struct nvme_iod);
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
@@ -2320,7 +2320,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
- dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
+ dev->q_depth = min_t(u16, NVME_CAP_MQES(dev->ctrl.cap) + 1,
io_queue_depth);
dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
@@ -2759,6 +2759,54 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
return 0;
}
+#ifdef CONFIG_ACPI
+static bool nvme_acpi_storage_d3(struct pci_dev *dev)
+{
+ struct acpi_device *adev;
+ struct pci_dev *root;
+ acpi_handle handle;
+ acpi_status status;
+ u8 val;
+
+ /*
+ * Look for the _DSD property specifying that the storage device on the port
+ * must use D3 to support deep platform power savings during
+ * suspend-to-idle.
+ */
+ root = pcie_find_root_port(dev);
+ if (!root)
+ return false;
+
+ adev = ACPI_COMPANION(&root->dev);
+ if (!adev)
+ return false;
+
+ /*
+ * The property is defined in the PXSX device for South complex ports
+ * and in the PEGP device for North complex ports.
+ */
+ status = acpi_get_handle(adev->handle, "PXSX", &handle);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_get_handle(adev->handle, "PEGP", &handle);
+ if (ACPI_FAILURE(status))
+ return false;
+ }
+
+ if (acpi_bus_get_device(handle, &adev))
+ return false;
+
+ if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
+ &val))
+ return false;
+ return val == 1;
+}
+#else
+static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
+{
+ return false;
+}
+#endif /* CONFIG_ACPI */
+
static void nvme_async_probe(void *data, async_cookie_t cookie)
{
struct nvme_dev *dev = data;
@@ -2808,12 +2856,21 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
quirks |= check_vendor_combination_bug(pdev);
+ if (!noacpi && nvme_acpi_storage_d3(pdev)) {
+ /*
+ * Some systems use a BIOS workaround to ask for D3 on
+ * platforms that support kernel managed suspend.
+ */
+ dev_info(&pdev->dev,
+ "platform quirk: setting simple suspend\n");
+ quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
+ }
+
/*
* Double check that our mempool alloc size will cover the biggest
* command we support.
*/
- alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
- NVME_MAX_SEGS, true);
+ alloc_size = nvme_pci_iod_alloc_size();
WARN_ON_ONCE(alloc_size > PAGE_SIZE);
dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
@@ -2875,6 +2932,7 @@ static void nvme_reset_done(struct pci_dev *pdev)
static void nvme_shutdown(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
+
nvme_disable_prepare_reset(dev, true);
}
@@ -3005,6 +3063,7 @@ unfreeze:
static int nvme_simple_suspend(struct device *dev)
{
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
+
return nvme_disable_prepare_reset(ndev, true);
}
@@ -3078,16 +3137,16 @@ static const struct pci_error_handlers nvme_err_handler = {
};
static const struct pci_device_id nvme_id_table[] = {
- { PCI_VDEVICE(INTEL, 0x0953),
+ { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
- { PCI_VDEVICE(INTEL, 0x0a53),
+ { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
- { PCI_VDEVICE(INTEL, 0x0a54),
+ { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
- { PCI_VDEVICE(INTEL, 0x0a55),
+ { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
@@ -3099,6 +3158,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS |
NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
+ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
@@ -3122,6 +3183,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 13506a87a444..44c76ffbb264 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -96,6 +96,7 @@ struct nvme_rdma_queue {
int cm_error;
struct completion cm_done;
bool pi_support;
+ int cq_size;
};
struct nvme_rdma_ctrl {
@@ -149,6 +150,7 @@ MODULE_PARM_DESC(register_always,
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvme_rdma_complete_rq(struct request *rq);
static const struct blk_mq_ops nvme_rdma_mq_ops;
static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
@@ -274,6 +276,7 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
init_attr.recv_cq = queue->ib_cq;
if (queue->pi_support)
init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+ init_attr.qp_context = queue;
ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
@@ -408,6 +411,14 @@ out_err:
return NULL;
}
+static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
+{
+ if (nvme_rdma_poll_queue(queue))
+ ib_free_cq(queue->ib_cq);
+ else
+ ib_cq_pool_put(queue->ib_cq, queue->cq_size);
+}
+
static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
struct nvme_rdma_device *dev;
@@ -429,7 +440,7 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
* the destruction of the QP shouldn't use rdma_cm API.
*/
ib_destroy_qp(queue->qp);
- ib_free_cq(queue->ib_cq);
+ nvme_rdma_free_cq(queue);
nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
sizeof(struct nvme_completion), DMA_FROM_DEVICE);
@@ -449,13 +460,42 @@ static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
}
+static int nvme_rdma_create_cq(struct ib_device *ibdev,
+ struct nvme_rdma_queue *queue)
+{
+ int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
+ enum ib_poll_context poll_ctx;
+
+ /*
+ * Spread I/O queue completion vectors according to their queue index.
+ * Admin queues can always go on completion vector 0.
+ */
+ comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
+
+ /* Polling queues need direct cq polling context */
+ if (nvme_rdma_poll_queue(queue)) {
+ poll_ctx = IB_POLL_DIRECT;
+ queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
+ comp_vector, poll_ctx);
+ } else {
+ poll_ctx = IB_POLL_SOFTIRQ;
+ queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
+ comp_vector, poll_ctx);
+ }
+
+ if (IS_ERR(queue->ib_cq)) {
+ ret = PTR_ERR(queue->ib_cq);
+ return ret;
+ }
+
+ return 0;
+}
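
Since pooled CQs may now be shared, the completion handlers later in this patch recover their queue from wc->qp->qp_context instead of cq->cq_context, which is why the QP creation above stores the queue as qp_context. The vector spreading itself is unchanged; a quick table of how queue indexes land on completion vectors (the vector count is invented):

        #include <stdio.h>

        int main(void)
        {
                int num_comp_vectors = 4;       /* hypothetical device */
                int idx;

                /* Admin queue (idx 0) lands on vector 0; I/O queues spread from 0 */
                for (idx = 0; idx < 6; idx++)
                        printf("queue %d -> vector %d\n", idx,
                               (idx == 0 ? idx : idx - 1) % num_comp_vectors);
                return 0;
        }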
+
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
{
struct ib_device *ibdev;
const int send_wr_factor = 3; /* MR, SEND, INV */
const int cq_factor = send_wr_factor + 1; /* + RECV */
- int comp_vector, idx = nvme_rdma_queue_idx(queue);
- enum ib_poll_context poll_ctx;
int ret, pages_per_mr;
queue->device = nvme_rdma_find_get_device(queue->cm_id);
@@ -466,26 +506,12 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
}
ibdev = queue->device->dev;
- /*
- * Spread I/O queues completion vectors according their queue index.
- * Admin queues can always go on completion vector 0.
- */
- comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
-
- /* Polling queues need direct cq polling context */
- if (nvme_rdma_poll_queue(queue))
- poll_ctx = IB_POLL_DIRECT;
- else
- poll_ctx = IB_POLL_SOFTIRQ;
-
/* +1 for ib_stop_cq */
- queue->ib_cq = ib_alloc_cq(ibdev, queue,
- cq_factor * queue->queue_size + 1,
- comp_vector, poll_ctx);
- if (IS_ERR(queue->ib_cq)) {
- ret = PTR_ERR(queue->ib_cq);
+ queue->cq_size = cq_factor * queue->queue_size + 1;
+
+ ret = nvme_rdma_create_cq(ibdev, queue);
+ if (ret)
goto out_put_dev;
- }
ret = nvme_rdma_create_qp(queue, send_wr_factor);
if (ret)
@@ -511,7 +537,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
if (ret) {
dev_err(queue->ctrl->ctrl.device,
"failed to initialize MR pool sized %d for QID %d\n",
- queue->queue_size, idx);
+ queue->queue_size, nvme_rdma_queue_idx(queue));
goto out_destroy_ring;
}
@@ -522,7 +548,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
if (ret) {
dev_err(queue->ctrl->ctrl.device,
"failed to initialize PI MR pool sized %d for QID %d\n",
- queue->queue_size, idx);
+ queue->queue_size, nvme_rdma_queue_idx(queue));
goto out_destroy_mr_pool;
}
}
@@ -539,7 +565,7 @@ out_destroy_ring:
out_destroy_qp:
rdma_destroy_qp(queue->cm_id);
out_destroy_ib_cq:
- ib_free_cq(queue->ib_cq);
+ nvme_rdma_free_cq(queue);
out_put_dev:
nvme_rdma_dev_put(queue->device);
return ret;
@@ -941,15 +967,20 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
ret = PTR_ERR(ctrl->ctrl.connect_q);
goto out_free_tag_set;
}
- } else {
- blk_mq_update_nr_hw_queues(&ctrl->tag_set,
- ctrl->ctrl.queue_count - 1);
}
ret = nvme_rdma_start_io_queues(ctrl);
if (ret)
goto out_cleanup_connect_q;
+ if (!new) {
+ nvme_start_queues(&ctrl->ctrl);
+ nvme_wait_freeze(&ctrl->ctrl);
+ blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
+ ctrl->ctrl.queue_count - 1);
+ nvme_unfreeze(&ctrl->ctrl);
+ }
+
return 0;
out_cleanup_connect_q:
@@ -982,6 +1013,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
if (ctrl->ctrl.queue_count > 1) {
+ nvme_start_freeze(&ctrl->ctrl);
nvme_stop_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
if (ctrl->ctrl.tagset) {
@@ -1076,11 +1108,12 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
if (!changed) {
/*
- * state change failure is ok if we're in DELETING state,
+ * state change failure is ok if we started ctrl delete,
* unless we're during creation of a new controller to
* avoid races with teardown flow.
*/
- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
+ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
+ ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
@@ -1133,8 +1166,9 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
+ /* state change failure is ok if we started ctrl delete */
+ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
+ ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
return;
}
@@ -1149,10 +1183,20 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
queue_work(nvme_reset_wq, &ctrl->err_work);
}
+static void nvme_rdma_end_request(struct nvme_rdma_request *req)
+{
+ struct request *rq = blk_mq_rq_from_pdu(req);
+
+ if (!refcount_dec_and_test(&req->ref))
+ return;
+ if (!nvme_end_request(rq, req->status, req->result))
+ nvme_rdma_complete_rq(rq);
+}
+
static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
const char *op)
{
- struct nvme_rdma_queue *queue = cq->cq_context;
+ struct nvme_rdma_queue *queue = wc->qp->qp_context;
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -1173,16 +1217,11 @@ static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvme_rdma_request *req =
container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
- struct request *rq = blk_mq_rq_from_pdu(req);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (unlikely(wc->status != IB_WC_SUCCESS))
nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
- return;
- }
-
- if (refcount_dec_and_test(&req->ref))
- nvme_end_request(rq, req->status, req->result);
-
+ else
+ nvme_rdma_end_request(req);
}
static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
@@ -1547,15 +1586,11 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
struct nvme_rdma_request *req =
container_of(qe, struct nvme_rdma_request, sqe);
- struct request *rq = blk_mq_rq_from_pdu(req);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (unlikely(wc->status != IB_WC_SUCCESS))
nvme_rdma_wr_error(cq, wc, "SEND");
- return;
- }
-
- if (refcount_dec_and_test(&req->ref))
- nvme_end_request(rq, req->status, req->result);
+ else
+ nvme_rdma_end_request(req);
}
static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
@@ -1697,15 +1732,14 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
return;
}
- if (refcount_dec_and_test(&req->ref))
- nvme_end_request(rq, req->status, req->result);
+ nvme_rdma_end_request(req);
}
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvme_rdma_qe *qe =
container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
- struct nvme_rdma_queue *queue = cq->cq_context;
+ struct nvme_rdma_queue *queue = wc->qp->qp_context;
struct ib_device *ibdev = queue->device->dev;
struct nvme_completion *cqe = qe->data;
const size_t len = sizeof(struct nvme_completion);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 79ef2b8e2b3c..62fbaecdc960 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -46,6 +46,7 @@ struct nvme_tcp_request {
u32 pdu_sent;
u16 ttag;
struct list_head entry;
+ struct llist_node lentry;
__le32 ddgst;
struct bio *curr_bio;
@@ -75,9 +76,10 @@ struct nvme_tcp_queue {
struct work_struct io_work;
int io_cpu;
- spinlock_t lock;
struct mutex send_mutex;
+ struct llist_head req_list;
struct list_head send_list;
+ bool more_requests;
/* recv state */
void *pdu;
@@ -261,15 +263,13 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
}
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
- bool sync)
+ bool sync, bool last)
{
struct nvme_tcp_queue *queue = req->queue;
bool empty;
- spin_lock(&queue->lock);
- empty = list_empty(&queue->send_list) && !queue->request;
- list_add_tail(&req->entry, &queue->send_list);
- spin_unlock(&queue->lock);
+ empty = llist_add(&req->lentry, &queue->req_list) &&
+ list_empty(&queue->send_list) && !queue->request;
/*
* if we're the first on the send_list and we can try to send
@@ -278,25 +278,42 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
*/
if (queue->io_cpu == smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
+ queue->more_requests = !last;
nvme_tcp_try_send(queue);
+ queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
- } else {
+ } else if (last) {
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
}
+static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_request *req;
+ struct llist_node *node;
+
+ for (node = llist_del_all(&queue->req_list); node; node = node->next) {
+ req = llist_entry(node, struct nvme_tcp_request, lentry);
+ list_add(&req->entry, &queue->send_list);
+ }
+}
+
static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_request *req;
- spin_lock(&queue->lock);
req = list_first_entry_or_null(&queue->send_list,
struct nvme_tcp_request, entry);
- if (req)
- list_del(&req->entry);
- spin_unlock(&queue->lock);
+ if (!req) {
+ nvme_tcp_process_req_list(queue);
+ req = list_first_entry_or_null(&queue->send_list,
+ struct nvme_tcp_request, entry);
+ if (unlikely(!req))
+ return NULL;
+ }
+ list_del(&req->entry);
return req;
}
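
The drain above relies on an ordering subtlety: llist_del_all() returns the nodes newest-first, and re-inserting each with list_add() (head insertion) reverses them again, so send_list ends up in submission (FIFO) order. A single-threaded user-space model of that double reversal (the driver's llist is lock-free; this sketch only shows the ordering):

        #include <stdio.h>

        struct node { int id; struct node *next; };

        /* Push mimics llist_add()/list_add(): new nodes go on the head. */
        static void push(struct node **head, struct node *n)
        {
                n->next = *head;
                *head = n;
        }

        int main(void)
        {
                struct node a = { 1 }, b = { 2 }, c = { 3 };
                struct node *req_list = NULL, *send_list = NULL, *n, *next;

                push(&req_list, &a);            /* submitted first */
                push(&req_list, &b);
                push(&req_list, &c);            /* submitted last */

                /* llist_del_all() would hand back c,b,a; head-inserting each
                 * into send_list reverses again, restoring a,b,c (FIFO). */
                for (n = req_list, req_list = NULL; n; n = next) {
                        next = n->next;
                        push(&send_list, n);
                }
                for (n = send_list; n; n = n->next)
                        printf("send %d\n", n->id);
                return 0;
        }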
@@ -464,7 +481,8 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
return -EINVAL;
}
- nvme_end_request(rq, cqe->status, cqe->result);
+ if (!nvme_end_request(rq, cqe->status, cqe->result))
+ nvme_complete_rq(rq);
queue->nr_cqe++;
return 0;
@@ -595,7 +613,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
req->state = NVME_TCP_SEND_H2C_PDU;
req->offset = 0;
- nvme_tcp_queue_request(req, false);
+ nvme_tcp_queue_request(req, false, true);
return 0;
}
@@ -654,7 +672,8 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
union nvme_result res = {};
- nvme_end_request(rq, cpu_to_le16(status << 1), res);
+ if (!nvme_end_request(rq, cpu_to_le16(status << 1), res))
+ nvme_complete_rq(rq);
}
static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
@@ -861,6 +880,12 @@ done:
read_unlock(&sk->sk_callback_lock);
}
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+ return !list_empty(&queue->send_list) ||
+ !llist_empty(&queue->req_list) || queue->more_requests;
+}
+
static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
queue->request = NULL;
@@ -882,7 +907,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
bool last = nvme_tcp_pdu_last_send(req, len);
int ret, flags = MSG_DONTWAIT;
- if (last && !queue->data_digest)
+ if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
flags |= MSG_EOR;
else
flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
@@ -929,7 +954,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
int flags = MSG_DONTWAIT;
int ret;
- if (inline_data)
+ if (inline_data || nvme_tcp_queue_more(queue))
flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
else
flags |= MSG_EOR;
@@ -994,12 +1019,17 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
int ret;
- struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
.iov_base = &req->ddgst + req->offset,
.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
};
+ if (nvme_tcp_queue_more(queue))
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags |= MSG_EOR;
+
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (unlikely(ret <= 0))
return ret;
@@ -1342,8 +1372,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
int ret, rcv_pdu_size;
queue->ctrl = ctrl;
+ init_llist_head(&queue->req_list);
INIT_LIST_HEAD(&queue->send_list);
- spin_lock_init(&queue->lock);
mutex_init(&queue->send_mutex);
INIT_WORK(&queue->io_work, nvme_tcp_io_work);
queue->queue_size = queue_size;
@@ -1382,6 +1412,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
if (nctrl->opts->tos >= 0)
ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
+ /* Set a 10 second timeout for the icresp recvmsg */
+ queue->sock->sk->sk_rcvtimeo = 10 * HZ;
+
queue->sock->sk->sk_allocation = GFP_ATOMIC;
nvme_tcp_set_queue_io_cpu(queue);
queue->request = NULL;
@@ -1741,15 +1774,20 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
ret = PTR_ERR(ctrl->connect_q);
goto out_free_tag_set;
}
- } else {
- blk_mq_update_nr_hw_queues(ctrl->tagset,
- ctrl->queue_count - 1);
}
ret = nvme_tcp_start_io_queues(ctrl);
if (ret)
goto out_cleanup_connect_q;
+ if (!new) {
+ nvme_start_queues(ctrl);
+ nvme_wait_freeze(ctrl);
+ blk_mq_update_nr_hw_queues(ctrl->tagset,
+ ctrl->queue_count - 1);
+ nvme_unfreeze(ctrl);
+ }
+
return 0;
out_cleanup_connect_q:
@@ -1854,6 +1892,7 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
{
if (ctrl->queue_count <= 1)
return;
+ nvme_start_freeze(ctrl);
nvme_stop_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
if (ctrl->tagset) {
@@ -1920,11 +1959,12 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
/*
- * state change failure is ok if we're in DELETING state,
+ * state change failure is ok if we started ctrl delete,
* unless we're during creation of a new controller to
* avoid races with teardown flow.
*/
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DELETING_NOIO);
WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
@@ -1980,8 +2020,9 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
blk_mq_unquiesce_queue(ctrl->admin_q);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ /* state change failure is ok if we started ctrl delete */
+ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DELETING_NOIO);
return;
}
@@ -2016,8 +2057,9 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
nvme_tcp_teardown_ctrl(ctrl, false);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ /* state change failure is ok if we started ctrl delete */
+ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DELETING_NOIO);
return;
}
@@ -2104,7 +2146,7 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
ctrl->async_req.curr_bio = NULL;
ctrl->async_req.data_len = 0;
- nvme_tcp_queue_request(&ctrl->async_req, true);
+ nvme_tcp_queue_request(&ctrl->async_req, true, true);
}
static enum blk_eh_timer_return
@@ -2216,6 +2258,14 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
return 0;
}
+static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct nvme_tcp_queue *queue = hctx->driver_data;
+
+ if (!llist_empty(&queue->req_list))
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+}
+
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -2235,7 +2285,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- nvme_tcp_queue_request(req, true);
+ nvme_tcp_queue_request(req, true, bd->last);
return BLK_STS_OK;
}
@@ -2303,6 +2353,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
static const struct blk_mq_ops nvme_tcp_mq_ops = {
.queue_rq = nvme_tcp_queue_rq,
+ .commit_rqs = nvme_tcp_commit_rqs,
.complete = nvme_complete_rq,
.init_request = nvme_tcp_init_request,
.exit_request = nvme_tcp_exit_request,
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
new file mode 100644
index 000000000000..57cfd78731fb
--- /dev/null
+++ b/drivers/nvme/host/zns.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+#include "nvme.h"
+
+static int nvme_set_max_append(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command c = { };
+ struct nvme_id_ctrl_zns *id;
+ int status;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return -ENOMEM;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.cns = NVME_ID_CNS_CS_CTRL;
+ c.identify.csi = NVME_CSI_ZNS;
+
+ status = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (status) {
+ kfree(id);
+ return status;
+ }
+
+ if (id->zasl)
+ ctrl->max_zone_append = 1 << (id->zasl + 3);
+ else
+ ctrl->max_zone_append = ctrl->max_hw_sectors;
+ kfree(id);
+ return 0;
+}
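
ZASL expresses the zone append limit as a power-of-two multiple of the controller's minimum page size, so with the driver's fixed 4K controller page the conversion to 512-byte sectors above is a shift by zasl + 3. A worked value (the zasl is an invented Identify result):

        #include <stdio.h>

        int main(void)
        {
                unsigned char zasl = 5;         /* hypothetical Identify value */

                /* 2^5 * 4 KiB = 128 KiB = 256 sectors of 512 bytes */
                printf("max zone append: %u sectors\n", 1u << (zasl + 3));
                return 0;
        }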
+
+int nvme_update_zone_info(struct gendisk *disk, struct nvme_ns *ns,
+ unsigned lbaf)
+{
+ struct nvme_effects_log *log = ns->head->effects;
+ struct request_queue *q = disk->queue;
+ struct nvme_command c = { };
+ struct nvme_id_ns_zns *id;
+ int status;
+
+ /* Driver requires zone append support */
+ if (!(le32_to_cpu(log->iocs[nvme_cmd_zone_append]) &
+ NVME_CMD_EFFECTS_CSUPP)) {
+ dev_warn(ns->ctrl->device,
+ "append not supported for zoned namespace:%d\n",
+ ns->head->ns_id);
+ return -EINVAL;
+ }
+
+ /* Lazily query controller append limit for the first zoned namespace */
+ if (!ns->ctrl->max_zone_append) {
+ status = nvme_set_max_append(ns->ctrl);
+ if (status)
+ return status;
+ }
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return -ENOMEM;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(ns->head->ns_id);
+ c.identify.cns = NVME_ID_CNS_CS_NS;
+ c.identify.csi = NVME_CSI_ZNS;
+
+ status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, id, sizeof(*id));
+ if (status)
+ goto free_data;
+
+ /*
+ * We currently do not handle devices requiring any of the zoned
+ * operation characteristics.
+ */
+ if (id->zoc) {
+ dev_warn(ns->ctrl->device,
+ "zone operations:%x not supported for namespace:%u\n",
+ le16_to_cpu(id->zoc), ns->head->ns_id);
+ status = -EINVAL;
+ goto free_data;
+ }
+
+ ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze));
+ if (!is_power_of_2(ns->zsze)) {
+ dev_warn(ns->ctrl->device,
+ "invalid zone size:%llu for namespace:%u\n",
+ ns->zsze, ns->head->ns_id);
+ status = -EINVAL;
+ goto free_data;
+ }
+
+ q->limits.zoned = BLK_ZONED_HM;
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1);
+ blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1);
+free_data:
+ kfree(id);
+ return status;
+}
+
+static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
+ unsigned int nr_zones, size_t *buflen)
+{
+ struct request_queue *q = ns->disk->queue;
+ size_t bufsize;
+ void *buf;
+
+ const size_t min_bufsize = sizeof(struct nvme_zone_report) +
+ sizeof(struct nvme_zone_descriptor);
+
+ nr_zones = min_t(unsigned int, nr_zones,
+ get_capacity(ns->disk) >> ilog2(ns->zsze));
+
+ bufsize = sizeof(struct nvme_zone_report) +
+ nr_zones * sizeof(struct nvme_zone_descriptor);
+ bufsize = min_t(size_t, bufsize,
+ queue_max_hw_sectors(q) << SECTOR_SHIFT);
+ bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
+
+ while (bufsize >= min_bufsize) {
+ buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
+ if (buf) {
+ *buflen = bufsize;
+ return buf;
+ }
+ bufsize >>= 1;
+ }
+ return NULL;
+}
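The report buffer is sized for the requested number of zones, clamped to what the queue can transfer in one command, then halved until the allocation succeeds; __GFP_NORETRY keeps each attempt from triggering heavy reclaim. The same back-off pattern in isolation, as a hypothetical helper:

static void *alloc_with_backoff(size_t max, size_t min, size_t *out_len)
{
	size_t len;
	void *buf;

	for (len = max; len >= min; len >>= 1) {
		/* fail fast instead of stalling in reclaim */
		buf = __vmalloc(len, GFP_KERNEL | __GFP_NORETRY);
		if (buf) {
			*out_len = len;
			return buf;
		}
	}
	return NULL;
}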
+
+static int __nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ struct nvme_zone_report *report,
+ size_t buflen)
+{
+ struct nvme_command c = { };
+ int ret;
+
+ c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
+ c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
+ c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
+ c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
+ c.zmr.zra = NVME_ZRA_ZONE_REPORT;
+ c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
+ c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
+
+ ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
+ if (ret)
+ return ret;
+
+ return le64_to_cpu(report->nr_zones);
+}
+
+static int nvme_zone_parse_entry(struct nvme_ns *ns,
+ struct nvme_zone_descriptor *entry,
+ unsigned int idx, report_zones_cb cb,
+ void *data)
+{
+ struct blk_zone zone = { };
+
+ if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
+ dev_err(ns->ctrl->device, "invalid zone type %#x\n",
+ entry->zt);
+ return -EINVAL;
+ }
+
+ zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ zone.cond = entry->zs >> 4;
+ zone.len = ns->zsze;
+ zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
+ zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
+ zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+
+ return cb(&zone, idx, data);
+}
+
+static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct nvme_zone_report *report;
+ int ret, zone_idx = 0;
+ unsigned int nz, i;
+ size_t buflen;
+
+ report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen);
+ if (!report)
+ return -ENOMEM;
+
+ sector &= ~(ns->zsze - 1);
+ while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
+ memset(report, 0, buflen);
+ ret = __nvme_ns_report_zones(ns, sector, report, buflen);
+ if (ret < 0)
+ goto out_free;
+
+ nz = min_t(unsigned int, ret, nr_zones);
+ if (!nz)
+ break;
+
+ for (i = 0; i < nz && zone_idx < nr_zones; i++) {
+ ret = nvme_zone_parse_entry(ns, &report->entries[i],
+ zone_idx, cb, data);
+ if (ret)
+ goto out_free;
+ zone_idx++;
+ }
+
+ sector += ns->zsze * nz;
+ }
+
+ if (zone_idx > 0)
+ ret = zone_idx;
+ else
+ ret = -EINVAL;
+out_free:
+ kvfree(report);
+ return ret;
+}
+
+int nvme_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct nvme_ns_head *head = NULL;
+ struct nvme_ns *ns;
+ int srcu_idx, ret;
+
+ ns = nvme_get_ns_from_disk(disk, &head, &srcu_idx);
+ if (unlikely(!ns))
+ return -EWOULDBLOCK;
+
+ if (ns->head->ids.csi == NVME_CSI_ZNS)
+ ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
+ else
+ ret = -EINVAL;
+ nvme_put_ns_from_disk(head, srcu_idx);
+
+ return ret;
+}
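Callers supply a report_zones_cb that is invoked once per parsed zone; real users include blk_revalidate_disk_zones() and the BLKREPORTZONE ioctl path. A hedged sketch of a trivial callback (the name and counting logic are illustrative only):

static int count_busy_zones_cb(struct blk_zone *zone, unsigned int idx,
			       void *data)
{
	unsigned int *busy = data;

	if (zone->cond != BLK_ZONE_COND_EMPTY)
		(*busy)++;
	return 0;	/* returning non-zero would abort the walk */
}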
+
+blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *c, enum nvme_zone_mgmt_action action)
+{
+ c->zms.opcode = nvme_cmd_zone_mgmt_send;
+ c->zms.nsid = cpu_to_le32(ns->head->ns_id);
+ c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+ c->zms.zsa = action;
+
+ if (req_op(req) == REQ_OP_ZONE_RESET_ALL)
+ c->zms.select_all = 1;
+
+ return BLK_STS_OK;
+}
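nvme_setup_zone_mgmt_send() only fills in the command; the dispatch from block layer zone operations to zone send actions happens in the core's request setup. A sketch of that mapping, under the assumption that the core switches on req_op() as below:

static blk_status_t example_setup_zone_op(struct nvme_ns *ns,
					  struct request *req,
					  struct nvme_command *cmd)
{
	switch (req_op(req)) {
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		return nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
	case REQ_OP_ZONE_OPEN:
		return nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
	case REQ_OP_ZONE_CLOSE:
		return nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
	case REQ_OP_ZONE_FINISH:
		return nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
	default:
		return BLK_STS_NOTSUPP;
	}
}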
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 4474952d64c6..8056955e652c 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -16,6 +16,18 @@ config NVME_TARGET
To configure the NVMe target you probably want to use the nvmetcli
tool from http://git.infradead.org/users/hch/nvmetcli.git.
+config NVME_TARGET_PASSTHRU
+ bool "NVMe Target Passthrough support"
+ depends on NVME_TARGET
+ depends on NVME_CORE=y || NVME_CORE=NVME_TARGET
+ help
+ This enables target side NVMe passthru controller support for the
+ NVMe Over Fabrics protocol. It allows hosts to manage and
+ directly access an actual NVMe controller residing on the target
+ side, including executing Vendor Unique Commands.
+
+ If unsure, say N.
+
config NVME_TARGET_LOOP
tristate "NVMe loopback device support"
depends on NVME_TARGET
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index 2b33836f3d3e..ebf91fc4c72e 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o
+nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 1db8c0498668..e9fe91786bbb 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -113,11 +113,10 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
u64 data_units_read = 0, data_units_written = 0;
struct nvmet_ns *ns;
struct nvmet_ctrl *ctrl;
+ unsigned long idx;
ctrl = req->sq->ctrl;
-
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
/* we don't have the right data for file backed ns */
if (!ns->bdev)
continue;
@@ -127,9 +126,7 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
data_units_written += DIV_ROUND_UP(
part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
-
}
- rcu_read_unlock();
put_unaligned_le64(host_reads, &slog->host_reads[0]);
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -230,14 +227,13 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_ns *ns;
+ unsigned long idx;
u32 count = 0;
if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns)
if (ns->anagrpid == grpid)
desc->nsids[count++] = cpu_to_le32(ns->nsid);
- rcu_read_unlock();
}
desc->grpid = cpu_to_le32(grpid);
@@ -427,7 +423,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->awupf = 0;
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
- if (ctrl->ops->has_keyed_sgls)
+ if (ctrl->ops->flags & NVMF_KEYED_SGLS)
id->sgls |= cpu_to_le32(1 << 2);
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
@@ -556,6 +552,7 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_ns *ns;
+ unsigned long idx;
u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
__le32 *list;
u16 status = 0;
@@ -567,15 +564,13 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
goto out;
}
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_nsid)
continue;
list[i++] = cpu_to_le32(ns->nsid);
if (i == buf_size / sizeof(__le32))
break;
}
- rcu_read_unlock();
status = nvmet_copy_to_sgl(req, 0, list, buf_size);
@@ -754,7 +749,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
return 0;
}
-static void nvmet_execute_set_features(struct nvmet_req *req)
+void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
@@ -829,7 +824,7 @@ void nvmet_get_feat_async_event(struct nvmet_req *req)
nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}
-static void nvmet_execute_get_features(struct nvmet_req *req)
+void nvmet_execute_get_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
@@ -945,6 +940,9 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
+ if (nvmet_req_passthru_ctrl(req))
+ return nvmet_parse_passthru_admin_cmd(req);
+
switch (cmd->common.opcode) {
case nvme_admin_get_log_page:
req->execute = nvmet_execute_get_log_page;
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 419e0d4ce79b..74b2b61c773b 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -666,6 +666,103 @@ static const struct config_item_type nvmet_namespaces_type = {
.ct_owner = THIS_MODULE,
};
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+
+static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
+}
+
+static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ size_t len;
+ int ret;
+
+ mutex_lock(&subsys->lock);
+
+ ret = -EBUSY;
+ if (subsys->passthru_ctrl)
+ goto out_unlock;
+
+ ret = -EINVAL;
+ len = strcspn(page, "\n");
+ if (!len)
+ goto out_unlock;
+
+ kfree(subsys->passthru_ctrl_path);
+ ret = -ENOMEM;
+ subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
+ if (!subsys->passthru_ctrl_path)
+ goto out_unlock;
+
+ mutex_unlock(&subsys->lock);
+
+ return count;
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+}
+CONFIGFS_ATTR(nvmet_passthru_, device_path);
+
+static ssize_t nvmet_passthru_enable_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+
+ return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
+}
+
+static ssize_t nvmet_passthru_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ bool enable;
+ int ret = 0;
+
+ if (strtobool(page, &enable))
+ return -EINVAL;
+
+ if (enable)
+ ret = nvmet_passthru_ctrl_enable(subsys);
+ else
+ nvmet_passthru_ctrl_disable(subsys);
+
+ return ret ? ret : count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, enable);
+
+static struct configfs_attribute *nvmet_passthru_attrs[] = {
+ &nvmet_passthru_attr_device_path,
+ &nvmet_passthru_attr_enable,
+ NULL,
+};
+
+static const struct config_item_type nvmet_passthru_type = {
+ .ct_attrs = nvmet_passthru_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
+{
+ config_group_init_type_name(&subsys->passthru_group,
+ "passthru", &nvmet_passthru_type);
+ configfs_add_default_group(&subsys->passthru_group,
+ &subsys->group);
+}
+
+#else /* CONFIG_NVME_TARGET_PASSTHRU */
+
+static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
+{
+}
+
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
static int nvmet_port_subsys_allow_link(struct config_item *parent,
struct config_item *target)
{
@@ -862,14 +959,14 @@ static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
struct nvmet_subsys *subsys = to_subsys(item);
if (NVME_TERTIARY(subsys->ver))
- return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
- (int)NVME_MAJOR(subsys->ver),
- (int)NVME_MINOR(subsys->ver),
- (int)NVME_TERTIARY(subsys->ver));
+ return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver),
+ NVME_TERTIARY(subsys->ver));
- return snprintf(page, PAGE_SIZE, "%d.%d\n",
- (int)NVME_MAJOR(subsys->ver),
- (int)NVME_MINOR(subsys->ver));
+ return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver));
}
static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
@@ -879,6 +976,10 @@ static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
int major, minor, tertiary = 0;
int ret;
+ /* passthru subsystems use the underlying controller's version */
+ if (nvmet_passthru_ctrl(subsys))
+ return -EINVAL;
+
ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
if (ret != 2 && ret != 3)
return -EINVAL;
@@ -1121,6 +1222,8 @@ static struct config_group *nvmet_subsys_make(struct config_group *group,
configfs_add_default_group(&subsys->allowed_hosts_group,
&subsys->group);
+ nvmet_add_passthru_group(subsys);
+
return &subsys->group;
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6e2f623e472e..b92f45f5cd5b 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -115,13 +115,14 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
- struct nvmet_ns *ns;
+ unsigned long nsid = 0;
+ struct nvmet_ns *cur;
+ unsigned long idx;
- if (list_empty(&subsys->namespaces))
- return 0;
+ xa_for_each(&subsys->namespaces, idx, cur)
+ nsid = cur->nsid;
- ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
- return ns->nsid;
+ return nsid;
}
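xa_for_each() visits entries in ascending index order, so the last assignment leaves nsid holding the highest present namespace ID, replacing the old reliance on a sorted list. The same idea as a standalone sketch:

static unsigned long xa_max_index(struct xarray *xa)
{
	unsigned long idx, max = 0;
	void *entry;

	xa_for_each(xa, idx, entry)
		max = idx;	/* last visited index is the largest */
	return max;
}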
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
@@ -336,7 +337,7 @@ int nvmet_enable_port(struct nvmet_port *port)
* If the user requested PI support and the transport isn't pi capable,
* don't enable the port.
*/
- if (port->pi_enable && !ops->metadata_support) {
+ if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
pr_err("T10-PI is not supported by transport type %d\n",
port->disc_addr.trtype);
ret = -EINVAL;
@@ -410,28 +411,13 @@ static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
cancel_delayed_work_sync(&ctrl->ka_work);
}
-static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
- __le32 nsid)
-{
- struct nvmet_ns *ns;
-
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
- if (ns->nsid == le32_to_cpu(nsid))
- return ns;
- }
-
- return NULL;
-}
-
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
struct nvmet_ns *ns;
- rcu_read_lock();
- ns = __nvmet_find_namespace(ctrl, nsid);
+ ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
if (ns)
percpu_ref_get(&ns->ref);
- rcu_read_unlock();
return ns;
}
@@ -467,7 +453,7 @@ static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
return -EINVAL;
}
- if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
+ if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
ns->device_path);
return -EINVAL;
@@ -558,6 +544,12 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
mutex_lock(&subsys->lock);
ret = 0;
+
+ if (nvmet_passthru_ctrl(subsys)) {
+ pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
+ goto out_unlock;
+ }
+
if (ns->enabled)
goto out_unlock;
@@ -586,24 +578,10 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
if (ns->nsid > subsys->max_nsid)
subsys->max_nsid = ns->nsid;
- /*
- * The namespaces list needs to be sorted to simplify the implementation
- * of the Identify Namepace List subcommand.
- */
- if (list_empty(&subsys->namespaces)) {
- list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
- } else {
- struct nvmet_ns *old;
-
- list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
- lockdep_is_held(&subsys->lock)) {
- BUG_ON(ns->nsid == old->nsid);
- if (ns->nsid < old->nsid)
- break;
- }
+ ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
+ if (ret)
+ goto out_restore_subsys_maxnsid;
- list_add_tail_rcu(&ns->dev_link, &old->dev_link);
- }
subsys->nr_namespaces++;
nvmet_ns_changed(subsys, ns->nsid);
@@ -612,6 +590,10 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
+
+out_restore_subsys_maxnsid:
+ subsys->max_nsid = nvmet_max_nsid(subsys);
+ percpu_ref_exit(&ns->ref);
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
@@ -630,7 +612,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
goto out_unlock;
ns->enabled = false;
- list_del_rcu(&ns->dev_link);
+ xa_erase(&ns->subsys->namespaces, ns->nsid);
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
@@ -681,7 +663,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
if (!ns)
return NULL;
- INIT_LIST_HEAD(&ns->dev_link);
init_completion(&ns->disable_done);
ns->nsid = nsid;
@@ -874,6 +855,9 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
+ if (nvmet_req_passthru_ctrl(req))
+ return nvmet_parse_passthru_io_cmd(req);
+
req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
if (unlikely(!req->ns)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
@@ -1263,14 +1247,14 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
struct nvmet_req *req)
{
struct nvmet_ns *ns;
+ unsigned long idx;
if (!req->p2p_client)
return;
ctrl->p2p_client = get_device(req->p2p_client);
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
- lockdep_is_held(&ctrl->subsys->lock))
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
@@ -1495,7 +1479,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
if (!subsys)
return ERR_PTR(-ENOMEM);
- subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+ subsys->ver = NVMET_DEFAULT_VS;
/* generate a random serial number as our controllers are ephemeral: */
get_random_bytes(&subsys->serial, sizeof(subsys->serial));
@@ -1523,7 +1507,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
kref_init(&subsys->ref);
mutex_init(&subsys->lock);
- INIT_LIST_HEAD(&subsys->namespaces);
+ xa_init(&subsys->namespaces);
INIT_LIST_HEAD(&subsys->ctrls);
INIT_LIST_HEAD(&subsys->hosts);
@@ -1535,7 +1519,10 @@ static void nvmet_subsys_free(struct kref *ref)
struct nvmet_subsys *subsys =
container_of(ref, struct nvmet_subsys, ref);
- WARN_ON_ONCE(!list_empty(&subsys->namespaces));
+ WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
+
+ xa_destroy(&subsys->namespaces);
+ nvmet_passthru_subsys_free(subsys);
kfree(subsys->subsysnqn);
kfree_rcu(subsys->model, rcuhead);
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 40cf0b6e6c9d..f40c05c33c3a 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -277,7 +277,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
- if (ctrl->ops->has_keyed_sgls)
+ if (ctrl->ops->flags & NVMF_KEYED_SGLS)
id->sgls |= cpu_to_le32(1 << 2);
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 27fd3b5aa621..55bafd56166a 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -167,7 +167,6 @@ struct nvmet_fc_tgt_assoc {
struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
struct work_struct del_work;
- atomic_t del_work_active;
};
@@ -1090,7 +1089,6 @@ nvmet_fc_delete_assoc(struct work_struct *work)
container_of(work, struct nvmet_fc_tgt_assoc, del_work);
nvmet_fc_delete_target_assoc(assoc);
- atomic_set(&assoc->del_work_active, 0);
nvmet_fc_tgt_a_put(assoc);
}
@@ -1123,7 +1121,6 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
- atomic_set(&assoc->del_work_active, 0);
atomic_set(&assoc->terminating, 0);
while (needrandom) {
@@ -1243,7 +1240,8 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
ret = assoc;
- nvmet_fc_tgt_a_get(assoc);
+ if (!nvmet_fc_tgt_a_get(assoc))
+ ret = NULL;
break;
}
}
@@ -1477,21 +1475,15 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
struct nvmet_fc_tgt_assoc *assoc, *next;
unsigned long flags;
- int ret;
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry_safe(assoc, next,
&tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1);
- if (ret == 0) {
- if (!schedule_work(&assoc->del_work))
- nvmet_fc_tgt_a_put(assoc);
- } else {
+ if (!schedule_work(&assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
- }
}
spin_unlock_irqrestore(&tgtport->lock, flags);
}
@@ -1533,7 +1525,6 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
struct nvmet_fc_tgt_assoc *assoc, *next;
unsigned long flags;
bool noassoc = true;
- int ret;
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry_safe(assoc, next,
@@ -1545,14 +1536,9 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
continue;
assoc->hostport->invalid = 1;
noassoc = false;
- ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1);
- if (ret == 0) {
- if (!schedule_work(&assoc->del_work))
- nvmet_fc_tgt_a_put(assoc);
- } else {
+ if (!schedule_work(&assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
- }
}
spin_unlock_irqrestore(&tgtport->lock, flags);
@@ -1573,7 +1559,6 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
struct nvmet_fc_tgt_queue *queue;
unsigned long flags;
bool found_ctrl = false;
- int ret;
/* this is a bit ugly, but don't want to make locks layered */
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
@@ -1597,14 +1582,9 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1);
- if (ret == 0) {
- if (!schedule_work(&assoc->del_work))
- nvmet_fc_tgt_a_put(assoc);
- } else {
+ if (!schedule_work(&assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
- }
return;
}
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 2ff1d1334a03..c97e60b71bbc 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -43,6 +43,17 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_ERR, NULL }
};
+static int fcloop_verify_addr(substring_t *s)
+{
+ size_t blen = s->to - s->from + 1;
+
+ if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
+ strncmp(s->from, "0x", 2))
+ return -EINVAL;
+
+ return 0;
+}
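NVME_FC_TRADDR_HEXNAMELEN is 16, so a well-formed WWN token is "0x" followed by exactly 16 hex digits, e.g. 0x20000090fac7893f; shorter values that match_u64() would happily parse are now rejected. A hedged equivalent over a NUL-terminated string (only the prefix and length are checked here; the digits themselves are still validated by the subsequent match_u64() parse):

static bool example_valid_traddr(const char *str)
{
	return strlen(str) == NVME_FC_TRADDR_HEXNAMELEN + 2 &&
	       !strncmp(str, "0x", 2);
}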
+
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
const char *buf)
@@ -64,14 +75,16 @@ fcloop_parse_options(struct fcloop_ctrl_options *opts,
opts->mask |= token;
switch (token) {
case NVMF_OPT_WWNN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
opts->wwnn = token64;
break;
case NVMF_OPT_WWPN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
@@ -92,14 +105,16 @@ fcloop_parse_options(struct fcloop_ctrl_options *opts,
opts->fcaddr = token;
break;
case NVMF_OPT_LPWWNN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
opts->lpwwnn = token64;
break;
case NVMF_OPT_LPWWPN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
@@ -141,14 +156,16 @@ fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
token = match_token(p, opt_tokens, args);
switch (token) {
case NVMF_OPT_WWNN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
*nname = token64;
break;
case NVMF_OPT_WWPN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 6344e73c9354..4884ef1e46a2 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -36,7 +36,6 @@ struct nvme_loop_ctrl {
struct nvme_loop_iod async_event_iod;
struct nvme_ctrl ctrl;
- struct nvmet_ctrl *target_ctrl;
struct nvmet_port *port;
};
@@ -116,7 +115,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
return;
}
- nvme_end_request(rq, cqe->status, cqe->result);
+ if (!nvme_end_request(rq, cqe->status, cqe->result))
+ nvme_loop_complete_rq(rq);
}
}
@@ -444,7 +444,6 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
struct nvme_loop_ctrl *ctrl =
container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
- bool changed;
int ret;
nvme_stop_ctrl(&ctrl->ctrl);
@@ -471,8 +470,8 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
blk_mq_update_nr_hw_queues(&ctrl->tag_set,
ctrl->ctrl.queue_count - 1);
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
+ WARN_ON_ONCE(1);
nvme_start_ctrl(&ctrl->ctrl);
@@ -567,7 +566,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
struct nvmf_ctrl_options *opts)
{
struct nvme_loop_ctrl *ctrl;
- bool changed;
int ret;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
@@ -583,6 +581,9 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
if (ret)
goto out_put_ctrl;
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
+ WARN_ON_ONCE(1);
+
ret = -ENOMEM;
ctrl->ctrl.sqsize = opts->queue_size - 1;
@@ -617,8 +618,8 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
dev_info(ctrl->ctrl.device,
"new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
+ WARN_ON_ONCE(1);
mutex_lock(&nvme_loop_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 809691291e73..47ee3fb193bd 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -21,6 +21,8 @@
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>
+#define NVMET_DEFAULT_VS NVME_VS(1, 3, 0)
+
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
#define NVMET_NO_ERROR_LOC ((u16)-1)
@@ -52,7 +54,6 @@
(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
struct nvmet_ns {
- struct list_head dev_link;
struct percpu_ref ref;
struct block_device *bdev;
struct file *file;
@@ -219,7 +220,7 @@ struct nvmet_subsys {
struct mutex lock;
struct kref ref;
- struct list_head namespaces;
+ struct xarray namespaces;
unsigned int nr_namespaces;
unsigned int max_nsid;
u16 cntlid_min;
@@ -243,6 +244,12 @@ struct nvmet_subsys {
struct config_group allowed_hosts_group;
struct nvmet_subsys_model __rcu *model;
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+ struct nvme_ctrl *passthru_ctrl;
+ char *passthru_ctrl_path;
+ struct config_group passthru_group;
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
};
static inline struct nvmet_subsys *to_subsys(struct config_item *item)
@@ -286,8 +293,9 @@ struct nvmet_fabrics_ops {
struct module *owner;
unsigned int type;
unsigned int msdbd;
- bool has_keyed_sgls : 1;
- bool metadata_support : 1;
+ unsigned int flags;
+#define NVMF_KEYED_SGLS (1 << 0)
+#define NVMF_METADATA_SUPPORTED (1 << 1)
void (*queue_response)(struct nvmet_req *req);
int (*add_port)(struct nvmet_port *port);
void (*remove_port)(struct nvmet_port *port);
@@ -321,6 +329,11 @@ struct nvmet_req {
struct bio_vec *bvec;
struct work_struct work;
} f;
+ struct {
+ struct request *rq;
+ struct work_struct work;
+ bool use_workqueue;
+ } p;
};
int sg_cnt;
int metadata_sg_cnt;
@@ -400,6 +413,8 @@ void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);
+void nvmet_execute_set_features(struct nvmet_req *req);
+void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
@@ -532,6 +547,43 @@ static inline u32 nvmet_dsm_len(struct nvmet_req *req)
sizeof(struct nvme_dsm_range);
}
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
+int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
+void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
+u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
+static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+{
+ return subsys->passthru_ctrl;
+}
+#else /* CONFIG_NVME_TARGET_PASSTHRU */
+static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
+{
+}
+static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+}
+static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
+{
+ return 0;
+}
+static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
+{
+ return 0;
+}
+static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+{
+ return NULL;
+}
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
+static inline struct nvme_ctrl *
+nvmet_req_passthru_ctrl(struct nvmet_req *req)
+{
+ return nvmet_passthru_ctrl(req->sq->ctrl->subsys);
+}
+
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
/* Convert a 32-bit number to a 16-bit 0's based number */
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
new file mode 100644
index 000000000000..89d91dc999a6
--- /dev/null
+++ b/drivers/nvme/target/passthru.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe Over Fabrics Target Passthrough command implementation.
+ *
+ * Copyright (c) 2017-2018 Western Digital Corporation or its
+ * affiliates.
+ * Copyright (c) 2019-2020, Eideticom Inc.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+
+#include "../host/nvme.h"
+#include "nvmet.h"
+
+MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);
+
+/*
+ * xarray to maintain one passthru subsystem per nvme controller.
+ */
+static DEFINE_XARRAY(passthru_subsystems);
+
+static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
+ u16 status = NVME_SC_SUCCESS;
+ struct nvme_id_ctrl *id;
+ u32 max_hw_sectors;
+ int page_shift;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return NVME_SC_INTERNAL;
+
+ status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
+ if (status)
+ goto out_free;
+
+ id->cntlid = cpu_to_le16(ctrl->cntlid);
+ id->ver = cpu_to_le32(ctrl->subsys->ver);
+
+ /*
+ * The passthru NVMe driver may have a limit on the number of segments
+ * which depends on the host's memory fragmentation. To solve this,
+ * ensure mdts is limited to the number of pages equal to the number
+ * of segments.
+ */
+ max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
+ pctrl->max_hw_sectors);
+
+ page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
+
+ id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;
+
+ id->acl = 3;
+ /*
+ * We export the aerl limit of the fabrics controller; update this
+ * when passthru-based aerl support is added.
+ */
+ id->aerl = NVMET_ASYNC_EVENTS - 1;
+
+ /* emulate kas as most PCIe ctrls don't support kas */
+ id->kas = cpu_to_le16(NVMET_KAS);
+
+ /* don't support host memory buffer */
+ id->hmpre = 0;
+ id->hmmin = 0;
+
+ id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
+ id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+
+ /* don't support fuse commands */
+ id->fuses = 0;
+
+ id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
+ if (ctrl->ops->flags & NVMF_KEYED_SGLS)
+ id->sgls |= cpu_to_le32(1 << 2);
+ if (req->port->inline_data_size)
+ id->sgls |= cpu_to_le32(1 << 20);
+
+ /*
+ * When a passthru controller is set up using the nvme-loop transport,
+ * it will export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will
+ * fail in the nvme_init_subsystem()->nvme_active_ctrl() code path in
+ * nvme/host/core.c with a duplicate ctrl subsysnqn. In order to prevent
+ * that, we mask the passthru-ctrl subsysnqn with the target ctrl
+ * subsysnqn.
+ */
+ memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));
+
+ /* use fabric id-ctrl values */
+ id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
+ req->port->inline_data_size) / 16);
+ id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
+
+ id->msdbd = ctrl->ops->msdbd;
+
+ /* Support multipath connections with fabrics */
+ id->cmic |= 1 << 1;
+
+ /* Disable reservations, see nvmet_parse_passthru_io_cmd() */
+ id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));
+
+out_free:
+ kfree(id);
+ return status;
+}
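mdts is a power of two in CAP.MPSMIN-sized units, which is why the computation subtracts page_shift after converting max_hw_sectors to a byte shift via the +9. A worked example with hypothetical values:

/*
 * max_hw_sectors = 1024 (512-byte units, i.e. 512 KiB)
 * page_shift     = 12   (CAP.MPSMIN == 0 -> 4 KiB pages)
 * mdts = ilog2(1024) + 9 - 12 = 7, and 2^7 * 4 KiB == 512 KiB.
 */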
+
+static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
+{
+ u16 status = NVME_SC_SUCCESS;
+ struct nvme_id_ns *id;
+ int i;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return NVME_SC_INTERNAL;
+
+ status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
+ if (status)
+ goto out_free;
+
+ for (i = 0; i < (id->nlbaf + 1); i++)
+ if (id->lbaf[i].ms)
+ memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));
+
+ id->flbas = id->flbas & ~(1 << 4);
+
+ /*
+ * Presently the NVMe-oF target code does not support sending
+ * metadata, so we must disable it here. This should be updated
+ * once the target starts supporting metadata.
+ */
+ id->mc = 0;
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+out_free:
+ kfree(id);
+ return status;
+}
+
+static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
+ struct request *rq = req->p.rq;
+ u16 status;
+
+ nvme_execute_passthru_rq(rq);
+
+ status = nvme_req(rq)->status;
+ if (status == NVME_SC_SUCCESS &&
+ req->cmd->common.opcode == nvme_admin_identify) {
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_CTRL:
+ nvmet_passthru_override_id_ctrl(req);
+ break;
+ case NVME_ID_CNS_NS:
+ nvmet_passthru_override_id_ns(req);
+ break;
+ }
+ }
+
+ req->cqe->result = nvme_req(rq)->result;
+ nvmet_req_complete(req, status);
+ blk_put_request(rq);
+}
+
+static void nvmet_passthru_req_done(struct request *rq,
+ blk_status_t blk_status)
+{
+ struct nvmet_req *req = rq->end_io_data;
+
+ req->cqe->result = nvme_req(rq)->result;
+ nvmet_req_complete(req, nvme_req(rq)->status);
+ blk_put_request(rq);
+}
+
+static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
+{
+ int sg_cnt = req->sg_cnt;
+ struct scatterlist *sg;
+ int op_flags = 0;
+ struct bio *bio;
+ int i, ret;
+
+ if (req->cmd->common.opcode == nvme_cmd_flush)
+ op_flags = REQ_FUA;
+ else if (nvme_is_write(req->cmd))
+ op_flags = REQ_SYNC | REQ_IDLE;
+
+ bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+ bio->bi_end_io = bio_put;
+ bio->bi_opf = req_op(rq) | op_flags;
+
+ for_each_sg(req->sg, sg, req->sg_cnt, i) {
+ if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
+ sg->offset) < sg->length) {
+ bio_put(bio);
+ return -EINVAL;
+ }
+ sg_cnt--;
+ }
+
+ ret = blk_rq_append_bio(rq, &bio);
+ if (unlikely(ret)) {
+ bio_put(bio);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
+{
+ struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
+ struct request_queue *q = ctrl->admin_q;
+ struct nvme_ns *ns = NULL;
+ struct request *rq = NULL;
+ u32 effects;
+ u16 status;
+ int ret;
+
+ if (likely(req->sq->qid != 0)) {
+ u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+
+ ns = nvme_find_get_ns(ctrl, nsid);
+ if (unlikely(!ns)) {
+ pr_err("failed to get passthru ns nsid:%u\n", nsid);
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto fail_out;
+ }
+
+ q = ns->queue;
+ }
+
+ rq = nvme_alloc_request(q, req->cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+ if (IS_ERR(rq)) {
+ rq = NULL;
+ status = NVME_SC_INTERNAL;
+ goto fail_out;
+ }
+
+ if (req->sg_cnt) {
+ ret = nvmet_passthru_map_sg(req, rq);
+ if (unlikely(ret)) {
+ status = NVME_SC_INTERNAL;
+ goto fail_out;
+ }
+ }
+
+ /*
+ * If the command we are about to execute has any effects, or an
+ * end_req function, we need to run nvme_execute_passthru_rq()
+ * synchronously in a work item, since the end_req function and
+ * nvme_passthru_end() can't be called from the request done
+ * callback, which typically runs in interrupt context.
+ */
+ effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
+ if (req->p.use_workqueue || effects) {
+ INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
+ req->p.rq = rq;
+ schedule_work(&req->p.work);
+ } else {
+ rq->end_io_data = req;
+ blk_execute_rq_nowait(rq->q, ns ? ns->disk : NULL, rq, 0,
+ nvmet_passthru_req_done);
+ }
+
+ if (ns)
+ nvme_put_ns(ns);
+
+ return;
+
+fail_out:
+ if (ns)
+ nvme_put_ns(ns);
+ nvmet_req_complete(req, status);
+ blk_put_request(rq);
+}
+
+/*
+ * We need to emulate the set host behaviour feature to ensure that any
+ * behaviour requested of the target's host matches the behaviour
+ * requested of the device's host, and fail otherwise.
+ */
+static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
+{
+ struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
+ struct nvme_feat_host_behavior *host;
+ u16 status = NVME_SC_INTERNAL;
+ int ret;
+
+ host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
+ if (!host)
+ goto out_complete_req;
+
+ ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
+ host, sizeof(*host), NULL);
+ if (ret)
+ goto out_free_host;
+
+ status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
+ if (status)
+ goto out_free_host;
+
+ if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
+ pr_warn("target host has requested different behaviour from the local host\n");
+ status = NVME_SC_INTERNAL;
+ }
+
+out_free_host:
+ kfree(host);
+out_complete_req:
+ nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
+{
+ req->p.use_workqueue = false;
+ req->execute = nvmet_passthru_execute_cmd;
+ return NVME_SC_SUCCESS;
+}
+
+u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
+{
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_resv_register:
+ case nvme_cmd_resv_report:
+ case nvme_cmd_resv_acquire:
+ case nvme_cmd_resv_release:
+ /*
+ * Reservations cannot be supported properly because the
+ * underlying device has no way of differentiating different
+ * hosts that connect via fabrics. This could potentially be
+ * emulated in the future if regular targets grow support for
+ * this feature.
+ */
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return nvmet_setup_passthru_command(req);
+}
+
+/*
+ * Only features that are emulated or specifically allowed in the list are
+ * passed down to the controller. This function implements the allow list for
+ * both get and set features.
+ */
+static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
+{
+ switch (le32_to_cpu(req->cmd->features.fid)) {
+ case NVME_FEAT_ARBITRATION:
+ case NVME_FEAT_POWER_MGMT:
+ case NVME_FEAT_LBA_RANGE:
+ case NVME_FEAT_TEMP_THRESH:
+ case NVME_FEAT_ERR_RECOVERY:
+ case NVME_FEAT_VOLATILE_WC:
+ case NVME_FEAT_WRITE_ATOMIC:
+ case NVME_FEAT_AUTO_PST:
+ case NVME_FEAT_TIMESTAMP:
+ case NVME_FEAT_HCTM:
+ case NVME_FEAT_NOPSC:
+ case NVME_FEAT_RRL:
+ case NVME_FEAT_PLM_CONFIG:
+ case NVME_FEAT_PLM_WINDOW:
+ case NVME_FEAT_HOST_BEHAVIOR:
+ case NVME_FEAT_SANITIZE:
+ case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
+ return nvmet_setup_passthru_command(req);
+
+ case NVME_FEAT_ASYNC_EVENT:
+ /* There is no support for forwarding ASYNC events */
+ case NVME_FEAT_IRQ_COALESCE:
+ case NVME_FEAT_IRQ_CONFIG:
+ /* The IRQ settings will not apply to the target controller */
+ case NVME_FEAT_HOST_MEM_BUF:
+ /*
+ * Any HMB that's set will not be passed through and will
+ * not work as expected
+ */
+ case NVME_FEAT_SW_PROGRESS:
+ /*
+ * The Pre-Boot Software Load Count doesn't make much
+ * sense for a target to export
+ */
+ case NVME_FEAT_RESV_MASK:
+ case NVME_FEAT_RESV_PERSIST:
+ /* No reservations, see nvmet_parse_passthru_io_cmd() */
+ default:
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+}
+
+u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
+{
+ /*
+ * Pass through all vendor-specific commands
+ */
+ if (req->cmd->common.opcode >= nvme_admin_vendor_start)
+ return nvmet_setup_passthru_command(req);
+
+ switch (req->cmd->common.opcode) {
+ case nvme_admin_async_event:
+ req->execute = nvmet_execute_async_event;
+ return NVME_SC_SUCCESS;
+ case nvme_admin_keep_alive:
+ /*
+ * Most PCIe ctrls don't support the keep alive cmd, so we route
+ * keep alive to the non-passthru mode. Please change this code in
+ * the future once PCIe ctrls with keep alive support become
+ * available.
+ */
+ req->execute = nvmet_execute_keep_alive;
+ return NVME_SC_SUCCESS;
+ case nvme_admin_set_features:
+ switch (le32_to_cpu(req->cmd->features.fid)) {
+ case NVME_FEAT_ASYNC_EVENT:
+ case NVME_FEAT_KATO:
+ case NVME_FEAT_NUM_QUEUES:
+ case NVME_FEAT_HOST_ID:
+ req->execute = nvmet_execute_set_features;
+ return NVME_SC_SUCCESS;
+ case NVME_FEAT_HOST_BEHAVIOR:
+ req->execute = nvmet_passthru_set_host_behaviour;
+ return NVME_SC_SUCCESS;
+ default:
+ return nvmet_passthru_get_set_features(req);
+ }
+ break;
+ case nvme_admin_get_features:
+ switch (le32_to_cpu(req->cmd->features.fid)) {
+ case NVME_FEAT_ASYNC_EVENT:
+ case NVME_FEAT_KATO:
+ case NVME_FEAT_NUM_QUEUES:
+ case NVME_FEAT_HOST_ID:
+ req->execute = nvmet_execute_get_features;
+ return NVME_SC_SUCCESS;
+ default:
+ return nvmet_passthru_get_set_features(req);
+ }
+ break;
+ case nvme_admin_identify:
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_CTRL:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ case NVME_ID_CNS_NS:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ default:
+ return nvmet_setup_passthru_command(req);
+ }
+ case nvme_admin_get_log_page:
+ return nvmet_setup_passthru_command(req);
+ default:
+ /* Reject commands not in the allowlist above */
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+}
+
+int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
+{
+ struct nvme_ctrl *ctrl;
+ int ret = -EINVAL;
+ void *old;
+
+ mutex_lock(&subsys->lock);
+ if (!subsys->passthru_ctrl_path)
+ goto out_unlock;
+ if (subsys->passthru_ctrl)
+ goto out_unlock;
+
+ if (subsys->nr_namespaces) {
+ pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
+ goto out_unlock;
+ }
+
+ ctrl = nvme_ctrl_get_by_path(subsys->passthru_ctrl_path);
+ if (IS_ERR(ctrl)) {
+ ret = PTR_ERR(ctrl);
+ pr_err("failed to open nvme controller %s\n",
+ subsys->passthru_ctrl_path);
+
+ goto out_unlock;
+ }
+
+ old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
+ subsys, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ ret = xa_err(old);
+ goto out_put_ctrl;
+ }
+
+ if (old)
+ goto out_put_ctrl;
+
+ subsys->passthru_ctrl = ctrl;
+ subsys->ver = ctrl->vs;
+
+ if (subsys->ver < NVME_VS(1, 2, 1)) {
+ pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
+ NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
+ NVME_TERTIARY(subsys->ver));
+ subsys->ver = NVME_VS(1, 2, 1);
+ }
+
+ mutex_unlock(&subsys->lock);
+ return 0;
+
+out_put_ctrl:
+ nvme_put_ctrl(ctrl);
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+}
+
+static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+ if (subsys->passthru_ctrl) {
+ xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
+ nvme_put_ctrl(subsys->passthru_ctrl);
+ }
+ subsys->passthru_ctrl = NULL;
+ subsys->ver = NVMET_DEFAULT_VS;
+}
+
+void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+ mutex_lock(&subsys->lock);
+ __nvmet_passthru_ctrl_disable(subsys);
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
+{
+ mutex_lock(&subsys->lock);
+ __nvmet_passthru_ctrl_disable(subsys);
+ mutex_unlock(&subsys->lock);
+ kfree(subsys->passthru_ctrl_path);
+}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 76ea23a2c2be..3ccb59260b4a 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -752,7 +752,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_rsp *rsp =
container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
- struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
u16 status = 0;
WARN_ON(rsp->n_rdma <= 0);
@@ -1008,7 +1008,7 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_cmd *cmd =
container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
- struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
struct nvmet_rdma_rsp *rsp;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
@@ -1258,9 +1258,8 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
*/
nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
- queue->cq = ib_alloc_cq(ndev->device, queue,
- nr_cqe + 1, queue->comp_vector,
- IB_POLL_WORKQUEUE);
+ queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
+ queue->comp_vector, IB_POLL_WORKQUEUE);
if (IS_ERR(queue->cq)) {
ret = PTR_ERR(queue->cq);
pr_err("failed to create CQ cqe= %d ret= %d\n",
@@ -1322,7 +1321,7 @@ out:
err_destroy_qp:
rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
- ib_free_cq(queue->cq);
+ ib_cq_pool_put(queue->cq, nr_cqe + 1);
goto out;
}
@@ -1332,7 +1331,8 @@ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
if (queue->cm_id)
rdma_destroy_id(queue->cm_id);
ib_destroy_qp(queue->qp);
- ib_free_cq(queue->cq);
+ ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 *
+ queue->send_queue_size + 1);
}
static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
@@ -1970,8 +1970,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
.msdbd = 1,
- .has_keyed_sgls = 1,
- .metadata_support = 1,
+ .flags = NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED,
.add_port = nvmet_rdma_add_port,
.remove_port = nvmet_rdma_remove_port,
.queue_response = nvmet_rdma_queue_response,
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index de9217cfd22d..9eda91162fe4 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -459,17 +459,11 @@ static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
struct llist_node *node;
+ struct nvmet_tcp_cmd *cmd;
- node = llist_del_all(&queue->resp_list);
- if (!node)
- return;
-
- while (node) {
- struct nvmet_tcp_cmd *cmd = llist_entry(node,
- struct nvmet_tcp_cmd, lentry);
-
+ for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
+ cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
list_add(&cmd->entry, &queue->resp_send_list);
- node = node->next;
queue->send_list_len++;
}
}
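llist_del_all() detaches the entire lock-free list with one atomic exchange, and walking node->next afterwards is safe because list_add() links commands through cmd->entry, a different member than cmd->lentry. For reference, the loop could equally be written with the llist helper iterator (a sketch, not the committed form):

	struct nvmet_tcp_cmd *cmd, *n;

	llist_for_each_entry_safe(cmd, n, llist_del_all(&queue->resp_list),
				  lentry) {
		list_add(&cmd->entry, &queue->resp_send_list);
		queue->send_list_len++;
	}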
@@ -1717,7 +1711,6 @@ static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_TCP,
.msdbd = 1,
- .has_keyed_sgls = 0,
.add_port = nvmet_tcp_add_port,
.remove_port = nvmet_tcp_remove_port,
.queue_response = nvmet_tcp_queue_response,
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index d7b7f6d688e7..954d3b4a52ab 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -7,9 +7,6 @@ menuconfig NVMEM
This framework is designed to provide a generic interface to NVMEM
from both the Linux Kernel and the userspace.
- This driver can also be built as a module. If so, the module
- will be called nvmem_core.
-
If unsure, say no.
if NVMEM
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 927eb5f6003f..6cd3edb2eaf6 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -135,6 +135,9 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
if (pos >= nvmem->size)
return 0;
+ if (!IS_ALIGNED(pos, nvmem->stride))
+ return -EINVAL;
+
if (count < nvmem->word_size)
return -EINVAL;
@@ -172,6 +175,9 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
if (pos >= nvmem->size)
return -EFBIG;
+ if (!IS_ALIGNED(pos, nvmem->stride))
+ return -EINVAL;
+
if (count < nvmem->word_size)
return -EINVAL;
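stride is the device's minimum addressable step, so accesses that do not start on a stride boundary cannot be expressed to the provider's reg_read()/reg_write() and are now rejected up front. A worked example with a hypothetical device (stride = 4, word_size = 4):

/*
 * read at pos = 0, count = 4  ->  forwarded to reg_read()
 * read at pos = 2, count = 4  ->  -EINVAL (2 is not stride-aligned)
 * read at pos = 4, count = 2  ->  -EINVAL (count < word_size)
 */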
@@ -567,7 +573,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
/**
* nvmem_register() - Register a nvmem device for given nvmem_config.
- * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
+ * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
*
* @config: nvmem device configuration with which nvmem device is created.
*
@@ -629,12 +635,18 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (!config->no_of_node)
nvmem->dev.of_node = config->dev->of_node;
- if (config->id == -1 && config->name) {
+ switch (config->id) {
+ case NVMEM_DEVID_NONE:
dev_set_name(&nvmem->dev, "%s", config->name);
- } else {
+ break;
+ case NVMEM_DEVID_AUTO:
+ dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
+ break;
+ default:
dev_set_name(&nvmem->dev, "%s%d",
config->name ? : "nvmem",
config->name ? config->id : nvmem->id);
+ break;
}
nvmem->read_only = device_property_present(config->dev, "read-only") ||
@@ -722,7 +734,7 @@ static void devm_nvmem_release(struct device *dev, void *res)
/**
* devm_nvmem_register() - Register a managed nvmem device for given
* nvmem_config.
- * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
+ * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
*
* @dev: Device that uses the nvmem device.
* @config: nvmem device configuration with which nvmem device is created.
@@ -766,7 +778,7 @@ static int devm_nvmem_match(struct device *dev, void *res, void *data)
* @dev: Device that uses the nvmem device.
* @nvmem: Pointer to previously registered nvmem device.
*
- * Return: Will be an negative on error or a zero on success.
+ * Return: Will be negative on error or zero on success.
*/
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
@@ -1369,7 +1381,22 @@ static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
}
/**
- * nvmem_cell_read_u16() - Read a cell value as an u16
+ * nvmem_cell_read_u8() - Read a cell value as a u8
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
+{
+ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
+
+/**
+ * nvmem_cell_read_u16() - Read a cell value as a u16
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
@@ -1384,7 +1411,7 @@ int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
/**
- * nvmem_cell_read_u32() - Read a cell value as an u32
+ * nvmem_cell_read_u32() - Read a cell value as a u32
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
@@ -1399,7 +1426,7 @@ int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
/**
- * nvmem_cell_read_u64() - Read a cell value as an u64
+ * nvmem_cell_read_u64() - Read a cell value as a u64
*
* @dev: Device that requests the nvmem cell.
* @cell_id: Name of nvmem cell to read.
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
index 8682cda448d6..a72704cd0468 100644
--- a/drivers/nvmem/qcom-spmi-sdam.c
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, 2020 The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
@@ -141,7 +141,7 @@ static int sdam_probe(struct platform_device *pdev)
sdam->sdam_config.dev = &pdev->dev;
sdam->sdam_config.name = "spmi_sdam";
- sdam->sdam_config.id = pdev->id;
+ sdam->sdam_config.id = NVMEM_DEVID_AUTO;
sdam->sdam_config.owner = THIS_MODULE,
sdam->sdam_config.stride = 1;
sdam->sdam_config.word_size = 1;
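With NVMEM_DEVID_AUTO the core appends the ida-allocated nvmem->id to config->name, so multiple SDAM instances no longer collide on pdev->id. A minimal sketch of a config opting into automatic numbering (field values are illustrative):

static struct nvmem_config example_config = {
	.name      = "spmi_sdam",
	.id        = NVMEM_DEVID_AUTO,	/* registers spmi_sdam0, spmi_sdam1, ... */
	.stride    = 1,
	.word_size = 1,
};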
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 8a91717600be..5e9e60e2e591 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -3,57 +3,350 @@
* Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
*/
+#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+/* Blow timer clock frequency in MHz */
+#define QFPROM_BLOW_TIMER_OFFSET 0x03c
+
+/* Amount of time required to hold charge to blow fuse in microseconds */
+#define QFPROM_FUSE_BLOW_POLL_US 100
+#define QFPROM_FUSE_BLOW_TIMEOUT_US 1000
+
+#define QFPROM_BLOW_STATUS_OFFSET 0x048
+#define QFPROM_BLOW_STATUS_BUSY 0x1
+#define QFPROM_BLOW_STATUS_READY 0x0
+
+#define QFPROM_ACCEL_OFFSET 0x044
+
+#define QFPROM_VERSION_OFFSET 0x0
+#define QFPROM_MAJOR_VERSION_SHIFT 28
+#define QFPROM_MAJOR_VERSION_MASK GENMASK(31, QFPROM_MAJOR_VERSION_SHIFT)
+#define QFPROM_MINOR_VERSION_SHIFT 16
+#define QFPROM_MINOR_VERSION_MASK GENMASK(27, QFPROM_MINOR_VERSION_SHIFT)
+
+static bool read_raw_data;
+module_param(read_raw_data, bool, 0644);
+MODULE_PARM_DESC(read_raw_data, "Read raw instead of corrected data");
+/**
+ * struct qfprom_soc_data - config that varies from SoC to SoC.
+ *
+ * @accel_value: Should contain qfprom accel value.
+ * @qfprom_blow_timer_value: The timer value of qfprom when doing efuse blow.
+ * @qfprom_blow_set_freq: The frequency that must be set when we start
+ * blowing fuses.
+ */
+struct qfprom_soc_data {
+ u32 accel_value;
+ u32 qfprom_blow_timer_value;
+ u32 qfprom_blow_set_freq;
+};
+
+/**
+ * struct qfprom_priv - structure holding qfprom attributes
+ *
+ * @qfpraw: iomapped memory space for qfprom-efuse raw address space.
+ * @qfpconf: iomapped memory space for qfprom-efuse configuration address
+ * space.
+ * @qfpcorrected: iomapped memory space for qfprom corrected address space.
+ * @qfpsecurity: iomapped memory space for qfprom security control space.
+ * @dev: qfprom device structure.
+ * @secclk: Clock supply.
+ * @vcc: Regulator supply.
+ * @soc_data: Data for things that vary from SoC to SoC.
+ */
struct qfprom_priv {
- void __iomem *base;
+ void __iomem *qfpraw;
+ void __iomem *qfpconf;
+ void __iomem *qfpcorrected;
+ void __iomem *qfpsecurity;
+ struct device *dev;
+ struct clk *secclk;
+ struct regulator *vcc;
+ const struct qfprom_soc_data *soc_data;
+};
+
+/**
+ * struct qfprom_touched_values - saved values to restore after blowing
+ *
+ * @clk_rate: The rate the clock was at before blowing.
+ * @accel_val: The value of the accel reg before blowing.
+ * @timer_val: The value of the timer before blowing.
+ */
+struct qfprom_touched_values {
+ unsigned long clk_rate;
+ u32 accel_val;
+ u32 timer_val;
};
+/**
+ * qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing.
+ * @priv: Our driver data.
+ * @old: The data that was stashed from before fuse blowing.
+ *
+ * Resets the value of the blow timer, accel register and the clock
+ * and voltage settings.
+ *
+ * Prints messages if there are errors but doesn't return an error code
+ * since there's not much we can do upon failure.
+ */
+static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
+ const struct qfprom_touched_values *old)
+{
+ int ret;
+
+ ret = regulator_disable(priv->vcc);
+ if (ret)
+ dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
+
+ ret = clk_set_rate(priv->secclk, old->clk_rate);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to set clock rate for disable (ignoring)\n");
+
+ clk_disable_unprepare(priv->secclk);
+
+ writel(old->timer_val, priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
+ writel(old->accel_val, priv->qfpconf + QFPROM_ACCEL_OFFSET);
+}
+
+/**
+ * qfprom_enable_fuse_blowing() - Enable fuse blowing.
+ * @priv: Our driver data.
+ * @old: We'll stash stuff here to use when disabling.
+ *
+ * Sets the value of the blow timer, accel register and the clock
+ * and voltage settings.
+ *
+ * Prints messages if there are errors so caller doesn't need to.
+ *
+ * Return: 0 or -err.
+ */
+static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
+ struct qfprom_touched_values *old)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->secclk);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ old->clk_rate = clk_get_rate(priv->secclk);
+ ret = clk_set_rate(priv->secclk, priv->soc_data->qfprom_blow_set_freq);
+ if (ret) {
+ dev_err(priv->dev, "Failed to set clock rate for enable\n");
+ goto err_clk_prepared;
+ }
+
+ ret = regulator_enable(priv->vcc);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable regulator\n");
+ goto err_clk_rate_set;
+ }
+
+ old->timer_val = readl(priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
+ old->accel_val = readl(priv->qfpconf + QFPROM_ACCEL_OFFSET);
+ writel(priv->soc_data->qfprom_blow_timer_value,
+ priv->qfpconf + QFPROM_BLOW_TIMER_OFFSET);
+ writel(priv->soc_data->accel_value,
+ priv->qfpconf + QFPROM_ACCEL_OFFSET);
+
+ return 0;
+
+err_clk_rate_set:
+ clk_set_rate(priv->secclk, old->clk_rate);
+err_clk_prepared:
+ clk_disable_unprepare(priv->secclk);
+ return ret;
+}
+
+/**
+ * qfprom_reg_write() - Write to fuses.
+ * @context: Our driver data.
+ * @reg: The offset to write at.
+ * @_val: Pointer to data to write.
+ * @bytes: The number of bytes to write.
+ *
+ * Writes to fuses. WARNING: THIS IS PERMANENT.
+ *
+ * Return: 0 or -err.
+ */
+static int qfprom_reg_write(void *context, unsigned int reg, void *_val,
+ size_t bytes)
+{
+ struct qfprom_priv *priv = context;
+ struct qfprom_touched_values old;
+ int words = bytes / 4;
+ u32 *value = _val;
+ u32 blow_status;
+ int ret;
+ int i;
+
+ dev_dbg(priv->dev,
+ "Writing to raw qfprom region : %#010x of size: %zu\n",
+ reg, bytes);
+
+ /*
+ * The hardware only allows us to write a word at a time, but we can
+ * read a byte at a time. Until the nvmem framework allows a separate
+ * word_size and stride for reading vs. writing, we enforce that here.
+ */
+ if (bytes % 4) {
+ dev_err(priv->dev,
+ "%zu is not an integral number of words\n", bytes);
+ return -EINVAL;
+ }
+ if (reg % 4) {
+ dev_err(priv->dev,
+ "Invalid offset: %#x. Must be word aligned\n", reg);
+ return -EINVAL;
+ }
+
+ ret = qfprom_enable_fuse_blowing(priv, &old);
+ if (ret)
+ return ret;
+
+ ret = readl_relaxed_poll_timeout(
+ priv->qfpconf + QFPROM_BLOW_STATUS_OFFSET,
+ blow_status, blow_status == QFPROM_BLOW_STATUS_READY,
+ QFPROM_FUSE_BLOW_POLL_US, QFPROM_FUSE_BLOW_TIMEOUT_US);
+
+ if (ret) {
+ dev_err(priv->dev,
+ "Timeout waiting for initial ready; aborting.\n");
+ goto exit_enabled_fuse_blowing;
+ }
+
+ for (i = 0; i < words; i++)
+ writel(value[i], priv->qfpraw + reg + (i * 4));
+
+ ret = readl_relaxed_poll_timeout(
+ priv->qfpconf + QFPROM_BLOW_STATUS_OFFSET,
+ blow_status, blow_status == QFPROM_BLOW_STATUS_READY,
+ QFPROM_FUSE_BLOW_POLL_US, QFPROM_FUSE_BLOW_TIMEOUT_US);
+
+ /* Give an error, but not much we can do in this case */
+ if (ret)
+ dev_err(priv->dev, "Timeout waiting for finish.\n");
+
+exit_enabled_fuse_blowing:
+ qfprom_disable_fuse_blowing(priv, &old);
+
+ return ret;
+}
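For context, a minimal consumer sketch of the write path added above; the
device handle, the NULL lookup name and the 0x100 offset are hypothetical,
and only the generic nvmem consumer API is assumed:

	#include <linux/nvmem-consumer.h>

	/* Hypothetical consumer: blow one word in the raw fuse space. */
	static int example_blow_fuse(struct device *dev)
	{
		struct nvmem_device *nvmem = devm_nvmem_device_get(dev, NULL);
		u32 val = 0x1;

		if (IS_ERR(nvmem))
			return PTR_ERR(nvmem);

		/* WARNING: permanent; offset and size must be word aligned. */
		return nvmem_device_write(nvmem, 0x100, sizeof(val), &val);
	}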
+
static int qfprom_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
struct qfprom_priv *priv = context;
u8 *val = _val;
int i = 0, words = bytes;
+ void __iomem *base = priv->qfpcorrected;
+
+ if (read_raw_data && priv->qfpraw)
+ base = priv->qfpraw;
while (words--)
- *val++ = readb(priv->base + reg + i++);
+ *val++ = readb(base + reg + i++);
return 0;
}
-static struct nvmem_config econfig = {
- .name = "qfprom",
- .stride = 1,
- .word_size = 1,
- .reg_read = qfprom_reg_read,
+static const struct qfprom_soc_data qfprom_7_8_data = {
+ .accel_value = 0xD10,
+ .qfprom_blow_timer_value = 25,
+ .qfprom_blow_set_freq = 4800000,
};
static int qfprom_probe(struct platform_device *pdev)
{
+ struct nvmem_config econfig = {
+ .name = "qfprom",
+ .stride = 1,
+ .word_size = 1,
+ .id = NVMEM_DEVID_AUTO,
+ .reg_read = qfprom_reg_read,
+ };
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
struct qfprom_priv *priv;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ /* The corrected section is always provided */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ priv->qfpcorrected = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->qfpcorrected))
+ return PTR_ERR(priv->qfpcorrected);
econfig.size = resource_size(res);
econfig.dev = dev;
econfig.priv = priv;
+ priv->dev = dev;
+
+ /*
+ * If more than one region is provided, the OS has the ability
+ * to write.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ u32 version;
+ int major_version, minor_version;
+
+ priv->qfpraw = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->qfpraw))
+ return PTR_ERR(priv->qfpraw);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ priv->qfpconf = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->qfpconf))
+ return PTR_ERR(priv->qfpconf);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ priv->qfpsecurity = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->qfpsecurity))
+ return PTR_ERR(priv->qfpsecurity);
+
+ version = readl(priv->qfpsecurity + QFPROM_VERSION_OFFSET);
+ major_version = (version & QFPROM_MAJOR_VERSION_MASK) >>
+ QFPROM_MAJOR_VERSION_SHIFT;
+ minor_version = (version & QFPROM_MINOR_VERSION_MASK) >>
+ QFPROM_MINOR_VERSION_SHIFT;
+
+ if (major_version == 7 && minor_version == 8)
+ priv->soc_data = &qfprom_7_8_data;
+
+ priv->vcc = devm_regulator_get(&pdev->dev, "vcc");
+ if (IS_ERR(priv->vcc))
+ return PTR_ERR(priv->vcc);
+
+ priv->secclk = devm_clk_get(dev, "core");
+ if (IS_ERR(priv->secclk)) {
+ ret = PTR_ERR(priv->secclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Error getting clock: %d\n", ret);
+ return ret;
+ }
+
+ /* Only enable writing if we have SoC data. */
+ if (priv->soc_data)
+ econfig.reg_write = qfprom_reg_write;
+ }
+
nvmem = devm_nvmem_register(dev, &econfig);
return PTR_ERR_OR_ZERO(nvmem);
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
index ab5e7e0bc3d8..c825fc902d10 100644
--- a/drivers/nvmem/sc27xx-efuse.c
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -4,12 +4,14 @@
#include <linux/hwspinlock.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/nvmem-provider.h>
/* PMIC global registers definition */
#define SC27XX_MODULE_EN 0xc08
+#define SC2730_MODULE_EN 0x1808
#define SC27XX_EFUSE_EN BIT(6)
/* Efuse controller registers definition */
@@ -49,12 +51,29 @@
#define SC27XX_EFUSE_POLL_TIMEOUT 3000000
#define SC27XX_EFUSE_POLL_DELAY_US 10000
+/*
+ * Since different PMICs of the SC27xx series can have different
+ * module-enable register addresses, save the address in the device
+ * data structure.
+ */
+struct sc27xx_efuse_variant_data {
+ u32 module_en;
+};
+
struct sc27xx_efuse {
struct device *dev;
struct regmap *regmap;
struct hwspinlock *hwlock;
struct mutex mutex;
u32 base;
+ const struct sc27xx_efuse_variant_data *var_data;
+};
+
+static const struct sc27xx_efuse_variant_data sc2731_edata = {
+ .module_en = SC27XX_MODULE_EN,
+};
+
+static const struct sc27xx_efuse_variant_data sc2730_edata = {
+ .module_en = SC2730_MODULE_EN,
};
/*
@@ -119,7 +138,7 @@ static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
return ret;
/* Enable the efuse controller. */
- ret = regmap_update_bits(efuse->regmap, SC27XX_MODULE_EN,
+ ret = regmap_update_bits(efuse->regmap, efuse->var_data->module_en,
SC27XX_EFUSE_EN, SC27XX_EFUSE_EN);
if (ret)
goto unlock_efuse;
@@ -169,7 +188,7 @@ static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
disable_efuse:
/* Disable the efuse controller after reading. */
- regmap_update_bits(efuse->regmap, SC27XX_MODULE_EN, SC27XX_EFUSE_EN, 0);
+ regmap_update_bits(efuse->regmap, efuse->var_data->module_en, SC27XX_EFUSE_EN, 0);
unlock_efuse:
sc27xx_efuse_unlock(efuse);
@@ -219,6 +238,7 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
mutex_init(&efuse->mutex);
efuse->dev = &pdev->dev;
+ efuse->var_data = of_device_get_match_data(&pdev->dev);
econfig.stride = 1;
econfig.word_size = 1;
@@ -238,7 +258,8 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
}
static const struct of_device_id sc27xx_efuse_of_match[] = {
- { .compatible = "sprd,sc2731-efuse" },
+ { .compatible = "sprd,sc2731-efuse", .data = &sc2731_edata},
+ { .compatible = "sprd,sc2730-efuse", .data = &sc2730_edata},
{ }
};
diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
index 925feb21d5ad..59523245db8a 100644
--- a/drivers/nvmem/sprd-efuse.c
+++ b/drivers/nvmem/sprd-efuse.c
@@ -378,8 +378,8 @@ static int sprd_efuse_probe(struct platform_device *pdev)
return -ENOMEM;
efuse->base = devm_platform_ioremap_resource(pdev, 0);
- if (!efuse->base)
- return -ENOMEM;
+ if (IS_ERR(efuse->base))
+ return PTR_ERR(efuse->base);
ret = of_hwspin_lock_get_id(np, 0);
if (ret < 0) {
diff --git a/drivers/of/base.c b/drivers/of/base.c
index ae03b1218b06..ea44fea99813 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -2201,15 +2201,15 @@ int of_find_last_cache_level(unsigned int cpu)
}
/**
- * of_map_rid - Translate a requester ID through a downstream mapping.
+ * of_map_id - Translate an ID through a downstream mapping.
* @np: root complex device node.
- * @rid: device requester ID to map.
+ * @id: device ID to map.
* @map_name: property name of the map to use.
* @map_mask_name: optional property name of the mask to use.
* @target: optional pointer to a target device node.
* @id_out: optional pointer to receive the translated ID.
*
- * Given a device requester ID, look up the appropriate implementation-defined
+ * Given a device ID, look up the appropriate implementation-defined
* platform ID and/or the target device which receives transactions on that
* ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
* @id_out may be NULL if only the other is required. If @target points to
@@ -2219,11 +2219,11 @@ int of_find_last_cache_level(unsigned int cpu)
*
* Return: 0 on success or a standard error code on failure.
*/
-int of_map_rid(struct device_node *np, u32 rid,
+int of_map_id(struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
{
- u32 map_mask, masked_rid;
+ u32 map_mask, masked_id;
int map_len;
const __be32 *map = NULL;
@@ -2235,7 +2235,7 @@ int of_map_rid(struct device_node *np, u32 rid,
if (target)
return -ENODEV;
/* Otherwise, no map implies no translation */
- *id_out = rid;
+ *id_out = id;
return 0;
}
@@ -2255,22 +2255,22 @@ int of_map_rid(struct device_node *np, u32 rid,
if (map_mask_name)
of_property_read_u32(np, map_mask_name, &map_mask);
- masked_rid = map_mask & rid;
+ masked_id = map_mask & id;
for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
struct device_node *phandle_node;
- u32 rid_base = be32_to_cpup(map + 0);
+ u32 id_base = be32_to_cpup(map + 0);
u32 phandle = be32_to_cpup(map + 1);
u32 out_base = be32_to_cpup(map + 2);
- u32 rid_len = be32_to_cpup(map + 3);
+ u32 id_len = be32_to_cpup(map + 3);
- if (rid_base & ~map_mask) {
- pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
+ if (id_base & ~map_mask) {
+ pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
np, map_name, map_name,
- map_mask, rid_base);
+ map_mask, id_base);
return -EFAULT;
}
- if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
+ if (masked_id < id_base || masked_id >= id_base + id_len)
continue;
phandle_node = of_find_node_by_phandle(phandle);
@@ -2288,20 +2288,20 @@ int of_map_rid(struct device_node *np, u32 rid,
}
if (id_out)
- *id_out = masked_rid - rid_base + out_base;
+ *id_out = masked_id - id_base + out_base;
- pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
- np, map_name, map_mask, rid_base, out_base,
- rid_len, rid, masked_rid - rid_base + out_base);
+ pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
+ np, map_name, map_mask, id_base, out_base,
+ id_len, id, masked_id - id_base + out_base);
return 0;
}
- pr_info("%pOF: no %s translation for rid 0x%x on %pOF\n", np, map_name,
- rid, target && *target ? *target : NULL);
+ pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
+ id, target && *target ? *target : NULL);
/* Bypasses translation */
if (id_out)
- *id_out = rid;
+ *id_out = id;
return 0;
}
-EXPORT_SYMBOL_GPL(of_map_rid);
+EXPORT_SYMBOL_GPL(of_map_id);
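A hedged usage sketch for the renamed helper; bridge_np and rid are assumed
to come from the caller (a host bridge node carrying "msi-map" and a PCI
requester ID):

	struct device_node *msi_np = NULL;
	u32 dev_id;

	if (!of_map_id(bridge_np, rid, "msi-map", "msi-map-mask",
		       &msi_np, &dev_id))
		pr_debug("rid 0x%x -> id 0x%x at %pOF\n", rid, dev_id, msi_np);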
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 27203bfd0b22..b439c1e05434 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -78,6 +78,7 @@ int of_device_add(struct platform_device *ofdev)
* @np: Pointer to OF node having DMA configuration
* @force_dma: Whether device is to be set up by of_dma_configure() even if
* DMA capability is not explicitly described by firmware.
+ * @id: Optional const pointer to the input device ID
*
* Try to get the device's DMA configuration from DT and update it
* accordingly.
@@ -86,7 +87,8 @@ int of_device_add(struct platform_device *ofdev)
* can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
* to fix up DMA configuration.
*/
-int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
+int of_dma_configure_id(struct device *dev, struct device_node *np,
+ bool force_dma, const u32 *id)
{
u64 dma_addr, paddr, size = 0;
int ret;
@@ -160,7 +162,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
dev_dbg(dev, "device is%sdma coherent\n",
coherent ? " " : " not ");
- iommu = of_iommu_configure(dev, np);
+ iommu = of_iommu_configure(dev, np, id);
if (PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
@@ -171,7 +173,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
return 0;
}
-EXPORT_SYMBOL_GPL(of_dma_configure);
+EXPORT_SYMBOL_GPL(of_dma_configure_id);
int of_device_register(struct platform_device *pdev)
{
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index a296eaf52a5b..25d17b8a1a1a 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -576,55 +576,57 @@ err:
}
}
-static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
- u32 rid_in)
+static u32 __of_msi_map_id(struct device *dev, struct device_node **np,
+ u32 id_in)
{
struct device *parent_dev;
- u32 rid_out = rid_in;
+ u32 id_out = id_in;
/*
* Walk up the device parent links looking for one with a
* "msi-map" property.
*/
for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
- if (!of_map_rid(parent_dev->of_node, rid_in, "msi-map",
- "msi-map-mask", np, &rid_out))
+ if (!of_map_id(parent_dev->of_node, id_in, "msi-map",
+ "msi-map-mask", np, &id_out))
break;
- return rid_out;
+ return id_out;
}
/**
- * of_msi_map_rid - Map a MSI requester ID for a device.
+ * of_msi_map_id - Map a MSI ID for a device.
* @dev: device for which the mapping is to be done.
* @msi_np: device node of the expected msi controller.
- * @rid_in: unmapped MSI requester ID for the device.
+ * @id_in: unmapped MSI ID for the device.
*
* Walk up the device hierarchy looking for devices with a "msi-map"
- * property. If found, apply the mapping to @rid_in.
+ * property. If found, apply the mapping to @id_in.
*
- * Returns the mapped MSI requester ID.
+ * Returns the mapped MSI ID.
*/
-u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in)
+u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in)
{
- return __of_msi_map_rid(dev, &msi_np, rid_in);
+ return __of_msi_map_id(dev, &msi_np, id_in);
}
/**
* of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
* @dev: device for which the mapping is to be done.
- * @rid: Requester ID for the device.
+ * @id: Device ID.
+ * @bus_token: IRQ domain bus token to match
*
* Walk up the device hierarchy looking for devices with a "msi-map"
* property.
*
* Returns: the MSI domain for this device (or NULL on failure)
*/
-struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid)
+struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id,
+ u32 bus_token)
{
struct device_node *np = NULL;
- __of_msi_map_rid(dev, &np, rid);
- return irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
+ __of_msi_map_id(dev, &np, id);
+ return irq_find_matching_host(np, bus_token);
}
/**
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index dfbd3d10410c..0c8c74a3c868 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -118,7 +118,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
*/
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
- if (IS_ERR_OR_NULL(opp) || !opp->available) {
+ if (IS_ERR_OR_NULL(opp)) {
pr_err("%s: Invalid parameters\n", __func__);
return 0;
}
@@ -2271,6 +2271,7 @@ adjust_put_table:
dev_pm_opp_put_opp_table(opp_table);
return r;
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
/**
* dev_pm_opp_enable() - Enable a specific OPP
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 314f306140a1..0430290670ab 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -1209,20 +1209,19 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
/*
* Callback function provided to the Energy Model framework upon registration.
- * This computes the power estimated by @CPU at @kHz if it is the frequency
+ * This computes the estimated power of @dev at @kHz if it is the frequency
* of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
* (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
* frequency and @mW to the associated power. The power is estimated as
- * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively
- * the voltage and frequency of the OPP.
+ * P = C * V^2 * f with C being the device's capacitance and V and f
+ * respectively the voltage and frequency of the OPP.
*
- * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power
- * calculation failed because of missing parameters, 0 otherwise.
+ * Returns -EINVAL if the power calculation failed because of missing
+ * parameters, 0 otherwise.
*/
-static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
- int cpu)
+static int __maybe_unused _get_power(unsigned long *mW, unsigned long *kHz,
+ struct device *dev)
{
- struct device *cpu_dev;
struct dev_pm_opp *opp;
struct device_node *np;
unsigned long mV, Hz;
@@ -1230,11 +1229,7 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
u64 tmp;
int ret;
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev)
- return -ENODEV;
-
- np = of_node_get(cpu_dev->of_node);
+ np = of_node_get(dev->of_node);
if (!np)
return -EINVAL;
@@ -1244,7 +1239,7 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
return -EINVAL;
Hz = *kHz * 1000;
- opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz);
+ opp = dev_pm_opp_find_freq_ceil(dev, &Hz);
if (IS_ERR(opp))
return -EINVAL;
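For concreteness, a worked instance of the P = C * V^2 * f estimate from the
kernel-doc above, with illustrative numbers only (C taken from
"dynamic-power-coefficient", V and f from the chosen OPP):

	P = 100 uW/MHz/V^2 * (0.9 V)^2 * 1000 MHz
	  = 100 * 0.81 * 1000 uW
	  = 81 mW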
@@ -1264,30 +1259,38 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
/**
* dev_pm_opp_of_register_em() - Attempt to register an Energy Model
- * @cpus : CPUs for which an Energy Model has to be registered
+ * @dev : Device for which an Energy Model has to be registered
+ * @cpus : CPUs for which an Energy Model has to be registered. For
+ * other types of devices it should be set to NULL.
*
* This checks whether the "dynamic-power-coefficient" devicetree property has
* been specified, and tries to register an Energy Model with it if it has.
+ * Having this property means the OPP voltages are known, so the EM
+ * can be calculated.
*/
-void dev_pm_opp_of_register_em(struct cpumask *cpus)
+int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
{
- struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power);
- int ret, nr_opp, cpu = cpumask_first(cpus);
- struct device *cpu_dev;
+ struct em_data_callback em_cb = EM_DATA_CB(_get_power);
struct device_node *np;
+ int ret, nr_opp;
u32 cap;
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev)
- return;
+ if (IS_ERR_OR_NULL(dev)) {
+ ret = -EINVAL;
+ goto failed;
+ }
- nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
- if (nr_opp <= 0)
- return;
+ nr_opp = dev_pm_opp_get_opp_count(dev);
+ if (nr_opp <= 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
- np = of_node_get(cpu_dev->of_node);
- if (!np)
- return;
+ np = of_node_get(dev->of_node);
+ if (!np) {
+ ret = -EINVAL;
+ goto failed;
+ }
/*
* Register an EM only if the 'dynamic-power-coefficient' property is
@@ -1298,9 +1301,20 @@ void dev_pm_opp_of_register_em(struct cpumask *cpus)
*/
ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
of_node_put(np);
- if (ret || !cap)
- return;
+ if (ret || !cap) {
+ dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n");
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus);
+ if (ret)
+ goto failed;
- em_register_perf_domain(cpus, nr_opp, &em_cb);
+ return 0;
+
+failed:
+ dev_dbg(dev, "Couldn't register Energy Model %d\n", ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
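A hedged caller sketch for the new signature, assuming a cpufreq driver's
init path where a policy is at hand; as the dev_dbg() above suggests,
failure is typically treated as non-fatal:

	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	/* Register an EM covering every CPU of this policy. */
	ret = dev_pm_opp_of_register_em(cpu_dev, policy->related_cpus);
	if (ret)
		dev_dbg(cpu_dev, "EM registration failed (%d)\n", ret);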
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index e3357e91decb..bd4771f388ab 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon <nm@ti.com>
* Dave Gerlach <d-gerlach@ti.com>
*
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 7e112829d250..5368452eb5a6 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -666,7 +666,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
* @dev: instance of PCI owned by the driver that's asking
* @mask: number of address bits this PCI device can handle
*
- * See Documentation/DMA-API-HOWTO.txt
+ * See Documentation/core-api/dma-api-howto.rst
*/
static int sba_dma_supported( struct device *dev, u64 mask)
{
@@ -698,7 +698,7 @@ static int sba_dma_supported( struct device *dev, u64 mask)
* @size: number of bytes to map in driver buffer.
* @direction: R/W or both.
*
- * See Documentation/DMA-API-HOWTO.txt
+ * See Documentation/core-api/dma-api-howto.rst
*/
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
@@ -788,7 +788,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset,
* @size: number of bytes mapped in driver buffer.
* @direction: R/W or both.
*
- * See Documentation/DMA-API-HOWTO.txt
+ * See Documentation/core-api/dma-api-howto.rst
*/
static void
sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
@@ -867,7 +867,7 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
* @size: number of bytes mapped in driver buffer.
* @dma_handle: IOVA of new buffer.
*
- * See Documentation/DMA-API-HOWTO.txt
+ * See Documentation/core-api/dma-api-howto.rst
*/
static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
@@ -898,7 +898,7 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle
* @vaddr: virtual address IOVA of "consistent" buffer.
* @dma_handler: IO virtual address of "consistent" buffer.
*
- * See Documentation/DMA-API-HOWTO.txt
+ * See Documentation/core-api/dma-api-howto.rst
*/
static void
sba_free(struct device *hwdev, size_t size, void *vaddr,
@@ -933,7 +933,7 @@ int dump_run_sg = 0;
* @nents: number of entries in list
* @direction: R/W or both.
*
- * See Documentation/DMA-API-HOWTO.txt
+ * See Documentation/core-api/dma-api-howto.rst
*/
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
@@ -1017,7 +1017,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
* @nents: number of entries in list
* @direction: R/W or both.
*
- * See Documentation/DMA-API-HOWTO.txt
+ * See Documentation/core-api/dma-api-howto.rst
*/
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 9a64cf90c291..ebec0a6e77ed 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -560,6 +560,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
if (!vmd->bus) {
pci_free_resource_list(&resources);
irq_domain_remove(vmd->irq_domain);
+ irq_domain_free_fwnode(fn);
return -ENODEV;
}
@@ -673,6 +674,7 @@ static void vmd_cleanup_srcu(struct vmd_dev *vmd)
static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
+ struct fwnode_handle *fn = vmd->irq_domain->fwnode;
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
@@ -680,6 +682,7 @@ static void vmd_remove(struct pci_dev *dev)
vmd_cleanup_srcu(vmd);
vmd_detach_resources(vmd);
irq_domain_remove(vmd->irq_domain);
+ irq_domain_free_fwnode(fn);
}
#ifdef CONFIG_PM_SLEEP
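The pairing being fixed, sketched from the surrounding driver
(vmd_msi_domain_info and the sysdata field are assumed from vmd.c):
irq_domain_remove() does not free a driver-allocated fwnode, so every
failure and teardown path must call irq_domain_free_fwnode() itself:

	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
	if (!fn)
		return -ENODEV;

	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
						    NULL);
	if (!vmd->irq_domain) {
		irq_domain_free_fwnode(fn);	/* pair the allocation */
		return -ENODEV;
	}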
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index b4c92cee13f8..3365c93abf0e 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -122,13 +122,21 @@ static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
struct acpiphp_context *context;
acpi_lock_hp_context();
+
context = acpiphp_get_context(adev);
- if (!context || context->func.parent->is_going_away) {
- acpi_unlock_hp_context();
- return NULL;
+ if (!context)
+ goto unlock;
+
+ if (context->func.parent->is_going_away) {
+ acpiphp_put_context(context);
+ context = NULL;
+ goto unlock;
}
+
get_bridge(context->func.parent);
acpiphp_put_context(context);
+
+unlock:
acpi_unlock_hp_context();
return context;
}
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 6b43a5455c7a..19aeadb22f11 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1535,8 +1535,8 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
of_node = irq_domain_get_of_node(domain);
- rid = of_node ? of_msi_map_rid(&pdev->dev, of_node, rid) :
- iort_msi_map_rid(&pdev->dev, rid);
+ rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
+ iort_msi_map_id(&pdev->dev, rid);
return rid;
}
@@ -1556,9 +1556,10 @@ struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
u32 rid = pci_dev_id(pdev);
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
- dom = of_msi_map_get_device_domain(&pdev->dev, rid);
+ dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
if (!dom)
- dom = iort_get_device_domain(&pdev->dev, rid);
+ dom = iort_get_device_domain(&pdev->dev, rid,
+ DOMAIN_BUS_PCI_MSI);
return dom;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 7224b1e5f2a8..0d85025c55fd 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -527,8 +527,8 @@ static void program_hpx_type3_register(struct pci_dev *dev,
return;
break;
- case HPX_CFG_VEND_CAP: /* Fall through */
- case HPX_CFG_DVSEC: /* Fall through */
+ case HPX_CFG_VEND_CAP:
+ case HPX_CFG_DVSEC:
default:
pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
return;
@@ -1001,7 +1001,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
error = -EBUSY;
break;
}
- /* Fall through */
+ fallthrough;
case PCI_D0:
case PCI_D1:
case PCI_D2:
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index da6510af1221..449466f71040 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/sched/isolation.h>
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
@@ -333,6 +334,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
const struct pci_device_id *id)
{
int error, node, cpu;
+ int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
struct drv_dev_and_id ddi = { drv, dev, id };
/*
@@ -353,7 +355,8 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
pci_physfn_is_probed(dev))
cpu = nr_cpu_ids;
else
- cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
+ cpu = cpumask_any_and(cpumask_of_node(node),
+ housekeeping_cpumask(hk_flags));
if (cpu < nr_cpu_ids)
error = work_on_cpu(cpu, local_pci_probe, &ddi);
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 3acf56683915..14af4c97c626 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1138,7 +1138,7 @@ static irqreturn_t aer_isr(int irq, void *context)
{
struct pcie_device *dev = (struct pcie_device *)context;
struct aer_rpc *rpc = get_service_data(dev);
- struct aer_err_source uninitialized_var(e_src);
+ struct aer_err_source e_src;
if (kfifo_is_empty(&rpc->aer_fifo))
return IRQ_NONE;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 812bfc32ecb8..2ea61abd5830 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2330,6 +2330,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
+{
+ pci_info(dev, "Disabling ASPM L0s/L1\n");
+ pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+}
+
+/*
+ * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
+ * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
+ * disable both L0s and L1 for now to be safe.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+
/*
* Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
* Link bit cleared after starting the link retrain process to allow this
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 4cdb35d166ac..5274f7fe359e 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -756,8 +756,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
- res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0);
+ smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0);
if (IS_ERR(smmu_pmu->reg_base))
return PTR_ERR(smmu_pmu->reg_base);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index b3ed94b98d9b..de9362c25c07 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -70,5 +70,6 @@ source "drivers/phy/st/Kconfig"
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
source "drivers/phy/intel/Kconfig"
+source "drivers/phy/xilinx/Kconfig"
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 310c149a9df5..c27408e4daae 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -8,24 +8,25 @@ obj-$(CONFIG_GENERIC_PHY_MIPI_DPHY) += phy-core-mipi-dphy.o
obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
-obj-$(CONFIG_ARCH_SUNXI) += allwinner/
-obj-$(CONFIG_ARCH_MESON) += amlogic/
-obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
-obj-$(CONFIG_ARCH_RENESAS) += renesas/
-obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
-obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-y += broadcom/ \
+obj-y += allwinner/ \
+ amlogic/ \
+ broadcom/ \
cadence/ \
freescale/ \
hisilicon/ \
intel/ \
lantiq/ \
marvell/ \
+ mediatek/ \
motorola/ \
mscc/ \
qualcomm/ \
ralink/ \
+ renesas/ \
+ rockchip/ \
samsung/ \
socionext/ \
st/ \
- ti/
+ tegra/ \
+ ti/ \
+ xilinx/
diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig
index e760d89d3976..fb584518b2d0 100644
--- a/drivers/phy/allwinner/Kconfig
+++ b/drivers/phy/allwinner/Kconfig
@@ -22,7 +22,7 @@ config PHY_SUN4I_USB
config PHY_SUN6I_MIPI_DPHY
tristate "Allwinner A31 MIPI D-PHY Support"
depends on ARCH_SUNXI || COMPILE_TEST
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && COMMON_CLK
depends on RESET_CONTROLLER
select GENERIC_PHY
select GENERIC_PHY_MIPI_DPHY
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index e5842e48a5e0..651d5e2a25ce 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -7,7 +7,7 @@
* Based on code from
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
*
- * Modelled after: Samsung S5P/EXYNOS SoC series MIPI CSIS/DSIM DPHY driver
+ * Modelled after: Samsung S5P/Exynos SoC series MIPI CSIS/DSIM DPHY driver
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
*/
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/extcon-provider.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
index 79c8af5c7c1d..1fa761ba6cbb 100644
--- a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
+++ b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
@@ -233,7 +233,7 @@ static int sun6i_dphy_exit(struct phy *phy)
}
-static struct phy_ops sun6i_dphy_ops = {
+static const struct phy_ops sun6i_dphy_ops = {
.configure = sun6i_dphy_configure,
.power_on = sun6i_dphy_power_on,
.power_off = sun6i_dphy_power_off,
@@ -241,7 +241,7 @@ static struct phy_ops sun6i_dphy_ops = {
.exit = sun6i_dphy_exit,
};
-static struct regmap_config sun6i_dphy_regmap_config = {
+static const struct regmap_config sun6i_dphy_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index b29f11c19155..a1f1a9c90d0d 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -2,6 +2,14 @@
#
# Phy drivers for Broadcom platforms
#
+config PHY_BCM63XX_USBH
+ tristate "BCM63xx USBH PHY driver"
+ depends on BMIPS_GENERIC || COMPILE_TEST
+ select GENERIC_PHY
+ help
+ Enable this to support the BCM63xx USBH PHY.
+ If unsure, say N.
+
config PHY_CYGNUS_PCIE
tristate "Broadcom Cygnus PCIe PHY driver"
depends on OF && (ARCH_BCM_CYGNUS || COMPILE_TEST)
diff --git a/drivers/phy/broadcom/Makefile b/drivers/phy/broadcom/Makefile
index c78de546135c..7024127f86ad 100644
--- a/drivers/phy/broadcom/Makefile
+++ b/drivers/phy/broadcom/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_BCM63XX_USBH) += phy-bcm63xx-usbh.o
obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
obj-$(CONFIG_BCM_KONA_USB2_PHY) += phy-bcm-kona-usb2.o
obj-$(CONFIG_PHY_BCM_NS_USB2) += phy-bcm-ns-usb2.o
diff --git a/drivers/phy/broadcom/phy-bcm63xx-usbh.c b/drivers/phy/broadcom/phy-bcm63xx-usbh.c
new file mode 100644
index 000000000000..6c05ba8b08be
--- /dev/null
+++ b/drivers/phy/broadcom/phy-bcm63xx-usbh.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BCM6328 USBH PHY Controller Driver
+ *
+ * Copyright (C) 2020 Álvaro Fernández Rojas <noltari@gmail.com>
+ * Copyright (C) 2015 Simon Arlott
+ *
+ * Derived from bcm963xx_4.12L.06B_consumer/kernel/linux/arch/mips/bcm963xx/setup.c:
+ * Copyright (C) 2002 Broadcom Corporation
+ *
+ * Derived from OpenWrt patches:
+ * Copyright (C) 2013 Jonas Gorski <jonas.gorski@gmail.com>
+ * Copyright (C) 2013 Florian Fainelli <f.fainelli@gmail.com>
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* USBH control register offsets */
+enum usbh_regs {
+ USBH_BRT_CONTROL1 = 0,
+ USBH_BRT_CONTROL2,
+ USBH_BRT_STATUS1,
+ USBH_BRT_STATUS2,
+ USBH_UTMI_CONTROL1,
+#define USBH_UC1_DEV_MODE_SEL BIT(0)
+ USBH_TEST_PORT_CONTROL,
+ USBH_PLL_CONTROL1,
+#define USBH_PLLC_REFCLKSEL_SHIFT 0
+#define USBH_PLLC_REFCLKSEL_MASK (0x3 << USBH_PLLC_REFCLKSEL_SHIFT)
+#define USBH_PLLC_CLKSEL_SHIFT 2
+#define USBH_PLLC_CLKSEL_MASK (0x3 << USBH_PLLC_CLKSEL_SHIFT)
+#define USBH_PLLC_XTAL_PWRDWNB BIT(4)
+#define USBH_PLLC_PLL_PWRDWNB BIT(5)
+#define USBH_PLLC_PLL_CALEN BIT(6)
+#define USBH_PLLC_PHYPLL_BYP BIT(7)
+#define USBH_PLLC_PLL_RESET BIT(8)
+#define USBH_PLLC_PLL_IDDQ_PWRDN BIT(9)
+#define USBH_PLLC_PLL_PWRDN_DELAY BIT(10)
+#define USBH_6318_PLLC_PLL_SUSPEND_EN BIT(27)
+#define USBH_6318_PLLC_PHYPLL_BYP BIT(29)
+#define USBH_6318_PLLC_PLL_RESET BIT(30)
+#define USBH_6318_PLLC_PLL_IDDQ_PWRDN BIT(31)
+ USBH_SWAP_CONTROL,
+#define USBH_SC_OHCI_DATA_SWAP BIT(0)
+#define USBH_SC_OHCI_ENDIAN_SWAP BIT(1)
+#define USBH_SC_OHCI_LOGICAL_ADDR_EN BIT(2)
+#define USBH_SC_EHCI_DATA_SWAP BIT(3)
+#define USBH_SC_EHCI_ENDIAN_SWAP BIT(4)
+#define USBH_SC_EHCI_LOGICAL_ADDR_EN BIT(5)
+#define USBH_SC_USB_DEVICE_SEL BIT(6)
+ USBH_GENERIC_CONTROL,
+#define USBH_GC_PLL_SUSPEND_EN BIT(1)
+ USBH_FRAME_ADJUST_VALUE,
+ USBH_SETUP,
+#define USBH_S_IOC BIT(4)
+#define USBH_S_IPP BIT(5)
+ USBH_MDIO,
+ USBH_MDIO32,
+ USBH_USB_SIM_CONTROL,
+#define USBH_USC_LADDR_SEL BIT(5)
+
+ __USBH_ENUM_SIZE
+};
+
+struct bcm63xx_usbh_phy_variant {
+ /* Registers */
+ long regs[__USBH_ENUM_SIZE];
+
+ /* PLLC bits to set/clear for power on */
+ u32 power_pllc_clr;
+ u32 power_pllc_set;
+
+ /* Setup bits to set/clear for power on */
+ u32 setup_clr;
+ u32 setup_set;
+
+ /* Swap Control bits to set */
+ u32 swapctl_dev_set;
+
+ /* Test Port Control value to set if non-zero */
+ u32 tpc_val;
+
+ /* USB Sim Control bits to set */
+ u32 usc_set;
+
+ /* UTMI Control 1 bits to set */
+ u32 utmictl1_dev_set;
+};
+
+struct bcm63xx_usbh_phy {
+ void __iomem *base;
+ struct clk *usbh_clk;
+ struct clk *usb_ref_clk;
+ struct reset_control *reset;
+ const struct bcm63xx_usbh_phy_variant *variant;
+ bool device_mode;
+};
+
+static const struct bcm63xx_usbh_phy_variant usbh_bcm6318 = {
+ .regs = {
+ [USBH_BRT_CONTROL1] = -1,
+ [USBH_BRT_CONTROL2] = -1,
+ [USBH_BRT_STATUS1] = -1,
+ [USBH_BRT_STATUS2] = -1,
+ [USBH_UTMI_CONTROL1] = 0x2c,
+ [USBH_TEST_PORT_CONTROL] = 0x1c,
+ [USBH_PLL_CONTROL1] = 0x04,
+ [USBH_SWAP_CONTROL] = 0x0c,
+ [USBH_GENERIC_CONTROL] = -1,
+ [USBH_FRAME_ADJUST_VALUE] = 0x08,
+ [USBH_SETUP] = 0x00,
+ [USBH_MDIO] = 0x14,
+ [USBH_MDIO32] = 0x18,
+ [USBH_USB_SIM_CONTROL] = 0x20,
+ },
+ .power_pllc_clr = USBH_6318_PLLC_PLL_IDDQ_PWRDN,
+ .power_pllc_set = USBH_6318_PLLC_PLL_SUSPEND_EN,
+ .setup_set = USBH_S_IOC,
+ .swapctl_dev_set = USBH_SC_USB_DEVICE_SEL,
+ .usc_set = USBH_USC_LADDR_SEL,
+ .utmictl1_dev_set = USBH_UC1_DEV_MODE_SEL,
+};
+
+static const struct bcm63xx_usbh_phy_variant usbh_bcm6328 = {
+ .regs = {
+ [USBH_BRT_CONTROL1] = 0x00,
+ [USBH_BRT_CONTROL2] = 0x04,
+ [USBH_BRT_STATUS1] = 0x08,
+ [USBH_BRT_STATUS2] = 0x0c,
+ [USBH_UTMI_CONTROL1] = 0x10,
+ [USBH_TEST_PORT_CONTROL] = 0x14,
+ [USBH_PLL_CONTROL1] = 0x18,
+ [USBH_SWAP_CONTROL] = 0x1c,
+ [USBH_GENERIC_CONTROL] = 0x20,
+ [USBH_FRAME_ADJUST_VALUE] = 0x24,
+ [USBH_SETUP] = 0x28,
+ [USBH_MDIO] = 0x2c,
+ [USBH_MDIO32] = 0x30,
+ [USBH_USB_SIM_CONTROL] = 0x34,
+ },
+ .setup_set = USBH_S_IOC,
+ .swapctl_dev_set = USBH_SC_USB_DEVICE_SEL,
+ .utmictl1_dev_set = USBH_UC1_DEV_MODE_SEL,
+};
+
+static const struct bcm63xx_usbh_phy_variant usbh_bcm6358 = {
+ .regs = {
+ [USBH_BRT_CONTROL1] = -1,
+ [USBH_BRT_CONTROL2] = -1,
+ [USBH_BRT_STATUS1] = -1,
+ [USBH_BRT_STATUS2] = -1,
+ [USBH_UTMI_CONTROL1] = -1,
+ [USBH_TEST_PORT_CONTROL] = 0x24,
+ [USBH_PLL_CONTROL1] = -1,
+ [USBH_SWAP_CONTROL] = 0x00,
+ [USBH_GENERIC_CONTROL] = -1,
+ [USBH_FRAME_ADJUST_VALUE] = -1,
+ [USBH_SETUP] = -1,
+ [USBH_MDIO] = -1,
+ [USBH_MDIO32] = -1,
+ [USBH_USB_SIM_CONTROL] = -1,
+ },
+ /*
+ * The magic value comes from the original vendor BSP
+ * and is needed for USB to work. The datasheet does not
+ * help, so the magic value is used as-is.
+ */
+ .tpc_val = 0x1c0020,
+};
+
+static const struct bcm63xx_usbh_phy_variant usbh_bcm6368 = {
+ .regs = {
+ [USBH_BRT_CONTROL1] = 0x00,
+ [USBH_BRT_CONTROL2] = 0x04,
+ [USBH_BRT_STATUS1] = 0x08,
+ [USBH_BRT_STATUS2] = 0x0c,
+ [USBH_UTMI_CONTROL1] = 0x10,
+ [USBH_TEST_PORT_CONTROL] = 0x14,
+ [USBH_PLL_CONTROL1] = 0x18,
+ [USBH_SWAP_CONTROL] = 0x1c,
+ [USBH_GENERIC_CONTROL] = -1,
+ [USBH_FRAME_ADJUST_VALUE] = 0x24,
+ [USBH_SETUP] = 0x28,
+ [USBH_MDIO] = 0x2c,
+ [USBH_MDIO32] = 0x30,
+ [USBH_USB_SIM_CONTROL] = 0x34,
+ },
+ .power_pllc_clr = USBH_PLLC_PLL_IDDQ_PWRDN | USBH_PLLC_PLL_PWRDN_DELAY,
+ .setup_set = USBH_S_IOC,
+ .swapctl_dev_set = USBH_SC_USB_DEVICE_SEL,
+ .utmictl1_dev_set = USBH_UC1_DEV_MODE_SEL,
+};
+
+static const struct bcm63xx_usbh_phy_variant usbh_bcm63268 = {
+ .regs = {
+ [USBH_BRT_CONTROL1] = 0x00,
+ [USBH_BRT_CONTROL2] = 0x04,
+ [USBH_BRT_STATUS1] = 0x08,
+ [USBH_BRT_STATUS2] = 0x0c,
+ [USBH_UTMI_CONTROL1] = 0x10,
+ [USBH_TEST_PORT_CONTROL] = 0x14,
+ [USBH_PLL_CONTROL1] = 0x18,
+ [USBH_SWAP_CONTROL] = 0x1c,
+ [USBH_GENERIC_CONTROL] = 0x20,
+ [USBH_FRAME_ADJUST_VALUE] = 0x24,
+ [USBH_SETUP] = 0x28,
+ [USBH_MDIO] = 0x2c,
+ [USBH_MDIO32] = 0x30,
+ [USBH_USB_SIM_CONTROL] = 0x34,
+ },
+ .power_pllc_clr = USBH_PLLC_PLL_IDDQ_PWRDN | USBH_PLLC_PLL_PWRDN_DELAY,
+ .setup_clr = USBH_S_IPP,
+ .setup_set = USBH_S_IOC,
+ .swapctl_dev_set = USBH_SC_USB_DEVICE_SEL,
+ .utmictl1_dev_set = USBH_UC1_DEV_MODE_SEL,
+};
+
+static inline bool usbh_has_reg(struct bcm63xx_usbh_phy *usbh, int reg)
+{
+ return (usbh->variant->regs[reg] >= 0);
+}
+
+static inline u32 usbh_readl(struct bcm63xx_usbh_phy *usbh, int reg)
+{
+ return __raw_readl(usbh->base + usbh->variant->regs[reg]);
+}
+
+static inline void usbh_writel(struct bcm63xx_usbh_phy *usbh, int reg,
+ u32 value)
+{
+ __raw_writel(value, usbh->base + usbh->variant->regs[reg]);
+}
+
+static int bcm63xx_usbh_phy_init(struct phy *phy)
+{
+ struct bcm63xx_usbh_phy *usbh = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(usbh->usbh_clk);
+ if (ret) {
+ dev_err(&phy->dev, "unable to enable usbh clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(usbh->usb_ref_clk);
+ if (ret) {
+ dev_err(&phy->dev, "unable to enable usb_ref clock: %d\n", ret);
+ clk_disable_unprepare(usbh->usbh_clk);
+ return ret;
+ }
+
+ ret = reset_control_reset(usbh->reset);
+ if (ret) {
+ dev_err(&phy->dev, "unable to reset device: %d\n", ret);
+ clk_disable_unprepare(usbh->usb_ref_clk);
+ clk_disable_unprepare(usbh->usbh_clk);
+ return ret;
+ }
+
+ /* Configure to work in native CPU endian */
+ if (usbh_has_reg(usbh, USBH_SWAP_CONTROL)) {
+ u32 val = usbh_readl(usbh, USBH_SWAP_CONTROL);
+
+ val |= USBH_SC_EHCI_DATA_SWAP;
+ val &= ~USBH_SC_EHCI_ENDIAN_SWAP;
+
+ val |= USBH_SC_OHCI_DATA_SWAP;
+ val &= ~USBH_SC_OHCI_ENDIAN_SWAP;
+
+ if (usbh->device_mode && usbh->variant->swapctl_dev_set)
+ val |= usbh->variant->swapctl_dev_set;
+
+ usbh_writel(usbh, USBH_SWAP_CONTROL, val);
+ }
+
+ if (usbh_has_reg(usbh, USBH_SETUP)) {
+ u32 val = usbh_readl(usbh, USBH_SETUP);
+
+ val |= usbh->variant->setup_set;
+ val &= ~usbh->variant->setup_clr;
+
+ usbh_writel(usbh, USBH_SETUP, val);
+ }
+
+ if (usbh_has_reg(usbh, USBH_USB_SIM_CONTROL)) {
+ u32 val = usbh_readl(usbh, USBH_USB_SIM_CONTROL);
+
+ val |= usbh->variant->usc_set;
+
+ usbh_writel(usbh, USBH_USB_SIM_CONTROL, val);
+ }
+
+ if (usbh->variant->tpc_val &&
+ usbh_has_reg(usbh, USBH_TEST_PORT_CONTROL))
+ usbh_writel(usbh, USBH_TEST_PORT_CONTROL,
+ usbh->variant->tpc_val);
+
+ if (usbh->device_mode &&
+ usbh_has_reg(usbh, USBH_UTMI_CONTROL1) &&
+ usbh->variant->utmictl1_dev_set) {
+ u32 val = usbh_readl(usbh, USBH_UTMI_CONTROL1);
+
+ val |= usbh->variant->utmictl1_dev_set;
+
+ usbh_writel(usbh, USBH_UTMI_CONTROL1, val);
+ }
+
+ return 0;
+}
+
+static int bcm63xx_usbh_phy_power_on(struct phy *phy)
+{
+ struct bcm63xx_usbh_phy *usbh = phy_get_drvdata(phy);
+
+ if (usbh_has_reg(usbh, USBH_PLL_CONTROL1)) {
+ u32 val = usbh_readl(usbh, USBH_PLL_CONTROL1);
+
+ val |= usbh->variant->power_pllc_set;
+ val &= ~usbh->variant->power_pllc_clr;
+
+ usbh_writel(usbh, USBH_PLL_CONTROL1, val);
+ }
+
+ return 0;
+}
+
+static int bcm63xx_usbh_phy_power_off(struct phy *phy)
+{
+ struct bcm63xx_usbh_phy *usbh = phy_get_drvdata(phy);
+
+ if (usbh_has_reg(usbh, USBH_PLL_CONTROL1)) {
+ u32 val = usbh_readl(usbh, USBH_PLL_CONTROL1);
+
+ val &= ~usbh->variant->power_pllc_set;
+ val |= usbh->variant->power_pllc_clr;
+
+ usbh_writel(usbh, USBH_PLL_CONTROL1, val);
+ }
+
+ return 0;
+}
+
+static int bcm63xx_usbh_phy_exit(struct phy *phy)
+{
+ struct bcm63xx_usbh_phy *usbh = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(usbh->usbh_clk);
+ clk_disable_unprepare(usbh->usb_ref_clk);
+
+ return 0;
+}
+
+static const struct phy_ops bcm63xx_usbh_phy_ops = {
+ .exit = bcm63xx_usbh_phy_exit,
+ .init = bcm63xx_usbh_phy_init,
+ .power_off = bcm63xx_usbh_phy_power_off,
+ .power_on = bcm63xx_usbh_phy_power_on,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *bcm63xx_usbh_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct bcm63xx_usbh_phy *usbh = dev_get_drvdata(dev);
+
+ usbh->device_mode = !!args->args[0];
+
+ return of_phy_simple_xlate(dev, args);
+}
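A brief note on what this xlate implies for consumers (binding assumed from
the driver, with one PHY cell selecting the mode):

	/*
	 * Assumed DT usage: phys = <&usbh 0> for host controllers,
	 * phys = <&usbh 1> for a peripheral/device controller.
	 */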
+
+static int __init bcm63xx_usbh_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm63xx_usbh_phy *usbh;
+ const struct bcm63xx_usbh_phy_variant *variant;
+ struct phy *phy;
+ struct phy_provider *phy_provider;
+
+ usbh = devm_kzalloc(dev, sizeof(*usbh), GFP_KERNEL);
+ if (!usbh)
+ return -ENOMEM;
+
+ variant = device_get_match_data(dev);
+ if (!variant)
+ return -EINVAL;
+ usbh->variant = variant;
+
+ usbh->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(usbh->base))
+ return PTR_ERR(usbh->base);
+
+ usbh->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(usbh->reset)) {
+ if (PTR_ERR(usbh->reset) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get reset\n");
+ return PTR_ERR(usbh->reset);
+ }
+
+ usbh->usbh_clk = devm_clk_get_optional(dev, "usbh");
+ if (IS_ERR(usbh->usbh_clk))
+ return PTR_ERR(usbh->usbh_clk);
+
+ usbh->usb_ref_clk = devm_clk_get_optional(dev, "usb_ref");
+ if (IS_ERR(usbh->usb_ref_clk))
+ return PTR_ERR(usbh->usb_ref_clk);
+
+ phy = devm_phy_create(dev, NULL, &bcm63xx_usbh_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ platform_set_drvdata(pdev, usbh);
+ phy_set_drvdata(phy, usbh);
+
+ phy_provider = devm_of_phy_provider_register(dev,
+ bcm63xx_usbh_phy_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(dev, "failed to register PHY provider\n");
+ return PTR_ERR(phy_provider);
+ }
+
+ dev_dbg(dev, "Registered BCM63xx USB PHY driver\n");
+
+ return 0;
+}
+
+static const struct of_device_id bcm63xx_usbh_phy_ids[] __initconst = {
+ { .compatible = "brcm,bcm6318-usbh-phy", .data = &usbh_bcm6318 },
+ { .compatible = "brcm,bcm6328-usbh-phy", .data = &usbh_bcm6328 },
+ { .compatible = "brcm,bcm6358-usbh-phy", .data = &usbh_bcm6358 },
+ { .compatible = "brcm,bcm6362-usbh-phy", .data = &usbh_bcm6368 },
+ { .compatible = "brcm,bcm6368-usbh-phy", .data = &usbh_bcm6368 },
+ { .compatible = "brcm,bcm63268-usbh-phy", .data = &usbh_bcm63268 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm63xx_usbh_phy_ids);
+
+static struct platform_driver bcm63xx_usbh_phy_driver __refdata = {
+ .driver = {
+ .name = "bcm63xx-usbh-phy",
+ .of_match_table = bcm63xx_usbh_phy_ids,
+ },
+ .probe = bcm63xx_usbh_phy_probe,
+};
+module_platform_driver(bcm63xx_usbh_phy_driver);
+
+MODULE_DESCRIPTION("BCM63xx USBH PHY driver");
+MODULE_AUTHOR("Álvaro Fernández Rojas <noltari@gmail.com>");
+MODULE_AUTHOR("Simon Arlott");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/cadence/phy-cadence-salvo.c b/drivers/phy/cadence/phy-cadence-salvo.c
index 1ecbb964cd21..016514e4aa54 100644
--- a/drivers/phy/cadence/phy-cadence-salvo.c
+++ b/drivers/phy/cadence/phy-cadence-salvo.c
@@ -88,7 +88,7 @@
#define TB_ADDR_TX_RCVDETSC_CTRL 0x4124
/* TB_ADDR_TX_RCVDETSC_CTRL */
-#define RXDET_IN_P3_32KHZ BIT(1)
+#define RXDET_IN_P3_32KHZ BIT(0)
struct cdns_reg_pairs {
u16 val;
diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c
index 6960dfd8ad8c..0fe408964334 100644
--- a/drivers/phy/marvell/phy-armada38x-comphy.c
+++ b/drivers/phy/marvell/phy-armada38x-comphy.c
@@ -41,6 +41,7 @@ struct a38x_comphy_lane {
struct a38x_comphy {
void __iomem *base;
+ void __iomem *conf;
struct device *dev;
struct a38x_comphy_lane lane[MAX_A38X_COMPHY];
};
@@ -54,6 +55,21 @@ static const u8 gbe_mux[MAX_A38X_COMPHY][MAX_A38X_PORTS] = {
{ 0, 0, 3 },
};
+static void a38x_set_conf(struct a38x_comphy_lane *lane, bool enable)
+{
+ struct a38x_comphy *priv = lane->priv;
+ u32 conf;
+
+ if (priv->conf) {
+ conf = readl_relaxed(priv->conf);
+ if (enable)
+ conf |= BIT(lane->port);
+ else
+ conf &= ~BIT(lane->port);
+ writel(conf, priv->conf);
+ }
+}
+
static void a38x_comphy_set_reg(struct a38x_comphy_lane *lane,
unsigned int offset, u32 mask, u32 value)
{
@@ -97,6 +113,7 @@ static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub)
{
struct a38x_comphy_lane *lane = phy_get_drvdata(phy);
unsigned int gen;
+ int ret;
if (mode != PHY_MODE_ETHERNET)
return -EINVAL;
@@ -115,13 +132,20 @@ static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub)
return -EINVAL;
}
+ a38x_set_conf(lane, false);
+
a38x_comphy_set_speed(lane, gen, gen);
- return a38x_comphy_poll(lane, COMPHY_STAT1,
- COMPHY_STAT1_PLL_RDY_TX |
- COMPHY_STAT1_PLL_RDY_RX,
- COMPHY_STAT1_PLL_RDY_TX |
- COMPHY_STAT1_PLL_RDY_RX);
+ ret = a38x_comphy_poll(lane, COMPHY_STAT1,
+ COMPHY_STAT1_PLL_RDY_TX |
+ COMPHY_STAT1_PLL_RDY_RX,
+ COMPHY_STAT1_PLL_RDY_TX |
+ COMPHY_STAT1_PLL_RDY_RX);
+
+ if (ret == 0)
+ a38x_set_conf(lane, true);
+
+ return ret;
}
static const struct phy_ops a38x_comphy_ops = {
@@ -174,14 +198,21 @@ static int a38x_comphy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
priv->dev = &pdev->dev;
priv->base = base;
+ /* Optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "conf");
+ if (res) {
+ priv->conf = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->conf))
+ return PTR_ERR(priv->conf);
+ }
+
for_each_available_child_of_node(pdev->dev.of_node, child) {
struct phy *phy;
int ret;
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
index 23bc3bf5c4c0..8834436bc9db 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
@@ -72,7 +72,7 @@ struct mvebu_a3700_utmi_caps {
* struct mvebu_a3700_utmi - PHY driver data
*
* @regs: PHY registers
- * @usb_mis: Regmap with USB miscellaneous registers including PHY ones
+ * @usb_misc: Regmap with USB miscellaneous registers including PHY ones
* @caps: PHY capabilities
* @phy: PHY handle
*/
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 94a34cf75eb3..5172971f4c36 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -178,6 +178,7 @@ static const struct phy_ops gpio_usb_ops = {
/**
* phy_mdm6600_cmd() - send a command request to mdm6600
* @ddata: device driver data
+ * @val: value to set on the command request GPIOs
*
* Configures the three command request GPIOs to the specified value.
*/
@@ -194,7 +195,7 @@ static void phy_mdm6600_cmd(struct phy_mdm6600 *ddata, int val)
/**
* phy_mdm6600_status() - read mdm6600 status lines
- * @ddata: device driver data
+ * @work: work structure
*/
static void phy_mdm6600_status(struct work_struct *work)
{
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index a27b8d578d7f..71cb10826326 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -1062,6 +1062,7 @@ EXPORT_SYMBOL_GPL(__of_phy_provider_register);
* __devm_of_phy_provider_register() - create/register phy provider with the
* framework
* @dev: struct device of the phy provider
+ * @children: device node containing children (if different from dev->of_node)
* @owner: the module owner containing of_xlate
* @of_xlate: function pointer to obtain phy instance from phy provider
*
@@ -1117,12 +1118,14 @@ EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
/**
* devm_of_phy_provider_unregister() - remove phy provider from the framework
* @dev: struct device of the phy provider
+ * @phy_provider: phy provider returned by of_phy_provider_register()
*
* destroys the devres associated with this phy provider and invokes
* of_phy_provider_unregister to unregister the phy provider.
*/
void devm_of_phy_provider_unregister(struct device *dev,
- struct phy_provider *phy_provider) {
+ struct phy_provider *phy_provider)
+{
int r;
r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 7a33ec12f71b..b88922e7de1d 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1615,7 +1615,7 @@ static struct phy *xgene_phy_xlate(struct device *dev,
if (args->args_count <= 0)
return ERR_PTR(-EINVAL);
- if (args->args[0] < MODE_SATA || args->args[0] >= MODE_MAX)
+ if (args->args[0] >= MODE_MAX)
return ERR_PTR(-EINVAL);
ctx->mode = args->args[0];
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index ca9ce7e84a5c..928db510b86c 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -59,30 +59,6 @@ config PHY_QCOM_QUSB2
PHY which is usually paired with either the ChipIdea or Synopsys DWC3
USB IPs on MSM SOCs.
-config PHY_QCOM_UFS
- tristate "Qualcomm UFS PHY driver"
- depends on OF && ARCH_QCOM
- select GENERIC_PHY
- help
- Support for UFS PHY on QCOM chipsets.
-
-if PHY_QCOM_UFS
-
-config PHY_QCOM_UFS_14NM
- tristate
- default PHY_QCOM_UFS
- help
- Support for 14nm UFS QMP phy present on QCOM chipsets.
-
-config PHY_QCOM_UFS_20NM
- tristate
- default PHY_QCOM_UFS
- depends on BROKEN
- help
- Support for 20nm UFS QMP phy present on QCOM chipsets.
-
-endif
-
config PHY_QCOM_USB_HS
tristate "Qualcomm USB HS PHY module"
depends on USB_ULPI_BUS
@@ -128,3 +104,13 @@ config PHY_QCOM_USB_SS
help
Enable this to support the Super-Speed USB transceiver on various
Qualcomm chipsets.
+
+config PHY_QCOM_IPQ806X_USB
+ tristate "Qualcomm IPQ806x DWC3 USB PHY driver"
+ depends on HAS_IOMEM
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ This option enables support for the Synopsys PHYs present inside the
+ Qualcomm USB3.0 DWC3 controller on the ipq806x SoC. This driver supports
+ both HS and SS PHY controllers.
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 86fb32efab79..47acbd7daa3a 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -6,11 +6,9 @@ obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
obj-$(CONFIG_PHY_QCOM_QMP) += phy-qcom-qmp.o
obj-$(CONFIG_PHY_QCOM_QUSB2) += phy-qcom-qusb2.o
-obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs.o
-obj-$(CONFIG_PHY_QCOM_UFS_14NM) += phy-qcom-ufs-qmp-14nm.o
-obj-$(CONFIG_PHY_QCOM_UFS_20NM) += phy-qcom-ufs-qmp-20nm.o
obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o
obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o
obj-$(CONFIG_PHY_QCOM_USB_HS_28NM) += phy-qcom-usb-hs-28nm.o
obj-$(CONFIG_PHY_QCOM_USB_SS) += phy-qcom-usb-ss.o
obj-$(CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2)+= phy-qcom-snps-femto-v2.o
+obj-$(CONFIG_PHY_QCOM_IPQ806X_USB) += phy-qcom-ipq806x-usb.o
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
new file mode 100644
index 000000000000..71f257b4a7f5
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+/* USB QSCRATCH Hardware registers */
+#define QSCRATCH_GENERAL_CFG (0x08)
+#define HSUSB_PHY_CTRL_REG (0x10)
+
+/* PHY_CTRL_REG */
+#define HSUSB_CTRL_DMSEHV_CLAMP BIT(24)
+#define HSUSB_CTRL_USB2_SUSPEND BIT(23)
+#define HSUSB_CTRL_UTMI_CLK_EN BIT(21)
+#define HSUSB_CTRL_UTMI_OTG_VBUS_VALID BIT(20)
+#define HSUSB_CTRL_USE_CLKCORE BIT(18)
+#define HSUSB_CTRL_DPSEHV_CLAMP BIT(17)
+#define HSUSB_CTRL_COMMONONN BIT(11)
+#define HSUSB_CTRL_ID_HV_CLAMP BIT(9)
+#define HSUSB_CTRL_OTGSESSVLD_CLAMP BIT(8)
+#define HSUSB_CTRL_CLAMP_EN BIT(7)
+#define HSUSB_CTRL_RETENABLEN BIT(1)
+#define HSUSB_CTRL_POR BIT(0)
+
+/* QSCRATCH_GENERAL_CFG */
+#define HSUSB_GCFG_XHCI_REV BIT(2)
+
+/* USB QSCRATCH Hardware registers */
+#define SSUSB_PHY_CTRL_REG (0x00)
+#define SSUSB_PHY_PARAM_CTRL_1 (0x04)
+#define SSUSB_PHY_PARAM_CTRL_2 (0x08)
+#define CR_PROTOCOL_DATA_IN_REG (0x0c)
+#define CR_PROTOCOL_DATA_OUT_REG (0x10)
+#define CR_PROTOCOL_CAP_ADDR_REG (0x14)
+#define CR_PROTOCOL_CAP_DATA_REG (0x18)
+#define CR_PROTOCOL_READ_REG (0x1c)
+#define CR_PROTOCOL_WRITE_REG (0x20)
+
+/* PHY_CTRL_REG */
+#define SSUSB_CTRL_REF_USE_PAD BIT(28)
+#define SSUSB_CTRL_TEST_POWERDOWN BIT(27)
+#define SSUSB_CTRL_LANE0_PWR_PRESENT BIT(24)
+#define SSUSB_CTRL_SS_PHY_EN BIT(8)
+#define SSUSB_CTRL_SS_PHY_RESET BIT(7)
+
+/* SSPHY control registers */
+#define SSPHY_CTRL_RX_OVRD_IN_HI(lane) (0x1006 + 0x100 * (lane))
+#define SSPHY_CTRL_TX_OVRD_DRV_LO(lane) (0x1002 + 0x100 * (lane))
+
+/* SSPHY SoC version specific values */
+#define SSPHY_RX_EQ_VALUE 4 /* Override value for rx_eq */
+/* Override value for transmit preemphasis */
+#define SSPHY_TX_DEEMPH_3_5DB 23
+/* Override value for mpll */
+#define SSPHY_MPLL_VALUE 0
+
+/* QSCRATCH PHY_PARAM_CTRL1 fields */
+#define PHY_PARAM_CTRL1_TX_FULL_SWING_MASK GENMASK(26, 20)
+#define PHY_PARAM_CTRL1_TX_DEEMPH_6DB_MASK GENMASK(19, 14)
+#define PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB_MASK GENMASK(13, 8)
+#define PHY_PARAM_CTRL1_LOS_BIAS_MASK GENMASK(7, 3)
+
+#define PHY_PARAM_CTRL1_MASK \
+ (PHY_PARAM_CTRL1_TX_FULL_SWING_MASK | \
+ PHY_PARAM_CTRL1_TX_DEEMPH_6DB_MASK | \
+ PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB_MASK | \
+ PHY_PARAM_CTRL1_LOS_BIAS_MASK)
+
+#define PHY_PARAM_CTRL1_TX_FULL_SWING(x) \
+ (((x) << 20) & PHY_PARAM_CTRL1_TX_FULL_SWING_MASK)
+#define PHY_PARAM_CTRL1_TX_DEEMPH_6DB(x) \
+ (((x) << 14) & PHY_PARAM_CTRL1_TX_DEEMPH_6DB_MASK)
+#define PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB(x) \
+ (((x) << 8) & PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB_MASK)
+#define PHY_PARAM_CTRL1_LOS_BIAS(x) \
+ (((x) << 3) & PHY_PARAM_CTRL1_LOS_BIAS_MASK)
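+
+/*
+ * Each helper above shifts a value into its field and masks it so it
+ * cannot spill into a neighbouring field. For example,
+ * PHY_PARAM_CTRL1_TX_FULL_SWING(0x6e) is (0x6e << 20) & GENMASK(26, 20),
+ * i.e. 0x6e placed at bits [26:20].
+ */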
+
+/* RX OVRD IN HI bits */
+#define RX_OVRD_IN_HI_RX_RESET_OVRD BIT(13)
+#define RX_OVRD_IN_HI_RX_RX_RESET BIT(12)
+#define RX_OVRD_IN_HI_RX_EQ_OVRD BIT(11)
+#define RX_OVRD_IN_HI_RX_EQ_MASK GENMASK(10, 8)
+#define RX_OVRD_IN_HI_RX_EQ(x) ((x) << 8)
+#define RX_OVRD_IN_HI_RX_EQ_EN_OVRD BIT(7)
+#define RX_OVRD_IN_HI_RX_EQ_EN BIT(6)
+#define RX_OVRD_IN_HI_RX_LOS_FILTER_OVRD BIT(5)
+#define RX_OVRD_IN_HI_RX_LOS_FILTER_MASK GENMASK(4, 3)
+#define RX_OVRD_IN_HI_RX_RATE_OVRD BIT(2)
+#define RX_OVRD_IN_HI_RX_RATE_MASK GENMASK(1, 0)
+
+/* TX OVRD DRV LO register bits */
+#define TX_OVRD_DRV_LO_AMPLITUDE_MASK GENMASK(6, 0)
+#define TX_OVRD_DRV_LO_PREEMPH_MASK GENMASK(13, 7)
+#define TX_OVRD_DRV_LO_PREEMPH(x) ((x) << 7)
+#define TX_OVRD_DRV_LO_EN BIT(14)
+
+/* MPLL bits */
+#define SSPHY_MPLL_MASK GENMASK(8, 5)
+#define SSPHY_MPLL(x) ((x) << 5)
+
+/* SS CAP register bits */
+#define SS_CR_CAP_ADDR_REG BIT(0)
+#define SS_CR_CAP_DATA_REG BIT(0)
+#define SS_CR_READ_REG BIT(0)
+#define SS_CR_WRITE_REG BIT(0)
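+
+/*
+ * The CR ("configuration register") protocol above is a multi-step
+ * handshake: latch an address via CAP_ADDR, latch data via CAP_DATA,
+ * then strobe READ or WRITE. Bit 0 of each register acts as the
+ * latch/ack bit and is polled until the hardware clears it.
+ */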
+
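+/*
+ * Driver-private state. Note the name shadows the generic struct usb_phy
+ * from <linux/usb/phy.h>; the two only coexist because that header is
+ * not included here.
+ */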
+struct usb_phy {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *xo_clk;
+ struct clk *ref_clk;
+ u32 rx_eq;
+ u32 tx_deamp_3_5db;
+ u32 mpll;
+};
+
+struct phy_drvdata {
+ struct phy_ops ops;
+ u32 clk_rate;
+};
+
+/**
+ * usb_phy_write_readback() - write a register and read back the masked
+ * value to confirm it was written
+ *
+ * @phy_dwc3: QCOM DWC3 PHY driver data
+ * @offset: register offset
+ * @mask: register bitmask specifying what should be updated
+ * @val: value to write
+ */
+static inline void usb_phy_write_readback(struct usb_phy *phy_dwc3,
+ u32 offset,
+ const u32 mask, u32 val)
+{
+ u32 write_val, tmp = readl(phy_dwc3->base + offset);
+
+ tmp &= ~mask; /* retain other bits */
+ write_val = tmp | val;
+
+ writel(write_val, phy_dwc3->base + offset);
+
+ /* Read back to see if val was written */
+ tmp = readl(phy_dwc3->base + offset);
+ tmp &= mask; /* clear other bits */
+
+ if (tmp != val)
+ dev_err(phy_dwc3->dev, "write: %x to QSCRATCH: %x FAILED\n", val, offset);
+}
+
+static int wait_for_latch(void __iomem *addr)
+{
+ u32 retry = 10;
+
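+	/* poll for the latch bit to clear; worst case is roughly 10 * 20us */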
+ while (true) {
+ if (!readl(addr))
+ break;
+
+ if (--retry == 0)
+ return -ETIMEDOUT;
+
+ usleep_range(10, 20);
+ }
+
+ return 0;
+}
+
+/**
+ * usb_ss_write_phycreg() - write an SSPHY register via the CR protocol
+ *
+ * @phy_dwc3: QCOM DWC3 PHY driver data
+ * @addr: SSPHY address to write
+ * @val: value to write
+ */
+static int usb_ss_write_phycreg(struct usb_phy *phy_dwc3,
+ u32 addr, u32 val)
+{
+ int ret;
+
+ writel(addr, phy_dwc3->base + CR_PROTOCOL_DATA_IN_REG);
+ writel(SS_CR_CAP_ADDR_REG,
+ phy_dwc3->base + CR_PROTOCOL_CAP_ADDR_REG);
+
+ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_CAP_ADDR_REG);
+ if (ret)
+ goto err_wait;
+
+ writel(val, phy_dwc3->base + CR_PROTOCOL_DATA_IN_REG);
+ writel(SS_CR_CAP_DATA_REG,
+ phy_dwc3->base + CR_PROTOCOL_CAP_DATA_REG);
+
+ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_CAP_DATA_REG);
+ if (ret)
+ goto err_wait;
+
+ writel(SS_CR_WRITE_REG, phy_dwc3->base + CR_PROTOCOL_WRITE_REG);
+
+ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_WRITE_REG);
+
+err_wait:
+ if (ret)
+ dev_err(phy_dwc3->dev, "timeout waiting for latch\n");
+ return ret;
+}
+
+/**
+ * usb_ss_read_phycreg() - read an SSPHY register via the CR protocol
+ *
+ * @phy_dwc3: QCOM DWC3 PHY driver data
+ * @addr: SSPHY address to read
+ * @val: filled with the value read back
+ */
+static int usb_ss_read_phycreg(struct usb_phy *phy_dwc3,
+ u32 addr, u32 *val)
+{
+ int ret;
+
+ writel(addr, phy_dwc3->base + CR_PROTOCOL_DATA_IN_REG);
+ writel(SS_CR_CAP_ADDR_REG,
+ phy_dwc3->base + CR_PROTOCOL_CAP_ADDR_REG);
+
+ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_CAP_ADDR_REG);
+ if (ret)
+ goto err_wait;
+
+	/*
+	 * Due to a hardware bug, the first read of an SSPHY register can be
+	 * incorrect. As a workaround, perform the read twice and use only
+	 * the second result, discarding the first.
+	 */
+ writel(SS_CR_READ_REG, phy_dwc3->base + CR_PROTOCOL_READ_REG);
+
+ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_READ_REG);
+ if (ret)
+ goto err_wait;
+
+ /* throwaway read */
+ readl(phy_dwc3->base + CR_PROTOCOL_DATA_OUT_REG);
+
+ writel(SS_CR_READ_REG, phy_dwc3->base + CR_PROTOCOL_READ_REG);
+
+ ret = wait_for_latch(phy_dwc3->base + CR_PROTOCOL_READ_REG);
+ if (ret)
+ goto err_wait;
+
+ *val = readl(phy_dwc3->base + CR_PROTOCOL_DATA_OUT_REG);
+
+err_wait:
+ return ret;
+}
+
+static int qcom_ipq806x_usb_hs_phy_init(struct phy *phy)
+{
+ struct usb_phy *phy_dwc3 = phy_get_drvdata(phy);
+ int ret;
+ u32 val;
+
+ ret = clk_prepare_enable(phy_dwc3->xo_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(phy_dwc3->ref_clk);
+ if (ret) {
+ clk_disable_unprepare(phy_dwc3->xo_clk);
+ return ret;
+ }
+
+	/*
+	 * HSPHY initialization: enable the UTMI clock, select the 19.2MHz
+	 * fsel, enable clamping, and disable RETENTION (power-on default is
+	 * ENABLED)
+	 */
+ val = HSUSB_CTRL_DPSEHV_CLAMP | HSUSB_CTRL_DMSEHV_CLAMP |
+ HSUSB_CTRL_RETENABLEN | HSUSB_CTRL_COMMONONN |
+ HSUSB_CTRL_OTGSESSVLD_CLAMP | HSUSB_CTRL_ID_HV_CLAMP |
+ HSUSB_CTRL_DPSEHV_CLAMP | HSUSB_CTRL_UTMI_OTG_VBUS_VALID |
+ HSUSB_CTRL_UTMI_CLK_EN | HSUSB_CTRL_CLAMP_EN | 0x70;
+
+ /* use core clock if external reference is not present */
+ if (!phy_dwc3->xo_clk)
+ val |= HSUSB_CTRL_USE_CLKCORE;
+
+ writel(val, phy_dwc3->base + HSUSB_PHY_CTRL_REG);
+ usleep_range(2000, 2200);
+
+ /* Disable (bypass) VBUS and ID filters */
+ writel(HSUSB_GCFG_XHCI_REV, phy_dwc3->base + QSCRATCH_GENERAL_CFG);
+
+ return 0;
+}
+
+static int qcom_ipq806x_usb_hs_phy_exit(struct phy *phy)
+{
+ struct usb_phy *phy_dwc3 = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(phy_dwc3->ref_clk);
+ clk_disable_unprepare(phy_dwc3->xo_clk);
+
+ return 0;
+}
+
+static int qcom_ipq806x_usb_ss_phy_init(struct phy *phy)
+{
+ struct usb_phy *phy_dwc3 = phy_get_drvdata(phy);
+ int ret;
+ u32 data;
+
+ ret = clk_prepare_enable(phy_dwc3->xo_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(phy_dwc3->ref_clk);
+ if (ret) {
+ clk_disable_unprepare(phy_dwc3->xo_clk);
+ return ret;
+ }
+
+ /* reset phy */
+ data = readl(phy_dwc3->base + SSUSB_PHY_CTRL_REG);
+ writel(data | SSUSB_CTRL_SS_PHY_RESET,
+ phy_dwc3->base + SSUSB_PHY_CTRL_REG);
+ usleep_range(2000, 2200);
+ writel(data, phy_dwc3->base + SSUSB_PHY_CTRL_REG);
+
+ /* clear REF_PAD if we don't have XO clk */
+ if (!phy_dwc3->xo_clk)
+ data &= ~SSUSB_CTRL_REF_USE_PAD;
+ else
+ data |= SSUSB_CTRL_REF_USE_PAD;
+
+ writel(data, phy_dwc3->base + SSUSB_PHY_CTRL_REG);
+
+ /* wait for ref clk to become stable, this can take up to 30ms */
+ msleep(30);
+
+ data |= SSUSB_CTRL_SS_PHY_EN | SSUSB_CTRL_LANE0_PWR_PRESENT;
+ writel(data, phy_dwc3->base + SSUSB_PHY_CTRL_REG);
+
+	/*
+	 * WORKAROUND: There is an SSPHY suspend bug due to which USB
+	 * enumerates in HS mode instead of SS mode. Work around it by
+	 * asserting LANE0.TX_ALT_BLOCK.EN_ALT_BUS to make TX use the
+	 * alternate bus mode
+	 */
+ ret = usb_ss_read_phycreg(phy_dwc3, 0x102D, &data);
+ if (ret)
+ goto err_phy_trans;
+
+ data |= (1 << 7);
+ ret = usb_ss_write_phycreg(phy_dwc3, 0x102D, data);
+ if (ret)
+ goto err_phy_trans;
+
+ ret = usb_ss_read_phycreg(phy_dwc3, 0x1010, &data);
+ if (ret)
+ goto err_phy_trans;
+
+ data &= ~0xff0;
+ data |= 0x20;
+ ret = usb_ss_write_phycreg(phy_dwc3, 0x1010, data);
+ if (ret)
+ goto err_phy_trans;
+
+ /*
+ * Fix RX Equalization setting as follows
+ * LANE0.RX_OVRD_IN_HI. RX_EQ_EN set to 0
+ * LANE0.RX_OVRD_IN_HI.RX_EQ_EN_OVRD set to 1
+ * LANE0.RX_OVRD_IN_HI.RX_EQ set based on SoC version
+ * LANE0.RX_OVRD_IN_HI.RX_EQ_OVRD set to 1
+ */
+ ret = usb_ss_read_phycreg(phy_dwc3, SSPHY_CTRL_RX_OVRD_IN_HI(0), &data);
+ if (ret)
+ goto err_phy_trans;
+
+ data &= ~RX_OVRD_IN_HI_RX_EQ_EN;
+ data |= RX_OVRD_IN_HI_RX_EQ_EN_OVRD;
+ data &= ~RX_OVRD_IN_HI_RX_EQ_MASK;
+ data |= RX_OVRD_IN_HI_RX_EQ(phy_dwc3->rx_eq);
+ data |= RX_OVRD_IN_HI_RX_EQ_OVRD;
+ ret = usb_ss_write_phycreg(phy_dwc3,
+ SSPHY_CTRL_RX_OVRD_IN_HI(0), data);
+ if (ret)
+ goto err_phy_trans;
+
+ /*
+ * Set EQ and TX launch amplitudes as follows
+ * LANE0.TX_OVRD_DRV_LO.PREEMPH set based on SoC version
+ * LANE0.TX_OVRD_DRV_LO.AMPLITUDE set to 110
+ * LANE0.TX_OVRD_DRV_LO.EN set to 1.
+ */
+ ret = usb_ss_read_phycreg(phy_dwc3,
+ SSPHY_CTRL_TX_OVRD_DRV_LO(0), &data);
+ if (ret)
+ goto err_phy_trans;
+
+ data &= ~TX_OVRD_DRV_LO_PREEMPH_MASK;
+ data |= TX_OVRD_DRV_LO_PREEMPH(phy_dwc3->tx_deamp_3_5db);
+ data &= ~TX_OVRD_DRV_LO_AMPLITUDE_MASK;
+ data |= 0x6E;
+ data |= TX_OVRD_DRV_LO_EN;
+ ret = usb_ss_write_phycreg(phy_dwc3,
+ SSPHY_CTRL_TX_OVRD_DRV_LO(0), data);
+ if (ret)
+ goto err_phy_trans;
+
+ data = 0;
+ data &= ~SSPHY_MPLL_MASK;
+ data |= SSPHY_MPLL(phy_dwc3->mpll);
+ usb_ss_write_phycreg(phy_dwc3, 0x30, data);
+
+ /*
+ * Set the QSCRATCH PHY_PARAM_CTRL1 parameters as follows
+ * TX_FULL_SWING [26:20] amplitude to 110
+ * TX_DEEMPH_6DB [19:14] to 32
+ * TX_DEEMPH_3_5DB [13:8] set based on SoC version
+ * LOS_BIAS [7:3] to 9
+ */
+ data = readl(phy_dwc3->base + SSUSB_PHY_PARAM_CTRL_1);
+
+ data &= ~PHY_PARAM_CTRL1_MASK;
+
+ data |= PHY_PARAM_CTRL1_TX_FULL_SWING(0x6e) |
+ PHY_PARAM_CTRL1_TX_DEEMPH_6DB(0x20) |
+ PHY_PARAM_CTRL1_TX_DEEMPH_3_5DB(phy_dwc3->tx_deamp_3_5db) |
+ PHY_PARAM_CTRL1_LOS_BIAS(0x9);
+
+ usb_phy_write_readback(phy_dwc3, SSUSB_PHY_PARAM_CTRL_1,
+ PHY_PARAM_CTRL1_MASK, data);
+
+err_phy_trans:
+ return ret;
+}
+
+static int qcom_ipq806x_usb_ss_phy_exit(struct phy *phy)
+{
+ struct usb_phy *phy_dwc3 = phy_get_drvdata(phy);
+
+ /* Sequence to put SSPHY in low power state:
+ * 1. Clear REF_PHY_EN in PHY_CTRL_REG
+ * 2. Clear REF_USE_PAD in PHY_CTRL_REG
+ * 3. Set TEST_POWERED_DOWN in PHY_CTRL_REG to enable PHY retention
+ */
+ usb_phy_write_readback(phy_dwc3, SSUSB_PHY_CTRL_REG,
+ SSUSB_CTRL_SS_PHY_EN, 0x0);
+ usb_phy_write_readback(phy_dwc3, SSUSB_PHY_CTRL_REG,
+ SSUSB_CTRL_REF_USE_PAD, 0x0);
+ usb_phy_write_readback(phy_dwc3, SSUSB_PHY_CTRL_REG,
+ SSUSB_CTRL_TEST_POWERDOWN, 0x0);
+
+ clk_disable_unprepare(phy_dwc3->ref_clk);
+ clk_disable_unprepare(phy_dwc3->xo_clk);
+
+ return 0;
+}
+
+static const struct phy_drvdata qcom_ipq806x_usb_hs_drvdata = {
+ .ops = {
+ .init = qcom_ipq806x_usb_hs_phy_init,
+ .exit = qcom_ipq806x_usb_hs_phy_exit,
+ .owner = THIS_MODULE,
+ },
+ .clk_rate = 60000000,
+};
+
+static const struct phy_drvdata qcom_ipq806x_usb_ss_drvdata = {
+ .ops = {
+ .init = qcom_ipq806x_usb_ss_phy_init,
+ .exit = qcom_ipq806x_usb_ss_phy_exit,
+ .owner = THIS_MODULE,
+ },
+ .clk_rate = 125000000,
+};
+
+static const struct of_device_id qcom_ipq806x_usb_phy_table[] = {
+ { .compatible = "qcom,ipq806x-usb-phy-hs",
+ .data = &qcom_ipq806x_usb_hs_drvdata },
+ { .compatible = "qcom,ipq806x-usb-phy-ss",
+ .data = &qcom_ipq806x_usb_ss_drvdata },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, qcom_ipq806x_usb_phy_table);
+
+static int qcom_ipq806x_usb_phy_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ resource_size_t size;
+ struct phy *generic_phy;
+ struct usb_phy *phy_dwc3;
+ const struct phy_drvdata *data;
+ struct phy_provider *phy_provider;
+
+ phy_dwc3 = devm_kzalloc(&pdev->dev, sizeof(*phy_dwc3), GFP_KERNEL);
+ if (!phy_dwc3)
+ return -ENOMEM;
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ phy_dwc3->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ size = resource_size(res);
+	phy_dwc3->base = devm_ioremap(phy_dwc3->dev, res->start, size);
+
+	/* devm_ioremap() returns NULL on failure, not an ERR_PTR */
+	if (!phy_dwc3->base) {
+		dev_err(phy_dwc3->dev, "failed to map reg\n");
+		return -ENOMEM;
+	}
+
+ phy_dwc3->ref_clk = devm_clk_get(phy_dwc3->dev, "ref");
+ if (IS_ERR(phy_dwc3->ref_clk)) {
+ dev_dbg(phy_dwc3->dev, "cannot get reference clock\n");
+ return PTR_ERR(phy_dwc3->ref_clk);
+ }
+
+ clk_set_rate(phy_dwc3->ref_clk, data->clk_rate);
+
+ phy_dwc3->xo_clk = devm_clk_get(phy_dwc3->dev, "xo");
+ if (IS_ERR(phy_dwc3->xo_clk)) {
+ dev_dbg(phy_dwc3->dev, "cannot get TCXO clock\n");
+ phy_dwc3->xo_clk = NULL;
+ }
+
+ /* Parse device node to probe HSIO settings */
+ if (device_property_read_u32(&pdev->dev, "qcom,rx-eq",
+ &phy_dwc3->rx_eq))
+ phy_dwc3->rx_eq = SSPHY_RX_EQ_VALUE;
+
+ if (device_property_read_u32(&pdev->dev, "qcom,tx-deamp_3_5db",
+ &phy_dwc3->tx_deamp_3_5db))
+ phy_dwc3->tx_deamp_3_5db = SSPHY_TX_DEEMPH_3_5DB;
+
+ if (device_property_read_u32(&pdev->dev, "qcom,mpll", &phy_dwc3->mpll))
+ phy_dwc3->mpll = SSPHY_MPLL_VALUE;
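+
+	/*
+	 * The three properties above are optional tuning overrides. A
+	 * hypothetical node could set, for example:
+	 *   qcom,rx-eq = <4>;
+	 *   qcom,tx-deamp_3_5db = <23>;
+	 *   qcom,mpll = <0>;
+	 */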
+
+ generic_phy = devm_phy_create(phy_dwc3->dev, pdev->dev.of_node, &data->ops);
+
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, phy_dwc3);
+ platform_set_drvdata(pdev, phy_dwc3);
+
+ phy_provider = devm_of_phy_provider_register(phy_dwc3->dev,
+ of_phy_simple_xlate);
+
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ return 0;
+}
+
+static struct platform_driver qcom_ipq806x_usb_phy_driver = {
+ .probe = qcom_ipq806x_usb_phy_probe,
+ .driver = {
+ .name = "qcom-ipq806x-usb-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_ipq806x_usb_phy_table,
+ },
+};
+
+module_platform_driver(qcom_ipq806x_usb_phy_driver);
+
+MODULE_ALIAS("platform:phy-qcom-ipq806x-usb");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_AUTHOR("Ivan T. Ivanov <iivanov@mm-sol.com>");
+MODULE_DESCRIPTION("DesignWare USB3 QCOM PHY driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index e91040af3394..562053ce9455 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -82,20 +82,34 @@ struct qmp_phy_init_tbl {
* register part of layout ?
* if yes, then offset gives index in the reg-layout
*/
- int in_layout;
+ bool in_layout;
+ /*
+ * mask of lanes for which this register is written
+ * for cases when second lane needs different values
+ */
+ u8 lane_mask;
};
#define QMP_PHY_INIT_CFG(o, v) \
{ \
.offset = o, \
.val = v, \
+ .lane_mask = 0xff, \
}
#define QMP_PHY_INIT_CFG_L(o, v) \
{ \
.offset = o, \
.val = v, \
- .in_layout = 1, \
+ .in_layout = true, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = l, \
}
/* set of registers with offsets different per-PHY */
@@ -185,6 +199,17 @@ static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_START_CTRL] = 0x44,
[QPHY_PCS_STATUS] = 0x14,
[QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x308,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x314,
+};
+
+static const unsigned int qmp_v4_usb3_uniphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x44,
+ [QPHY_PCS_STATUS] = 0x14,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x608,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x614,
};
static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
@@ -198,6 +223,81 @@ static const unsigned int sm8150_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_SW_RESET] = QPHY_V4_PCS_UFS_SW_RESET,
};
+static const struct qmp_phy_init_tbl ipq8074_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+ /* PLL and Loop filter settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ /* SSC settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xb8),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x0),
+};
+
+static const struct qmp_phy_init_tbl ipq8074_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f),
+};
+
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
@@ -1399,6 +1499,250 @@ static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
};
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x95),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0xb8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xef),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x40, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x54, 2),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0xff, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f, 2),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_2, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0xb8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+};
+
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
/* phy-type - PCIE/UFS/USB */
@@ -1567,6 +1911,11 @@ static const char * const qmp_v4_phy_clk_l[] = {
"aux", "ref_clk_src", "ref", "com_aux",
};
+/* the primary usb3 phy on sm8250 doesn't have a ref clock */
+static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
+ "aux", "ref_clk_src", "com_aux"
+};
+
static const char * const sdm845_ufs_phy_clk_l[] = {
"ref", "ref_aux",
};
@@ -1593,6 +1942,30 @@ static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
};
+static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = ipq8074_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq8074_usb3_serdes_tbl),
+ .tx_tbl = msm8996_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
+ .rx_tbl = ipq8074_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq8074_usb3_rx_tbl),
+ .pcs_tbl = ipq8074_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq8074_usb3_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+};
+
static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
.type = PHY_TYPE_PCIE,
.nlanes = 3,
@@ -1986,10 +2359,98 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
.is_dual_lane_phy = true,
};
-static void qcom_qmp_phy_configure(void __iomem *base,
- const unsigned int *regs,
- const struct qmp_phy_init_tbl tbl[],
- int num)
+static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sm8150_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_tx_tbl),
+ .rx_tbl = sm8150_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8150_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3_uniphy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8250_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_tx_tbl),
+ .rx_tbl = sm8250_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_rx_tbl),
+ .pcs_tbl = sm8250_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_tbl),
+ .clk_list = qmp_v4_sm8250_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sm8250_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_tx_tbl),
+ .rx_tbl = sm8250_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sm8250_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3_uniphy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static void qcom_qmp_phy_configure_lane(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num,
+ u8 lane_mask)
{
int i;
const struct qmp_phy_init_tbl *t = tbl;
@@ -1998,6 +2459,9 @@ static void qcom_qmp_phy_configure(void __iomem *base,
return;
for (i = 0; i < num; i++, t++) {
+ if (!(t->lane_mask & lane_mask))
+ continue;
+
if (t->in_layout)
writel(t->val, base + regs[t->offset]);
else
@@ -2005,6 +2469,14 @@ static void qcom_qmp_phy_configure(void __iomem *base,
}
}
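+/*
+ * The unmasked helper preserves the old "program every entry" behaviour:
+ * a lane_mask of 0xff matches every entry, since the init macros default
+ * lane_mask to 0xff.
+ */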
+static void qcom_qmp_phy_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ qcom_qmp_phy_configure_lane(base, regs, tbl, num, 0xff);
+}
+
static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
@@ -2219,16 +2691,18 @@ static int qcom_qmp_phy_enable(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_configure(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num);
+ qcom_qmp_phy_configure_lane(tx, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 1);
/* Configuration for other LANE for USB-DP combo PHY */
if (cfg->is_dual_lane_phy)
- qcom_qmp_phy_configure(qphy->tx2, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num);
+ qcom_qmp_phy_configure_lane(qphy->tx2, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 2);
- qcom_qmp_phy_configure(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num);
+ qcom_qmp_phy_configure_lane(rx, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 1);
if (cfg->is_dual_lane_phy)
- qcom_qmp_phy_configure(qphy->rx2, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num);
+ qcom_qmp_phy_configure_lane(qphy->rx2, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 2);
qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
ret = reset_control_deassert(qmp->ufs_reset);
@@ -2699,6 +3173,9 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
{
+ .compatible = "qcom,ipq8074-qmp-usb3-phy",
+ .data = &ipq8074_usb3phy_cfg,
+ }, {
.compatible = "qcom,msm8996-qmp-pcie-phy",
.data = &msm8996_pciephy_cfg,
}, {
@@ -2746,6 +3223,15 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
}, {
.compatible = "qcom,sm8150-qmp-usb3-phy",
.data = &sm8150_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-usb3-uni-phy",
+ .data = &sm8150_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-usb3-phy",
+ .data = &sm8250_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-usb3-uni-phy",
+ .data = &sm8250_usb3_uniphy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 6d017a0c0c8d..4277f592684b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -363,7 +363,10 @@
/* Only for QMP V4 PHY - TX registers */
#define QSERDES_V4_TX_RES_CODE_LANE_TX 0x34
#define QSERDES_V4_TX_RES_CODE_LANE_RX 0x38
+#define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX 0x3c
+#define QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX 0x40
#define QSERDES_V4_TX_LANE_MODE_1 0x84
+#define QSERDES_V4_TX_LANE_MODE_2 0x88
#define QSERDES_V4_TX_RCV_DETECT_LVL_2 0x9c
#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0xd8
#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0xdC
@@ -709,6 +712,10 @@
#define QPHY_V4_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x354
#define QPHY_V4_PCS_USB3_TEST_CONTROL 0x358
+/* Only for QMP V4 PHY - UNI has 0x300 offset for PCS_USB3 regs */
+#define QPHY_V4_PCS_USB3_UNI_LFPS_DET_HIGH_COUNT_VAL 0x618
+#define QPHY_V4_PCS_USB3_UNI_RXEQTRAINING_DFE_TIME_S2 0x638
+
/* Only for QMP V4 PHY - PCS_MISC registers */
#define QPHY_V4_PCS_MISC_TYPEC_CTRL 0x00
#define QPHY_V4_PCS_MISC_TYPEC_PWRDN_CTRL 0x04
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 393011a05b48..557547dabfd5 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -810,6 +810,9 @@ static const struct phy_ops qusb2_phy_gen_ops = {
static const struct of_device_id qusb2_phy_of_match_table[] = {
{
+ .compatible = "qcom,ipq8074-qusb2-phy",
+ .data = &msm8996_phy_cfg,
+ }, {
.compatible = "qcom,msm8996-qusb2-phy",
.data = &msm8996_phy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
index 4d74045271eb..ae4bac024c7b 100644
--- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
+++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
@@ -77,6 +77,7 @@ static const char * const qcom_snps_hsphy_vreg_names[] = {
* @phy_reset: phy reset control
* @vregs: regulator supplies bulk data
* @phy_initialized: if PHY has been initialized correctly
+ * @mode: contains the current mode the PHY is in
*/
struct qcom_snps_hsphy {
struct phy *phy;
@@ -88,6 +89,7 @@ struct qcom_snps_hsphy {
struct regulator_bulk_data vregs[SNPS_HS_NUM_VREGS];
bool phy_initialized;
+ enum phy_mode mode;
};
static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset,
@@ -104,6 +106,72 @@ static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset,
readl_relaxed(base + offset);
}
+static int qcom_snps_hsphy_suspend(struct qcom_snps_hsphy *hsphy)
+{
+ dev_dbg(&hsphy->phy->dev, "Suspend QCOM SNPS PHY\n");
+
+ if (hsphy->mode == PHY_MODE_USB_HOST) {
+ /* Enable auto-resume to meet remote wakeup timing */
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ USB2_PHY_USB_PHY_HS_PHY_CTRL2,
+ USB2_AUTO_RESUME,
+ USB2_AUTO_RESUME);
+ usleep_range(500, 1000);
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ USB2_PHY_USB_PHY_HS_PHY_CTRL2,
+ 0, USB2_AUTO_RESUME);
+ }
+
+ clk_disable_unprepare(hsphy->cfg_ahb_clk);
+ return 0;
+}
+
+static int qcom_snps_hsphy_resume(struct qcom_snps_hsphy *hsphy)
+{
+ int ret;
+
+	dev_dbg(&hsphy->phy->dev, "Resume QCOM SNPS PHY\n");
+
+ ret = clk_prepare_enable(hsphy->cfg_ahb_clk);
+ if (ret) {
+ dev_err(&hsphy->phy->dev, "failed to enable cfg ahb clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused qcom_snps_hsphy_runtime_suspend(struct device *dev)
+{
+ struct qcom_snps_hsphy *hsphy = dev_get_drvdata(dev);
+
+ if (!hsphy->phy_initialized)
+ return 0;
+
+ qcom_snps_hsphy_suspend(hsphy);
+ return 0;
+}
+
+static int __maybe_unused qcom_snps_hsphy_runtime_resume(struct device *dev)
+{
+ struct qcom_snps_hsphy *hsphy = dev_get_drvdata(dev);
+
+ if (!hsphy->phy_initialized)
+ return 0;
+
+ qcom_snps_hsphy_resume(hsphy);
+ return 0;
+}
+
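+/*
+ * The requested mode is only cached here; runtime suspend consults it to
+ * decide whether the host-side auto-resume pulse is needed.
+ */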
+static int qcom_snps_hsphy_set_mode(struct phy *phy, enum phy_mode mode,
+ int submode)
+{
+ struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
+
+ hsphy->mode = mode;
+ return 0;
+}
+
static int qcom_snps_hsphy_init(struct phy *phy)
{
struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
@@ -201,6 +269,7 @@ static int qcom_snps_hsphy_exit(struct phy *phy)
static const struct phy_ops qcom_snps_hsphy_gen_ops = {
.init = qcom_snps_hsphy_init,
.exit = qcom_snps_hsphy_exit,
+ .set_mode = qcom_snps_hsphy_set_mode,
.owner = THIS_MODULE,
};
@@ -212,6 +281,11 @@ static const struct of_device_id qcom_snps_hsphy_of_match_table[] = {
};
MODULE_DEVICE_TABLE(of, qcom_snps_hsphy_of_match_table);
+static const struct dev_pm_ops qcom_snps_hsphy_pm_ops = {
+ SET_RUNTIME_PM_OPS(qcom_snps_hsphy_runtime_suspend,
+ qcom_snps_hsphy_runtime_resume, NULL)
+};
+
static int qcom_snps_hsphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -255,6 +329,14 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+	 * Prevent runtime PM from being enabled by default. Users can
+	 * enable it via the power/control attribute in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
generic_phy = devm_phy_create(dev, NULL, &qcom_snps_hsphy_gen_ops);
if (IS_ERR(generic_phy)) {
ret = PTR_ERR(generic_phy);
@@ -269,6 +351,8 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (!IS_ERR(phy_provider))
dev_dbg(dev, "Registered Qcom-SNPS HS phy\n");
+ else
+ pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
}
@@ -277,6 +361,7 @@ static struct platform_driver qcom_snps_hsphy_driver = {
.probe = qcom_snps_hsphy_probe,
.driver = {
.name = "qcom-snps-hs-femto-v2-phy",
+ .pm = &qcom_snps_hsphy_pm_ops,
.of_match_table = qcom_snps_hsphy_of_match_table,
},
};
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
deleted file mode 100644
index 9bf973a0d8c3..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- */
-
-#ifndef UFS_QCOM_PHY_I_H_
-#define UFS_QCOM_PHY_I_H_
-
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/phy/phy.h>
-#include <linux/regulator/consumer.h>
-#include <linux/reset.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-
-#define UFS_QCOM_PHY_CAL_ENTRY(reg, val) \
- { \
- .reg_offset = reg, \
- .cfg_value = val, \
- }
-
-#define UFS_QCOM_PHY_NAME_LEN 30
-
-enum {
- MASK_SERDES_START = 0x1,
- MASK_PCS_READY = 0x1,
-};
-
-enum {
- OFFSET_SERDES_START = 0x0,
-};
-
-struct ufs_qcom_phy_stored_attributes {
- u32 att;
- u32 value;
-};
-
-
-struct ufs_qcom_phy_calibration {
- u32 reg_offset;
- u32 cfg_value;
-};
-
-struct ufs_qcom_phy_vreg {
- const char *name;
- struct regulator *reg;
- int max_uA;
- int min_uV;
- int max_uV;
- bool enabled;
-};
-
-struct ufs_qcom_phy {
- struct list_head list;
- struct device *dev;
- void __iomem *mmio;
- void __iomem *dev_ref_clk_ctrl_mmio;
- struct clk *tx_iface_clk;
- struct clk *rx_iface_clk;
- bool is_iface_clk_enabled;
- struct clk *ref_clk_src;
- struct clk *ref_clk_parent;
- struct clk *ref_clk;
- bool is_ref_clk_enabled;
- bool is_dev_ref_clk_enabled;
- struct ufs_qcom_phy_vreg vdda_pll;
- struct ufs_qcom_phy_vreg vdda_phy;
- struct ufs_qcom_phy_vreg vddp_ref_clk;
- unsigned int quirks;
-
- /*
- * If UFS link is put into Hibern8 and if UFS PHY analog hardware is
- * power collapsed (by clearing UFS_PHY_POWER_DOWN_CONTROL), Hibern8
- * exit might fail even after powering on UFS PHY analog hardware.
- * Enabling this quirk will help to solve above issue by doing
- * custom PHY settings just before PHY analog power collapse.
- */
- #define UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE BIT(0)
-
- u8 host_ctrl_rev_major;
- u16 host_ctrl_rev_minor;
- u16 host_ctrl_rev_step;
-
- char name[UFS_QCOM_PHY_NAME_LEN];
- struct ufs_qcom_phy_calibration *cached_regs;
- int cached_regs_table_size;
- struct ufs_qcom_phy_specific_ops *phy_spec_ops;
-
- enum phy_mode mode;
- struct reset_control *ufs_reset;
-};
-
-/**
- * struct ufs_qcom_phy_specific_ops - set of pointers to functions which have a
- * specific implementation per phy. Each UFS phy, should implement
- * those functions according to its spec and requirements
- * @start_serdes: pointer to a function that starts the serdes
- * @is_physical_coding_sublayer_ready: pointer to a function that
- * checks pcs readiness. returns 0 for success and non-zero for error.
- * @set_tx_lane_enable: pointer to a function that enable tx lanes
- * @power_control: pointer to a function that controls analog rail of phy
- * and writes to QSERDES_RX_SIGDET_CNTRL attribute
- */
-struct ufs_qcom_phy_specific_ops {
- int (*calibrate)(struct ufs_qcom_phy *ufs_qcom_phy, bool is_rate_B);
- void (*start_serdes)(struct ufs_qcom_phy *phy);
- int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
- void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
- void (*power_control)(struct ufs_qcom_phy *phy, bool val);
-};
-
-struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
-int ufs_qcom_phy_power_on(struct phy *generic_phy);
-int ufs_qcom_phy_power_off(struct phy *generic_phy);
-int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common);
-int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common);
-int ufs_qcom_phy_remove(struct phy *generic_phy,
- struct ufs_qcom_phy *ufs_qcom_phy);
-struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
- struct ufs_qcom_phy *common_cfg,
- const struct phy_ops *ufs_qcom_phy_gen_ops,
- struct ufs_qcom_phy_specific_ops *phy_spec_ops);
-int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
- struct ufs_qcom_phy_calibration *tbl_A, int tbl_size_A,
- struct ufs_qcom_phy_calibration *tbl_B, int tbl_size_B,
- bool is_rate_B);
-#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
deleted file mode 100644
index 54b355bfc24c..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
+++ /dev/null
@@ -1,172 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- */
-
-#include "phy-qcom-ufs-qmp-14nm.h"
-
-#define UFS_PHY_NAME "ufs_phy_qmp_14nm"
-#define UFS_PHY_VDDA_PHY_UV (925000)
-
-static
-int ufs_qcom_phy_qmp_14nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
- bool is_rate_B)
-{
- int tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A);
- int tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
- int err;
-
- err = ufs_qcom_phy_calibrate(ufs_qcom_phy, phy_cal_table_rate_A,
- tbl_size_A, phy_cal_table_rate_B, tbl_size_B, is_rate_B);
-
- if (err)
- dev_err(ufs_qcom_phy->dev,
- "%s: ufs_qcom_phy_calibrate() failed %d\n",
- __func__, err);
- return err;
-}
-
-static
-void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
-{
- phy_common->quirks =
- UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
-}
-
-static
-int ufs_qcom_phy_qmp_14nm_set_mode(struct phy *generic_phy,
- enum phy_mode mode, int submode)
-{
- struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
-
- phy_common->mode = PHY_MODE_INVALID;
-
- if (mode > 0)
- phy_common->mode = mode;
-
- return 0;
-}
-
-static
-void ufs_qcom_phy_qmp_14nm_power_control(struct ufs_qcom_phy *phy, bool val)
-{
- writel_relaxed(val ? 0x1 : 0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
- /*
- * Before any transactions involving PHY, ensure PHY knows
- * that it's analog rail is powered ON (or OFF).
- */
- mb();
-}
-
-static inline
-void ufs_qcom_phy_qmp_14nm_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
-{
- /*
- * 14nm PHY does not have TX_LANE_ENABLE register.
- * Implement this function so as not to propagate error to caller.
- */
-}
-
-static inline void ufs_qcom_phy_qmp_14nm_start_serdes(struct ufs_qcom_phy *phy)
-{
- u32 tmp;
-
- tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
- tmp &= ~MASK_SERDES_START;
- tmp |= (1 << OFFSET_SERDES_START);
- writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
- /* Ensure register value is committed */
- mb();
-}
-
-static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
-{
- int err = 0;
- u32 val;
-
- err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
- val, (val & MASK_PCS_READY), 10, 1000000);
- if (err)
- dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
- __func__, err);
- return err;
-}
-
-static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
- .power_on = ufs_qcom_phy_power_on,
- .power_off = ufs_qcom_phy_power_off,
- .set_mode = ufs_qcom_phy_qmp_14nm_set_mode,
- .owner = THIS_MODULE,
-};
-
-static struct ufs_qcom_phy_specific_ops phy_14nm_ops = {
- .calibrate = ufs_qcom_phy_qmp_14nm_phy_calibrate,
- .start_serdes = ufs_qcom_phy_qmp_14nm_start_serdes,
- .is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_14nm_is_pcs_ready,
- .set_tx_lane_enable = ufs_qcom_phy_qmp_14nm_set_tx_lane_enable,
- .power_control = ufs_qcom_phy_qmp_14nm_power_control,
-};
-
-static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct phy *generic_phy;
- struct ufs_qcom_phy_qmp_14nm *phy;
- struct ufs_qcom_phy *phy_common;
- int err = 0;
-
- phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
- if (!phy) {
- err = -ENOMEM;
- goto out;
- }
- phy_common = &phy->common_cfg;
-
- generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
- &ufs_qcom_phy_qmp_14nm_phy_ops, &phy_14nm_ops);
-
- if (!generic_phy) {
- err = -EIO;
- goto out;
- }
-
- err = ufs_qcom_phy_init_clks(phy_common);
- if (err)
- goto out;
-
- err = ufs_qcom_phy_init_vregulators(phy_common);
- if (err)
- goto out;
-
- phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
- phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
-
- ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
-
- phy_set_drvdata(generic_phy, phy);
-
- strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
-
-out:
- return err;
-}
-
-static const struct of_device_id ufs_qcom_phy_qmp_14nm_of_match[] = {
- {.compatible = "qcom,ufs-phy-qmp-14nm"},
- {.compatible = "qcom,msm8996-ufs-phy-qmp-14nm"},
- {},
-};
-MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_14nm_of_match);
-
-static struct platform_driver ufs_qcom_phy_qmp_14nm_driver = {
- .probe = ufs_qcom_phy_qmp_14nm_probe,
- .driver = {
- .of_match_table = ufs_qcom_phy_qmp_14nm_of_match,
- .name = "ufs_qcom_phy_qmp_14nm",
- },
-};
-
-module_platform_driver(ufs_qcom_phy_qmp_14nm_driver);
-
-MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP 14nm");
-MODULE_LICENSE("GPL v2");
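Throughout the two QMP drivers removed here, MMIO writes use writel_relaxed() followed by an explicit mb(), as in ufs_qcom_phy_qmp_14nm_power_control() above: the relaxed write avoids paying a barrier per register access, and the single mb() then orders the rail/serdes write against whatever PHY transaction follows. A minimal sketch of that idiom; REG_POWER_CTRL is a hypothetical offset, while writel_relaxed() and mb() are the real kernel APIs:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical offset, standing in for UFS_PHY_POWER_DOWN_CONTROL. */
#define REG_POWER_CTRL	0x04

static void phy_set_analog_rail(void __iomem *mmio, bool on)
{
	/*
	 * Relaxed MMIO write: ordered against other accesses to this
	 * device only, not against normal memory operations.
	 */
	writel_relaxed(on ? 0x1 : 0x0, mmio + REG_POWER_CTRL);

	/*
	 * Full barrier: guarantee the write above reaches the device
	 * before any following code starts a transaction that assumes
	 * the new rail state.
	 */
	mb();
}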
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.h
deleted file mode 100644
index ceca654b9338..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- */
-
-#ifndef UFS_QCOM_PHY_QMP_14NM_H_
-#define UFS_QCOM_PHY_QMP_14NM_H_
-
-#include "phy-qcom-ufs-i.h"
-
-/* QCOM UFS PHY control registers */
-#define COM_OFF(x) (0x000 + x)
-#define PHY_OFF(x) (0xC00 + x)
-#define TX_OFF(n, x) (0x400 + (0x400 * n) + x)
-#define RX_OFF(n, x) (0x600 + (0x400 * n) + x)
-
-/* UFS PHY QSERDES COM registers */
-#define QSERDES_COM_BG_TIMER COM_OFF(0x0C)
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN COM_OFF(0x34)
-#define QSERDES_COM_SYS_CLK_CTRL COM_OFF(0x3C)
-#define QSERDES_COM_LOCK_CMP1_MODE0 COM_OFF(0x4C)
-#define QSERDES_COM_LOCK_CMP2_MODE0 COM_OFF(0x50)
-#define QSERDES_COM_LOCK_CMP3_MODE0 COM_OFF(0x54)
-#define QSERDES_COM_LOCK_CMP1_MODE1 COM_OFF(0x58)
-#define QSERDES_COM_LOCK_CMP2_MODE1 COM_OFF(0x5C)
-#define QSERDES_COM_LOCK_CMP3_MODE1 COM_OFF(0x60)
-#define QSERDES_COM_CP_CTRL_MODE0 COM_OFF(0x78)
-#define QSERDES_COM_CP_CTRL_MODE1 COM_OFF(0x7C)
-#define QSERDES_COM_PLL_RCTRL_MODE0 COM_OFF(0x84)
-#define QSERDES_COM_PLL_RCTRL_MODE1 COM_OFF(0x88)
-#define QSERDES_COM_PLL_CCTRL_MODE0 COM_OFF(0x90)
-#define QSERDES_COM_PLL_CCTRL_MODE1 COM_OFF(0x94)
-#define QSERDES_COM_SYSCLK_EN_SEL COM_OFF(0xAC)
-#define QSERDES_COM_RESETSM_CNTRL COM_OFF(0xB4)
-#define QSERDES_COM_LOCK_CMP_EN COM_OFF(0xC8)
-#define QSERDES_COM_LOCK_CMP_CFG COM_OFF(0xCC)
-#define QSERDES_COM_DEC_START_MODE0 COM_OFF(0xD0)
-#define QSERDES_COM_DEC_START_MODE1 COM_OFF(0xD4)
-#define QSERDES_COM_DIV_FRAC_START1_MODE0 COM_OFF(0xDC)
-#define QSERDES_COM_DIV_FRAC_START2_MODE0 COM_OFF(0xE0)
-#define QSERDES_COM_DIV_FRAC_START3_MODE0 COM_OFF(0xE4)
-#define QSERDES_COM_DIV_FRAC_START1_MODE1 COM_OFF(0xE8)
-#define QSERDES_COM_DIV_FRAC_START2_MODE1 COM_OFF(0xEC)
-#define QSERDES_COM_DIV_FRAC_START3_MODE1 COM_OFF(0xF0)
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 COM_OFF(0x108)
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 COM_OFF(0x10C)
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 COM_OFF(0x110)
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 COM_OFF(0x114)
-#define QSERDES_COM_VCO_TUNE_CTRL COM_OFF(0x124)
-#define QSERDES_COM_VCO_TUNE_MAP COM_OFF(0x128)
-#define QSERDES_COM_VCO_TUNE1_MODE0 COM_OFF(0x12C)
-#define QSERDES_COM_VCO_TUNE2_MODE0 COM_OFF(0x130)
-#define QSERDES_COM_VCO_TUNE1_MODE1 COM_OFF(0x134)
-#define QSERDES_COM_VCO_TUNE2_MODE1 COM_OFF(0x138)
-#define QSERDES_COM_VCO_TUNE_TIMER1 COM_OFF(0x144)
-#define QSERDES_COM_VCO_TUNE_TIMER2 COM_OFF(0x148)
-#define QSERDES_COM_CLK_SELECT COM_OFF(0x174)
-#define QSERDES_COM_HSCLK_SEL COM_OFF(0x178)
-#define QSERDES_COM_CORECLK_DIV COM_OFF(0x184)
-#define QSERDES_COM_CORE_CLK_EN COM_OFF(0x18C)
-#define QSERDES_COM_CMN_CONFIG COM_OFF(0x194)
-#define QSERDES_COM_SVS_MODE_CLK_SEL COM_OFF(0x19C)
-#define QSERDES_COM_CORECLK_DIV_MODE1 COM_OFF(0x1BC)
-
-/* UFS PHY registers */
-#define UFS_PHY_PHY_START PHY_OFF(0x00)
-#define UFS_PHY_POWER_DOWN_CONTROL PHY_OFF(0x04)
-#define UFS_PHY_PCS_READY_STATUS PHY_OFF(0x168)
-
-/* UFS PHY TX registers */
-#define QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN TX_OFF(0, 0x68)
-#define QSERDES_TX_LANE_MODE TX_OFF(0, 0x94)
-
-/* UFS PHY RX registers */
-#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN RX_OFF(0, 0x40)
-#define QSERDES_RX_RX_TERM_BW RX_OFF(0, 0x90)
-#define QSERDES_RX_RX_EQ_GAIN1_LSB RX_OFF(0, 0xC4)
-#define QSERDES_RX_RX_EQ_GAIN1_MSB RX_OFF(0, 0xC8)
-#define QSERDES_RX_RX_EQ_GAIN2_LSB RX_OFF(0, 0xCC)
-#define QSERDES_RX_RX_EQ_GAIN2_MSB RX_OFF(0, 0xD0)
-#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 RX_OFF(0, 0xD8)
-#define QSERDES_RX_SIGDET_CNTRL RX_OFF(0, 0x114)
-#define QSERDES_RX_SIGDET_LVL RX_OFF(0, 0x118)
-#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL RX_OFF(0, 0x11C)
-#define QSERDES_RX_RX_INTERFACE_MODE RX_OFF(0, 0x12C)
-
-/*
- * This structure represents the 14nm specific phy.
- * common_cfg MUST remain the first field in this structure
- * in case extra fields are added. This way, when calling
- * get_ufs_qcom_phy() of generic phy, we can extract the
- * common phy structure (struct ufs_qcom_phy) out of it
- * regardless of the relevant specific phy.
- */
-struct ufs_qcom_phy_qmp_14nm {
- struct ufs_qcom_phy common_cfg;
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xd7),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x06),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x05),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x10),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x14),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
-
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x02),
-
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x02),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0F),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x54),
-};
-
-#endif
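The struct comment above pins common_cfg as the first member so that the drvdata pointer stored for the generic phy can be reinterpreted directly as struct ufs_qcom_phy. A small self-contained sketch of that first-member embedding; common_phy and variant_phy are hypothetical stand-ins for the driver's types:

#include <linux/io.h>

struct common_phy {
	void __iomem *mmio;		/* stand-in for struct ufs_qcom_phy */
};

struct variant_phy {
	struct common_phy common_cfg;	/* MUST remain the first member */
	int variant_state;		/* hypothetical variant-only field */
};

/*
 * Because common_cfg sits at offset 0, &variant->common_cfg and
 * (void *)variant are the same address, which is what lets
 * get_ufs_qcom_phy() cast the drvdata pointer.
 */
static struct common_phy *to_common(struct variant_phy *variant)
{
	return &variant->common_cfg;
}

Using container_of() on the common_cfg member would express the same relationship without depending on member order at all, which is why the comment has to shout about the layout here.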
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
deleted file mode 100644
index 3e9d8b71e995..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- */
-
-#include "phy-qcom-ufs-qmp-20nm.h"
-
-#define UFS_PHY_NAME "ufs_phy_qmp_20nm"
-
-static
-int ufs_qcom_phy_qmp_20nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
- bool is_rate_B)
-{
- struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
- int tbl_size_A, tbl_size_B;
- u8 major = ufs_qcom_phy->host_ctrl_rev_major;
- u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
- u16 step = ufs_qcom_phy->host_ctrl_rev_step;
- int err;
-
- if ((major == 0x1) && (minor == 0x002) && (step == 0x0000)) {
- tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_2_0);
- tbl_A = phy_cal_table_rate_A_1_2_0;
- } else if ((major == 0x1) && (minor == 0x003) && (step == 0x0000)) {
- tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_3_0);
- tbl_A = phy_cal_table_rate_A_1_3_0;
- } else {
- dev_err(ufs_qcom_phy->dev, "%s: Unknown UFS-PHY version, no calibration values\n",
- __func__);
- err = -ENODEV;
- goto out;
- }
-
- tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
- tbl_B = phy_cal_table_rate_B;
-
- err = ufs_qcom_phy_calibrate(ufs_qcom_phy, tbl_A, tbl_size_A,
- tbl_B, tbl_size_B, is_rate_B);
-
- if (err)
- dev_err(ufs_qcom_phy->dev, "%s: ufs_qcom_phy_calibrate() failed %d\n",
- __func__, err);
-
-out:
- return err;
-}
-
-static
-void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
-{
- phy_common->quirks =
- UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
-}
-
-static
-int ufs_qcom_phy_qmp_20nm_set_mode(struct phy *generic_phy,
- enum phy_mode mode, int submode)
-{
- struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
-
- phy_common->mode = PHY_MODE_INVALID;
-
- if (mode > 0)
- phy_common->mode = mode;
-
- return 0;
-}
-
-static
-void ufs_qcom_phy_qmp_20nm_power_control(struct ufs_qcom_phy *phy, bool val)
-{
- bool hibern8_exit_after_pwr_collapse = phy->quirks &
- UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
-
- if (val) {
- writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
- /*
- * Before any transactions involving PHY, ensure PHY knows
- * that its analog rail is powered ON.
- */
- mb();
-
- if (hibern8_exit_after_pwr_collapse) {
- /*
- * Give at least a 1us delay after restoring PHY analog
- * power.
- */
- usleep_range(1, 2);
- writel_relaxed(0x0A, phy->mmio +
- QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
- writel_relaxed(0x08, phy->mmio +
- QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
- /*
- * Make sure the workaround is deactivated before proceeding
- * with normal PHY operations.
- */
- mb();
- }
- } else {
- if (hibern8_exit_after_pwr_collapse) {
- writel_relaxed(0x0A, phy->mmio +
- QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
- writel_relaxed(0x02, phy->mmio +
- QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
- /*
- * Make sure that the above workaround is activated before
- * PHY analog power collapse.
- */
- mb();
- }
-
- writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
- /*
- * ensure that the PHY knows its analog rail is going
- * to be powered down
- */
- mb();
- }
-}
-
-static
-void ufs_qcom_phy_qmp_20nm_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
-{
- writel_relaxed(val & UFS_PHY_TX_LANE_ENABLE_MASK,
- phy->mmio + UFS_PHY_TX_LANE_ENABLE);
- mb();
-}
-
-static inline void ufs_qcom_phy_qmp_20nm_start_serdes(struct ufs_qcom_phy *phy)
-{
- u32 tmp;
-
- tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
- tmp &= ~MASK_SERDES_START;
- tmp |= (1 << OFFSET_SERDES_START);
- writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
- mb();
-}
-
-static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
-{
- int err = 0;
- u32 val;
-
- err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
- val, (val & MASK_PCS_READY), 10, 1000000);
- if (err)
- dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
- __func__, err);
- return err;
-}
-
-static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
- .power_on = ufs_qcom_phy_power_on,
- .power_off = ufs_qcom_phy_power_off,
- .set_mode = ufs_qcom_phy_qmp_20nm_set_mode,
- .owner = THIS_MODULE,
-};
-
-static struct ufs_qcom_phy_specific_ops phy_20nm_ops = {
- .calibrate = ufs_qcom_phy_qmp_20nm_phy_calibrate,
- .start_serdes = ufs_qcom_phy_qmp_20nm_start_serdes,
- .is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_20nm_is_pcs_ready,
- .set_tx_lane_enable = ufs_qcom_phy_qmp_20nm_set_tx_lane_enable,
- .power_control = ufs_qcom_phy_qmp_20nm_power_control,
-};
-
-static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct phy *generic_phy;
- struct ufs_qcom_phy_qmp_20nm *phy;
- struct ufs_qcom_phy *phy_common;
- int err = 0;
-
- phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
- if (!phy) {
- err = -ENOMEM;
- goto out;
- }
- phy_common = &phy->common_cfg;
-
- generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
- &ufs_qcom_phy_qmp_20nm_phy_ops, &phy_20nm_ops);
-
- if (!generic_phy) {
- err = -EIO;
- goto out;
- }
-
- err = ufs_qcom_phy_init_clks(phy_common);
- if (err)
- goto out;
-
- err = ufs_qcom_phy_init_vregulators(phy_common);
- if (err)
- goto out;
-
- ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
-
- phy_set_drvdata(generic_phy, phy);
-
- strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
-
-out:
- return err;
-}
-
-static const struct of_device_id ufs_qcom_phy_qmp_20nm_of_match[] = {
- {.compatible = "qcom,ufs-phy-qmp-20nm"},
- {},
-};
-MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_20nm_of_match);
-
-static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = {
- .probe = ufs_qcom_phy_qmp_20nm_probe,
- .driver = {
- .of_match_table = ufs_qcom_phy_qmp_20nm_of_match,
- .name = "ufs_qcom_phy_qmp_20nm",
- },
-};
-
-module_platform_driver(ufs_qcom_phy_qmp_20nm_driver);
-
-MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP 20nm");
-MODULE_LICENSE("GPL v2");
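Both removed QMP drivers implement their is_pcs_ready() callback the same way: poll a status register with readl_poll_timeout() until a ready bit appears, and report the error otherwise. A hedged sketch of that polling pattern; STATUS_REG and READY_BIT are hypothetical stand-ins for UFS_PHY_PCS_READY_STATUS and MASK_PCS_READY:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/iopoll.h>

#define STATUS_REG	0x174		/* hypothetical PCS status offset */
#define READY_BIT	BIT(0)		/* hypothetical "ready" flag */

static int wait_for_pcs_ready(struct device *dev, void __iomem *mmio)
{
	u32 val;
	int err;

	/* Re-read every 10 us; give up after 1 s with -ETIMEDOUT. */
	err = readl_poll_timeout(mmio + STATUS_REG, val,
				 val & READY_BIT, 10, 1000000);
	if (err)
		dev_err(dev, "status poll failed: %d\n", err);
	return err;
}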
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h
deleted file mode 100644
index 8ce79f524eed..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h
+++ /dev/null
@@ -1,226 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- */
-
-#ifndef UFS_QCOM_PHY_QMP_20NM_H_
-#define UFS_QCOM_PHY_QMP_20NM_H_
-
-#include "phy-qcom-ufs-i.h"
-
-/* QCOM UFS PHY control registers */
-
-#define COM_OFF(x) (0x000 + x)
-#define PHY_OFF(x) (0xC00 + x)
-#define TX_OFF(n, x) (0x400 + (0x400 * n) + x)
-#define RX_OFF(n, x) (0x600 + (0x400 * n) + x)
-
-/* UFS PHY PLL block registers */
-#define QSERDES_COM_SYS_CLK_CTRL COM_OFF(0x0)
-#define QSERDES_COM_PLL_VCOTAIL_EN COM_OFF(0x04)
-#define QSERDES_COM_PLL_CNTRL COM_OFF(0x14)
-#define QSERDES_COM_PLL_IP_SETI COM_OFF(0x24)
-#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL COM_OFF(0x28)
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN COM_OFF(0x30)
-#define QSERDES_COM_PLL_CP_SETI COM_OFF(0x34)
-#define QSERDES_COM_PLL_IP_SETP COM_OFF(0x38)
-#define QSERDES_COM_PLL_CP_SETP COM_OFF(0x3C)
-#define QSERDES_COM_SYSCLK_EN_SEL_TXBAND COM_OFF(0x48)
-#define QSERDES_COM_RESETSM_CNTRL COM_OFF(0x4C)
-#define QSERDES_COM_RESETSM_CNTRL2 COM_OFF(0x50)
-#define QSERDES_COM_PLLLOCK_CMP1 COM_OFF(0x90)
-#define QSERDES_COM_PLLLOCK_CMP2 COM_OFF(0x94)
-#define QSERDES_COM_PLLLOCK_CMP3 COM_OFF(0x98)
-#define QSERDES_COM_PLLLOCK_CMP_EN COM_OFF(0x9C)
-#define QSERDES_COM_BGTC COM_OFF(0xA0)
-#define QSERDES_COM_DEC_START1 COM_OFF(0xAC)
-#define QSERDES_COM_PLL_AMP_OS COM_OFF(0xB0)
-#define QSERDES_COM_RES_CODE_UP_OFFSET COM_OFF(0xD8)
-#define QSERDES_COM_RES_CODE_DN_OFFSET COM_OFF(0xDC)
-#define QSERDES_COM_DIV_FRAC_START1 COM_OFF(0x100)
-#define QSERDES_COM_DIV_FRAC_START2 COM_OFF(0x104)
-#define QSERDES_COM_DIV_FRAC_START3 COM_OFF(0x108)
-#define QSERDES_COM_DEC_START2 COM_OFF(0x10C)
-#define QSERDES_COM_PLL_RXTXEPCLK_EN COM_OFF(0x110)
-#define QSERDES_COM_PLL_CRCTRL COM_OFF(0x114)
-#define QSERDES_COM_PLL_CLKEPDIV COM_OFF(0x118)
-
-/* TX LANE n (0, 1) registers */
-#define QSERDES_TX_EMP_POST1_LVL(n) TX_OFF(n, 0x08)
-#define QSERDES_TX_DRV_LVL(n) TX_OFF(n, 0x0C)
-#define QSERDES_TX_LANE_MODE(n) TX_OFF(n, 0x54)
-
-/* RX LANE n (0, 1) registers */
-#define QSERDES_RX_CDR_CONTROL1(n) RX_OFF(n, 0x0)
-#define QSERDES_RX_CDR_CONTROL_HALF(n) RX_OFF(n, 0x8)
-#define QSERDES_RX_RX_EQ_GAIN1_LSB(n) RX_OFF(n, 0xA8)
-#define QSERDES_RX_RX_EQ_GAIN1_MSB(n) RX_OFF(n, 0xAC)
-#define QSERDES_RX_RX_EQ_GAIN2_LSB(n) RX_OFF(n, 0xB0)
-#define QSERDES_RX_RX_EQ_GAIN2_MSB(n) RX_OFF(n, 0xB4)
-#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(n) RX_OFF(n, 0xBC)
-#define QSERDES_RX_CDR_CONTROL_QUARTER(n) RX_OFF(n, 0xC)
-#define QSERDES_RX_SIGDET_CNTRL(n) RX_OFF(n, 0x100)
-
-/* UFS PHY registers */
-#define UFS_PHY_PHY_START PHY_OFF(0x00)
-#define UFS_PHY_POWER_DOWN_CONTROL PHY_OFF(0x4)
-#define UFS_PHY_TX_LANE_ENABLE PHY_OFF(0x44)
-#define UFS_PHY_PWM_G1_CLK_DIVIDER PHY_OFF(0x08)
-#define UFS_PHY_PWM_G2_CLK_DIVIDER PHY_OFF(0x0C)
-#define UFS_PHY_PWM_G3_CLK_DIVIDER PHY_OFF(0x10)
-#define UFS_PHY_PWM_G4_CLK_DIVIDER PHY_OFF(0x14)
-#define UFS_PHY_CORECLK_PWM_G1_CLK_DIVIDER PHY_OFF(0x34)
-#define UFS_PHY_CORECLK_PWM_G2_CLK_DIVIDER PHY_OFF(0x38)
-#define UFS_PHY_CORECLK_PWM_G3_CLK_DIVIDER PHY_OFF(0x3C)
-#define UFS_PHY_CORECLK_PWM_G4_CLK_DIVIDER PHY_OFF(0x40)
-#define UFS_PHY_OMC_STATUS_RDVAL PHY_OFF(0x68)
-#define UFS_PHY_LINE_RESET_TIME PHY_OFF(0x28)
-#define UFS_PHY_LINE_RESET_GRANULARITY PHY_OFF(0x2C)
-#define UFS_PHY_TSYNC_RSYNC_CNTL PHY_OFF(0x48)
-#define UFS_PHY_PLL_CNTL PHY_OFF(0x50)
-#define UFS_PHY_TX_LARGE_AMP_DRV_LVL PHY_OFF(0x54)
-#define UFS_PHY_TX_SMALL_AMP_DRV_LVL PHY_OFF(0x5C)
-#define UFS_PHY_TX_LARGE_AMP_POST_EMP_LVL PHY_OFF(0x58)
-#define UFS_PHY_TX_SMALL_AMP_POST_EMP_LVL PHY_OFF(0x60)
-#define UFS_PHY_CFG_CHANGE_CNT_VAL PHY_OFF(0x64)
-#define UFS_PHY_RX_SYNC_WAIT_TIME PHY_OFF(0x6C)
-#define UFS_PHY_TX_MIN_SLEEP_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xB4)
-#define UFS_PHY_RX_MIN_SLEEP_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xE0)
-#define UFS_PHY_TX_MIN_STALL_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xB8)
-#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xE4)
-#define UFS_PHY_TX_MIN_SAVE_CONFIG_TIME_CAPABILITY PHY_OFF(0xBC)
-#define UFS_PHY_RX_MIN_SAVE_CONFIG_TIME_CAPABILITY PHY_OFF(0xE8)
-#define UFS_PHY_RX_PWM_BURST_CLOSURE_LENGTH_CAPABILITY PHY_OFF(0xFC)
-#define UFS_PHY_RX_MIN_ACTIVATETIME_CAPABILITY PHY_OFF(0x100)
-#define UFS_PHY_RX_SIGDET_CTRL3 PHY_OFF(0x14c)
-#define UFS_PHY_RMMI_ATTR_CTRL PHY_OFF(0x160)
-#define UFS_PHY_RMMI_RX_CFGUPDT_L1 (1 << 7)
-#define UFS_PHY_RMMI_TX_CFGUPDT_L1 (1 << 6)
-#define UFS_PHY_RMMI_CFGWR_L1 (1 << 5)
-#define UFS_PHY_RMMI_CFGRD_L1 (1 << 4)
-#define UFS_PHY_RMMI_RX_CFGUPDT_L0 (1 << 3)
-#define UFS_PHY_RMMI_TX_CFGUPDT_L0 (1 << 2)
-#define UFS_PHY_RMMI_CFGWR_L0 (1 << 1)
-#define UFS_PHY_RMMI_CFGRD_L0 (1 << 0)
-#define UFS_PHY_RMMI_ATTRID PHY_OFF(0x164)
-#define UFS_PHY_RMMI_ATTRWRVAL PHY_OFF(0x168)
-#define UFS_PHY_RMMI_ATTRRDVAL_L0_STATUS PHY_OFF(0x16C)
-#define UFS_PHY_RMMI_ATTRRDVAL_L1_STATUS PHY_OFF(0x170)
-#define UFS_PHY_PCS_READY_STATUS PHY_OFF(0x174)
-
-#define UFS_PHY_TX_LANE_ENABLE_MASK 0x3
-
-/*
- * This structure represents the 20nm specific phy.
- * common_cfg MUST remain the first field in this structure
- * in case extra fields are added. This way, when calling
- * get_ufs_qcom_phy() of generic phy, we can extract the
- * common phy structure (struct ufs_qcom_phy) out of it
- * regardless of the relevant specific phy.
- */
-struct ufs_qcom_phy_qmp_20nm {
- struct ufs_qcom_phy common_cfg;
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_1_2_0[] = {
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL3, 0x0D),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0xe1),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0xcc),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x40),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x90),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL2, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(0), 0xf2),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(1), 0xf2),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(0), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(0), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(0), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(0), 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(1), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(1), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(1), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(1), 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x3f),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x1b),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x0f),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_EMP_POST1_LVL(0), 0x2F),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_DRV_LVL(0), 0x20),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_EMP_POST1_LVL(1), 0x2F),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_DRV_LVL(1), 0x20),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(0), 0x68),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(1), 0x68),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(1), 0xdc),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(0), 0xdc),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3),
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_1_3_0[] = {
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL3, 0x0D),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0xe1),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0xcc),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x40),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x90),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL2, 0x03),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(0), 0xf2),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(1), 0xf2),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(0), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(0), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(0), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(0), 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(1), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(1), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(1), 0xff),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(1), 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x2b),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x38),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x3c),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_UP_OFFSET, 0x02),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_DN_OFFSET, 0x02),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CNTRL, 0x40),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(0), 0x68),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(1), 0x68),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(1), 0xdc),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(0), 0xdc),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3),
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x98),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0x65),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x1e),
-};
-
-#endif
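The 20nm calibrate callback (in the .c file removed above) selects its rate-A table from the host controller revision (major/minor/step) and fails with -ENODEV for unknown silicon. A sketch of that table-per-revision dispatch; cal_entry, cal_rev and pick_rate_a_table() are hypothetical simplifications of the driver's types:

#include <linux/errno.h>
#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/types.h>

struct cal_entry { u32 reg_offset; u32 cfg_value; };

struct cal_rev {
	u8 major;
	u16 minor, step;
	const struct cal_entry *tbl;
	int tbl_size;
};

/* Placeholder tables; the real ones carry the QSERDES settings. */
static const struct cal_entry rate_a_1_2_0[] = { { 0x0, 0x0 } };
static const struct cal_entry rate_a_1_3_0[] = { { 0x0, 0x0 } };

static const struct cal_rev cal_revs[] = {
	{ 0x1, 0x002, 0x0000, rate_a_1_2_0, ARRAY_SIZE(rate_a_1_2_0) },
	{ 0x1, 0x003, 0x0000, rate_a_1_3_0, ARRAY_SIZE(rate_a_1_3_0) },
};

static int pick_rate_a_table(u8 major, u16 minor, u16 step,
			     const struct cal_entry **tbl, int *size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cal_revs); i++) {
		if (cal_revs[i].major == major &&
		    cal_revs[i].minor == minor &&
		    cal_revs[i].step == step) {
			*tbl = cal_revs[i].tbl;
			*size = cal_revs[i].tbl_size;
			return 0;
		}
	}
	/* Unknown revision: refuse to calibrate, as the driver does. */
	return -ENODEV;
}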
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
deleted file mode 100644
index 763c8d396af1..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ /dev/null
@@ -1,648 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- */
-
-#include "phy-qcom-ufs-i.h"
-
-#define MAX_PROP_NAME 32
-#define VDDA_PHY_MIN_UV 1000000
-#define VDDA_PHY_MAX_UV 1000000
-#define VDDA_PLL_MIN_UV 1800000
-#define VDDA_PLL_MAX_UV 1800000
-#define VDDP_REF_CLK_MIN_UV 1200000
-#define VDDP_REF_CLK_MAX_UV 1200000
-
-int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
- struct ufs_qcom_phy_calibration *tbl_A,
- int tbl_size_A,
- struct ufs_qcom_phy_calibration *tbl_B,
- int tbl_size_B, bool is_rate_B)
-{
- int i;
- int ret = 0;
-
- if (!tbl_A) {
- dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < tbl_size_A; i++)
- writel_relaxed(tbl_A[i].cfg_value,
- ufs_qcom_phy->mmio + tbl_A[i].reg_offset);
-
- /*
- * In case we would like to work in rate B, we need
- * to override the registers that were configured in the
- * rate A table with the registers of the rate B
- * table.
- */
- if (is_rate_B) {
- if (!tbl_B) {
- dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL\n",
- __func__);
- ret = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < tbl_size_B; i++)
- writel_relaxed(tbl_B[i].cfg_value,
- ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
- }
-
- /* flush buffered writes */
- mb();
-
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
-
-/*
- * This assumes the embedded phy structure inside generic_phy is of type
- * struct ufs_qcom_phy. In order to function properly it's crucial
- * to keep the embedded struct "struct ufs_qcom_phy common_cfg"
- * as the first inside generic_phy.
- */
-struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
-{
- return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
-}
-EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);
-
-static
-int ufs_qcom_phy_base_init(struct platform_device *pdev,
- struct ufs_qcom_phy *phy_common)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- int err = 0;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
- phy_common->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR((void const *)phy_common->mmio)) {
- err = PTR_ERR((void const *)phy_common->mmio);
- phy_common->mmio = NULL;
- dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
- __func__, err);
- return err;
- }
-
- /* "dev_ref_clk_ctrl_mem" is optional resource */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "dev_ref_clk_ctrl_mem");
- phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio))
- phy_common->dev_ref_clk_ctrl_mmio = NULL;
-
- return 0;
-}
-
-struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
- struct ufs_qcom_phy *common_cfg,
- const struct phy_ops *ufs_qcom_phy_gen_ops,
- struct ufs_qcom_phy_specific_ops *phy_spec_ops)
-{
- int err;
- struct device *dev = &pdev->dev;
- struct phy *generic_phy = NULL;
- struct phy_provider *phy_provider;
-
- err = ufs_qcom_phy_base_init(pdev, common_cfg);
- if (err) {
- dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
- goto out;
- }
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider)) {
- err = PTR_ERR(phy_provider);
- dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
- goto out;
- }
-
- generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
- if (IS_ERR(generic_phy)) {
- err = PTR_ERR(generic_phy);
- dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
- generic_phy = NULL;
- goto out;
- }
-
- common_cfg->phy_spec_ops = phy_spec_ops;
- common_cfg->dev = dev;
-
-out:
- return generic_phy;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
-
-static int ufs_qcom_phy_get_reset(struct ufs_qcom_phy *phy_common)
-{
- struct reset_control *reset;
-
- if (phy_common->ufs_reset)
- return 0;
-
- reset = devm_reset_control_get_exclusive_by_index(phy_common->dev, 0);
- if (IS_ERR(reset))
- return PTR_ERR(reset);
-
- phy_common->ufs_reset = reset;
- return 0;
-}
-
-static int __ufs_qcom_phy_clk_get(struct device *dev,
- const char *name, struct clk **clk_out, bool err_print)
-{
- struct clk *clk;
- int err = 0;
-
- clk = devm_clk_get(dev, name);
- if (IS_ERR(clk)) {
- err = PTR_ERR(clk);
- if (err_print)
- dev_err(dev, "failed to get %s err %d", name, err);
- } else {
- *clk_out = clk;
- }
-
- return err;
-}
-
-static int ufs_qcom_phy_clk_get(struct device *dev,
- const char *name, struct clk **clk_out)
-{
- return __ufs_qcom_phy_clk_get(dev, name, clk_out, true);
-}
-
-int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
-{
- int err;
-
- if (of_device_is_compatible(phy_common->dev->of_node,
- "qcom,msm8996-ufs-phy-qmp-14nm"))
- goto skip_txrx_clk;
-
- err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk",
- &phy_common->tx_iface_clk);
- if (err)
- goto out;
-
- err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk",
- &phy_common->rx_iface_clk);
- if (err)
- goto out;
-
-skip_txrx_clk:
- err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
- &phy_common->ref_clk_src);
- if (err)
- goto out;
-
- /*
- * "ref_clk_parent" is optional hence don't abort init if it's not
- * found.
- */
- __ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
- &phy_common->ref_clk_parent, false);
-
- err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
- &phy_common->ref_clk);
-
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
-
-static int ufs_qcom_phy_init_vreg(struct device *dev,
- struct ufs_qcom_phy_vreg *vreg,
- const char *name)
-{
- int err = 0;
-
- char prop_name[MAX_PROP_NAME];
-
- vreg->name = name;
- vreg->reg = devm_regulator_get(dev, name);
- if (IS_ERR(vreg->reg)) {
- err = PTR_ERR(vreg->reg);
- dev_err(dev, "failed to get %s, %d\n", name, err);
- goto out;
- }
-
- if (dev->of_node) {
- snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
- err = of_property_read_u32(dev->of_node,
- prop_name, &vreg->max_uA);
- if (err && err != -EINVAL) {
- dev_err(dev, "%s: failed to read %s\n",
- __func__, prop_name);
- goto out;
- } else if (err == -EINVAL || !vreg->max_uA) {
- if (regulator_count_voltages(vreg->reg) > 0) {
- dev_err(dev, "%s: %s is mandatory\n",
- __func__, prop_name);
- goto out;
- }
- err = 0;
- }
- }
-
- if (!strcmp(name, "vdda-pll")) {
- vreg->max_uV = VDDA_PLL_MAX_UV;
- vreg->min_uV = VDDA_PLL_MIN_UV;
- } else if (!strcmp(name, "vdda-phy")) {
- vreg->max_uV = VDDA_PHY_MAX_UV;
- vreg->min_uV = VDDA_PHY_MIN_UV;
- } else if (!strcmp(name, "vddp-ref-clk")) {
- vreg->max_uV = VDDP_REF_CLK_MAX_UV;
- vreg->min_uV = VDDP_REF_CLK_MIN_UV;
- }
-
-out:
- return err;
-}
-
-int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common)
-{
- int err;
-
- err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll,
- "vdda-pll");
- if (err)
- goto out;
-
- err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy,
- "vdda-phy");
-
- if (err)
- goto out;
-
- err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk,
- "vddp-ref-clk");
-
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
-
-static int ufs_qcom_phy_cfg_vreg(struct device *dev,
- struct ufs_qcom_phy_vreg *vreg, bool on)
-{
- int ret = 0;
- struct regulator *reg = vreg->reg;
- const char *name = vreg->name;
- int min_uV;
- int uA_load;
-
- if (regulator_count_voltages(reg) > 0) {
- min_uV = on ? vreg->min_uV : 0;
- ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
- if (ret) {
- dev_err(dev, "%s: %s set voltage failed, err=%d\n",
- __func__, name, ret);
- goto out;
- }
- uA_load = on ? vreg->max_uA : 0;
- ret = regulator_set_load(reg, uA_load);
- if (ret >= 0) {
- /*
- * regulator_set_load() returns the new regulator
- * mode upon success.
- */
- ret = 0;
- } else {
- dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
- __func__, name, uA_load, ret);
- goto out;
- }
- }
-out:
- return ret;
-}
-
-static int ufs_qcom_phy_enable_vreg(struct device *dev,
- struct ufs_qcom_phy_vreg *vreg)
-{
- int ret = 0;
-
- if (!vreg || vreg->enabled)
- goto out;
-
- ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true);
- if (ret) {
- dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
- __func__, ret);
- goto out;
- }
-
- ret = regulator_enable(vreg->reg);
- if (ret) {
- dev_err(dev, "%s: enable failed, err=%d\n",
- __func__, ret);
- goto out;
- }
-
- vreg->enabled = true;
-out:
- return ret;
-}
-
-static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy)
-{
- int ret = 0;
-
- if (phy->is_ref_clk_enabled)
- goto out;
-
- /*
- * reference clock is propagated in a daisy-chained manner from
- * source to phy, so ungate them at each stage.
- */
- ret = clk_prepare_enable(phy->ref_clk_src);
- if (ret) {
- dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
- __func__, ret);
- goto out;
- }
-
- /*
- * "ref_clk_parent" is optional clock hence make sure that clk reference
- * is available before trying to enable the clock.
- */
- if (phy->ref_clk_parent) {
- ret = clk_prepare_enable(phy->ref_clk_parent);
- if (ret) {
- dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
- __func__, ret);
- goto out_disable_src;
- }
- }
-
- ret = clk_prepare_enable(phy->ref_clk);
- if (ret) {
- dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
- __func__, ret);
- goto out_disable_parent;
- }
-
- phy->is_ref_clk_enabled = true;
- goto out;
-
-out_disable_parent:
- if (phy->ref_clk_parent)
- clk_disable_unprepare(phy->ref_clk_parent);
-out_disable_src:
- clk_disable_unprepare(phy->ref_clk_src);
-out:
- return ret;
-}
-
-static int ufs_qcom_phy_disable_vreg(struct device *dev,
- struct ufs_qcom_phy_vreg *vreg)
-{
- int ret = 0;
-
- if (!vreg || !vreg->enabled)
- goto out;
-
- ret = regulator_disable(vreg->reg);
-
- if (!ret) {
- /* ignore errors on applying disable config */
- ufs_qcom_phy_cfg_vreg(dev, vreg, false);
- vreg->enabled = false;
- } else {
- dev_err(dev, "%s: %s disable failed, err=%d\n",
- __func__, vreg->name, ret);
- }
-out:
- return ret;
-}
-
-static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
-{
- if (phy->is_ref_clk_enabled) {
- clk_disable_unprepare(phy->ref_clk);
- /*
- * "ref_clk_parent" is optional clock hence make sure that clk
- * reference is available before trying to disable the clock.
- */
- if (phy->ref_clk_parent)
- clk_disable_unprepare(phy->ref_clk_parent);
- clk_disable_unprepare(phy->ref_clk_src);
- phy->is_ref_clk_enabled = false;
- }
-}
-
-/* Turn ON M-PHY RMMI interface clocks */
-static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
-{
- int ret = 0;
-
- if (phy->is_iface_clk_enabled)
- goto out;
-
- ret = clk_prepare_enable(phy->tx_iface_clk);
- if (ret) {
- dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
- __func__, ret);
- goto out;
- }
- ret = clk_prepare_enable(phy->rx_iface_clk);
- if (ret) {
- clk_disable_unprepare(phy->tx_iface_clk);
- dev_err(phy->dev, "%s: rx_iface_clk enable failed %d. disabling also tx_iface_clk\n",
- __func__, ret);
- goto out;
- }
- phy->is_iface_clk_enabled = true;
-
-out:
- return ret;
-}
-
-/* Turn OFF M-PHY RMMI interface clocks */
-static void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
-{
- if (phy->is_iface_clk_enabled) {
- clk_disable_unprepare(phy->tx_iface_clk);
- clk_disable_unprepare(phy->rx_iface_clk);
- phy->is_iface_clk_enabled = false;
- }
-}
-
-static int ufs_qcom_phy_start_serdes(struct ufs_qcom_phy *ufs_qcom_phy)
-{
- int ret = 0;
-
- if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
- dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
- __func__);
- ret = -ENOTSUPP;
- } else {
- ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
- }
-
- return ret;
-}
-
-int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
-{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int ret = 0;
-
- if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
- dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
- __func__);
- ret = -ENOTSUPP;
- } else {
- ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
- tx_lanes);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);
-
-void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
- u8 major, u16 minor, u16 step)
-{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-
- ufs_qcom_phy->host_ctrl_rev_major = major;
- ufs_qcom_phy->host_ctrl_rev_minor = minor;
- ufs_qcom_phy->host_ctrl_rev_step = step;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
-
-static int ufs_qcom_phy_is_pcs_ready(struct ufs_qcom_phy *ufs_qcom_phy)
-{
- if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
- dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
- __func__);
- return -ENOTSUPP;
- }
-
- return ufs_qcom_phy->phy_spec_ops->
- is_physical_coding_sublayer_ready(ufs_qcom_phy);
-}
-
-int ufs_qcom_phy_power_on(struct phy *generic_phy)
-{
- struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
- struct device *dev = phy_common->dev;
- bool is_rate_B = false;
- int err;
-
- err = ufs_qcom_phy_get_reset(phy_common);
- if (err)
- return err;
-
- err = reset_control_assert(phy_common->ufs_reset);
- if (err)
- return err;
-
- if (phy_common->mode == PHY_MODE_UFS_HS_B)
- is_rate_B = true;
-
- err = phy_common->phy_spec_ops->calibrate(phy_common, is_rate_B);
- if (err)
- return err;
-
- err = reset_control_deassert(phy_common->ufs_reset);
- if (err) {
- dev_err(dev, "Failed to assert UFS PHY reset");
- return err;
- }
-
- err = ufs_qcom_phy_start_serdes(phy_common);
- if (err)
- return err;
-
- err = ufs_qcom_phy_is_pcs_ready(phy_common);
- if (err)
- return err;
-
- err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
- if (err) {
- dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
- __func__, err);
- goto out;
- }
-
- phy_common->phy_spec_ops->power_control(phy_common, true);
-
- /* vdda_pll also enables ref clock LDOs so enable it first */
- err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
- if (err) {
- dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
- __func__, err);
- goto out_disable_phy;
- }
-
- err = ufs_qcom_phy_enable_iface_clk(phy_common);
- if (err) {
- dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
- __func__, err);
- goto out_disable_pll;
- }
-
- err = ufs_qcom_phy_enable_ref_clk(phy_common);
- if (err) {
- dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
- __func__, err);
- goto out_disable_iface_clk;
- }
-
- /* enable device PHY ref_clk pad rail */
- if (phy_common->vddp_ref_clk.reg) {
- err = ufs_qcom_phy_enable_vreg(dev,
- &phy_common->vddp_ref_clk);
- if (err) {
- dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
- __func__, err);
- goto out_disable_ref_clk;
- }
- }
-
- goto out;
-
-out_disable_ref_clk:
- ufs_qcom_phy_disable_ref_clk(phy_common);
-out_disable_iface_clk:
- ufs_qcom_phy_disable_iface_clk(phy_common);
-out_disable_pll:
- ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
-out_disable_phy:
- ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);
-
-int ufs_qcom_phy_power_off(struct phy *generic_phy)
-{
- struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
-
- phy_common->phy_spec_ops->power_control(phy_common, false);
-
- if (phy_common->vddp_ref_clk.reg)
- ufs_qcom_phy_disable_vreg(phy_common->dev,
- &phy_common->vddp_ref_clk);
- ufs_qcom_phy_disable_ref_clk(phy_common);
- ufs_qcom_phy_disable_iface_clk(phy_common);
-
- ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
- ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
- reset_control_assert(phy_common->ufs_reset);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
-
-MODULE_AUTHOR("Yaniv Gardi <ygardi@codeaurora.org>");
-MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
-MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY");
-MODULE_LICENSE("GPL v2");
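ufs_qcom_phy_power_on() above is a textbook goto-unwind: each enable step that fails jumps to a label that tears down only the steps that already succeeded, in reverse order. A compact sketch of the shape, with hypothetical enable_*/disable_* stand-ins for the regulator and clock calls:

#include <linux/device.h>

/* Hypothetical step functions; declarations only, for the sketch. */
int enable_vdda(struct device *dev);
int enable_clks(struct device *dev);
int enable_pad_rail(struct device *dev);
void disable_vdda(struct device *dev);
void disable_clks(struct device *dev);

static int power_on_sketch(struct device *dev)
{
	int err;

	err = enable_vdda(dev);
	if (err)
		return err;		/* nothing to undo yet */

	err = enable_clks(dev);
	if (err)
		goto out_disable_vdda;

	err = enable_pad_rail(dev);
	if (err)
		goto out_disable_clks;

	return 0;

	/* Unwind strictly in reverse order of what succeeded. */
out_disable_clks:
	disable_clks(dev);
out_disable_vdda:
	disable_vdda(dev);
	return err;
}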
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index bfb22f868857..e34e4475027c 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -111,6 +111,7 @@ struct rcar_gen3_chan {
struct work_struct work;
struct mutex lock; /* protects rphys[...].powered */
enum usb_dr_mode dr_mode;
+ int irq;
bool extcon_host;
bool is_otg_channel;
bool uses_otg_pins;
@@ -389,12 +390,40 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
rcar_gen3_device_recognition(ch);
}
+static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
+{
+ struct rcar_gen3_chan *ch = _ch;
+ void __iomem *usb2_base = ch->base;
+ u32 status = readl(usb2_base + USB2_OBINTSTA);
+ irqreturn_t ret = IRQ_NONE;
+
+ if (status & USB2_OBINT_BITS) {
+ dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
+ writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
+ rcar_gen3_device_recognition(ch);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
static int rcar_gen3_phy_usb2_init(struct phy *p)
{
struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
struct rcar_gen3_chan *channel = rphy->ch;
void __iomem *usb2_base = channel->base;
u32 val;
+ int ret;
+
+ if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) {
+ INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
+ ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq,
+ IRQF_SHARED, dev_name(channel->dev), channel);
+ if (ret < 0) {
+ dev_err(channel->dev, "No irq handler (%d)\n", channel->irq);
+ return ret;
+ }
+ }
/* Initialize USB2 part */
val = readl(usb2_base + USB2_INT_ENABLE);
@@ -433,6 +462,9 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
val &= ~USB2_INT_ENABLE_UCOM_INTEN;
writel(val, usb2_base + USB2_INT_ENABLE);
+ if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel))
+ free_irq(channel->irq, channel);
+
return 0;
}
@@ -503,23 +535,6 @@ static const struct phy_ops rz_g1c_phy_usb2_ops = {
.owner = THIS_MODULE,
};
-static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
-{
- struct rcar_gen3_chan *ch = _ch;
- void __iomem *usb2_base = ch->base;
- u32 status = readl(usb2_base + USB2_OBINTSTA);
- irqreturn_t ret = IRQ_NONE;
-
- if (status & USB2_OBINT_BITS) {
- dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
- writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
- rcar_gen3_device_recognition(ch);
- ret = IRQ_HANDLED;
- }
-
- return ret;
-}
-
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
{
.compatible = "renesas,usb2-phy-r8a77470",
@@ -598,7 +613,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
struct phy_provider *provider;
struct resource *res;
const struct phy_ops *phy_usb2_ops;
- int irq, ret = 0, i;
+ int ret = 0, i;
if (!dev->of_node) {
dev_err(dev, "This driver needs device tree\n");
@@ -614,16 +629,8 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
if (IS_ERR(channel->base))
return PTR_ERR(channel->base);
- /* call request_irq for OTG */
- irq = platform_get_irq_optional(pdev, 0);
- if (irq >= 0) {
- INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
- irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
- IRQF_SHARED, dev_name(dev), channel);
- if (irq < 0)
- dev_err(dev, "No irq handler (%d)\n", irq);
- }
-
+ /* get irq number here and request_irq for OTG in phy_init */
+ channel->irq = platform_get_irq_optional(pdev, 0);
channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
int ret;
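The rcar-gen3 change above defers request_irq() from probe to the first phy_init() call and frees the IRQ on the last phy_exit(), so the shared OTG interrupt can never fire before the channel is fully set up. A sketch of that lazy request/release pattern; struct my_chan and its init_count are assumptions (the driver instead checks rcar_gen3_is_any_rphy_initialized()), while request_irq() and free_irq() are the real APIs the patch switches to:

#include <linux/interrupt.h>

struct my_chan {
	int irq;
	int init_count;		/* PHY instances currently initialized */
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;	/* a real handler would check status here */
}

static int my_phy_init(struct my_chan *ch)
{
	/* First instance to initialize requests the (optional) IRQ. */
	if (ch->init_count++ == 0 && ch->irq >= 0)
		return request_irq(ch->irq, my_irq_handler, IRQF_SHARED,
				   "my-phy", ch);
	return 0;
}

static void my_phy_exit(struct my_chan *ch)
{
	/* Last instance to exit releases it again. */
	if (--ch->init_count == 0 && ch->irq >= 0)
		free_irq(ch->irq, ch);
}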
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 24563160197f..70a31251b202 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -347,7 +347,7 @@ struct usb3phy_reg {
};
/**
- * struct rockchip_usb3phy_port_cfg: usb3-phy port configuration.
+ * struct rockchip_usb3phy_port_cfg - usb3-phy port configuration.
* @reg: the base address for usb3-phy config.
* @typec_conn_dir: the register of type-c connector direction.
* @usb3tousb2_en: the register of type-c force usb3 to usb2 enable.
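This one-character hunk converts the comment into valid kernel-doc, which wants "struct name - summary" in the heading line and one @member: line per field (the Exynos5 USBDRD hunk below makes the same kind of fix). A minimal example of the expected shape; example_cfg is hypothetical:

#include <linux/io.h>

/**
 * struct example_cfg - example configuration block
 * @base:  base address of the block
 * @flags: behaviour flags for the block
 *
 * The heading uses "name - summary" (not "name: summary"), and every
 * member gets an @name: line, as scripts/kernel-doc expects.
 */
struct example_cfg {
	void __iomem *base;
	unsigned long flags;
};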
diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig
index 9e483d1fdaf2..e20d2fcc9fe7 100644
--- a/drivers/phy/samsung/Kconfig
+++ b/drivers/phy/samsung/Kconfig
@@ -3,23 +3,23 @@
# Phy drivers for Samsung platforms
#
config PHY_EXYNOS_DP_VIDEO
- tristate "EXYNOS SoC series Display Port PHY driver"
+ tristate "Exynos SoC series Display Port PHY driver"
depends on OF
depends on ARCH_EXYNOS || COMPILE_TEST
default ARCH_EXYNOS
select GENERIC_PHY
help
- Support for Display Port PHY found on Samsung EXYNOS SoCs.
+ Support for Display Port PHY found on Samsung Exynos SoCs.
config PHY_EXYNOS_MIPI_VIDEO
- tristate "S5P/EXYNOS SoC series MIPI CSI-2/DSI PHY driver"
+ tristate "S5P/Exynos SoC series MIPI CSI-2/DSI PHY driver"
depends on HAS_IOMEM
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
select GENERIC_PHY
default y if ARCH_S5PV210 || ARCH_EXYNOS
help
Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P
- and EXYNOS SoCs.
+ and Exynos SoCs.
config PHY_EXYNOS_PCIE
bool "Exynos PCIe PHY driver"
@@ -29,6 +29,15 @@ config PHY_EXYNOS_PCIE
Enable PCIe PHY support for Exynos SoC series.
This driver provides PHY interface for Exynos PCIe controller.
+config PHY_SAMSUNG_UFS
+ tristate "SAMSUNG SoC series UFS PHY driver"
+ depends on OF && (ARCH_EXYNOS || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable this to support the Samsung UFS PHY driver for
+ Samsung SoCs. This driver provides the interface for UFS
+ host controller to do PHY related programming.
+
config PHY_SAMSUNG_USB2
tristate "Samsung USB 2.0 PHY driver"
depends on HAS_IOMEM
diff --git a/drivers/phy/samsung/Makefile b/drivers/phy/samsung/Makefile
index db9b1aa0de6e..3959100fe8a2 100644
--- a/drivers/phy/samsung/Makefile
+++ b/drivers/phy/samsung/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o
obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o
obj-$(CONFIG_PHY_EXYNOS_PCIE) += phy-exynos-pcie.o
+obj-$(CONFIG_PHY_SAMSUNG_UFS) += phy-samsung-ufs.o
obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-exynos-usb2.o
phy-exynos-usb2-y += phy-samsung-usb2.o
phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o
diff --git a/drivers/phy/samsung/phy-exynos-dp-video.c b/drivers/phy/samsung/phy-exynos-dp-video.c
index 6c607df1dc9a..2b670ef91deb 100644
--- a/drivers/phy/samsung/phy-exynos-dp-video.c
+++ b/drivers/phy/samsung/phy-exynos-dp-video.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Samsung EXYNOS SoC series Display Port PHY driver
+ * Samsung Exynos SoC series Display Port PHY driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* Author: Jingoo Han <jg1.han@samsung.com>
@@ -115,5 +115,5 @@ static struct platform_driver exynos_dp_video_phy_driver = {
module_platform_driver(exynos_dp_video_phy_driver);
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
-MODULE_DESCRIPTION("Samsung EXYNOS SoC DP PHY driver");
+MODULE_DESCRIPTION("Samsung Exynos SoC DP PHY driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/samsung/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c
index bb51195f189f..c1df1ef3ee3c 100644
--- a/drivers/phy/samsung/phy-exynos-mipi-video.c
+++ b/drivers/phy/samsung/phy-exynos-mipi-video.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Samsung S5P/EXYNOS SoC series MIPI CSIS/DSIM DPHY driver
+ * Samsung S5P/Exynos SoC series MIPI CSIS/DSIM DPHY driver
*
* Copyright (C) 2013,2016 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -364,6 +364,6 @@ static struct platform_driver exynos_mipi_video_phy_driver = {
};
module_platform_driver(exynos_mipi_video_phy_driver);
-MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC MIPI CSI-2/DSI PHY driver");
+MODULE_DESCRIPTION("Samsung S5P/Exynos SoC MIPI CSI-2/DSI PHY driver");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c
index 659e7ae0a6cf..7e28b1aea0d1 100644
--- a/drivers/phy/samsung/phy-exynos-pcie.c
+++ b/drivers/phy/samsung/phy-exynos-pcie.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Samsung EXYNOS SoC series PCIe PHY driver
+ * Samsung Exynos SoC series PCIe PHY driver
*
* Phy provider for PCIe controller on Exynos SoC series
*
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index e510732afb8b..0d818b77a0d8 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Samsung EXYNOS5 SoC series USB DRD PHY driver
+ * Samsung Exynos5 SoC series USB DRD PHY driver
*
* Phy provider for USB 3.0 DRD controller on Exynos5 SoC series
*
@@ -33,7 +33,7 @@
#define EXYNOS5_FSEL_24MHZ 0x5
#define EXYNOS5_FSEL_50MHZ 0x7
-/* EXYNOS5: USB 3.0 DRD PHY registers */
+/* Exynos5: USB 3.0 DRD PHY registers */
#define EXYNOS5_DRD_LINKSYSTEM 0x04
#define LINKSYSTEM_FLADJ_MASK (0x3f << 1)
@@ -180,14 +180,14 @@ struct exynos5_usbdrd_phy_drvdata {
* @utmiclk: clock for utmi+ phy
* @itpclk: clock for ITP generation
* @drv_data: pointer to SoC level driver data structure
- * @phys[]: array for 'EXYNOS5_DRDPHYS_NUM' number of PHY
+ * @phys: array for 'EXYNOS5_DRDPHYS_NUM' number of PHY
* instances each with its 'phy' and 'phy_cfg'.
* @extrefclk: frequency select settings when using 'separate
* reference clocks' for SS and HS operations
* @ref_clk: reference clock to PHY block from which PHY's
* operational clocks are derived
- * vbus: VBUS regulator for phy
- * vbus_boost: Boost regulator for VBUS present on few Exynos boards
+ * @vbus: VBUS regulator for phy
+ * @vbus_boost: Boost regulator for VBUS present on few Exynos boards
*/
struct exynos5_usbdrd_phy {
struct device *dev;
@@ -714,7 +714,9 @@ static int exynos5_usbdrd_phy_calibrate(struct phy *phy)
struct phy_usb_instance *inst = phy_get_drvdata(phy);
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
- return exynos5420_usbdrd_phy_calibrate(phy_drd);
+ if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI)
+ return exynos5420_usbdrd_phy_calibrate(phy_drd);
+ return 0;
}
static const struct phy_ops exynos5_usbdrd_phy_ops = {
@@ -958,7 +960,7 @@ static struct platform_driver exynos5_usb3drd_phy = {
};
module_platform_driver(exynos5_usb3drd_phy);
-MODULE_DESCRIPTION("Samsung EXYNOS5 SoCs USB 3.0 DRD controller PHY driver");
+MODULE_DESCRIPTION("Samsung Exynos5 SoCs USB 3.0 DRD controller PHY driver");
MODULE_AUTHOR("Vivek Gautam <gautam.vivek@samsung.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:exynos5_usb3drd_phy");
diff --git a/drivers/phy/samsung/phy-exynos7-ufs.h b/drivers/phy/samsung/phy-exynos7-ufs.h
new file mode 100644
index 000000000000..518923141958
--- /dev/null
+++ b/drivers/phy/samsung/phy-exynos7-ufs.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UFS PHY driver data for Samsung Exynos7 SoC
+ *
+ * Copyright (C) 2020 Samsung Electronics Co., Ltd.
+ */
+#ifndef _PHY_EXYNOS7_UFS_H_
+#define _PHY_EXYNOS7_UFS_H_
+
+#include "phy-samsung-ufs.h"
+
+#define EXYNOS7_EMBEDDED_COMBO_PHY_CTRL 0x720
+#define EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_MASK 0x1
+#define EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_EN BIT(0)
+
+/* Calibration for phy initialization */
+static const struct samsung_ufs_phy_cfg exynos7_pre_init_cfg[] = {
+ PHY_COMN_REG_CFG(0x00f, 0xfa, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x010, 0x82, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x011, 0x1e, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x017, 0x84, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x035, 0x58, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x036, 0x32, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x037, 0x40, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x03b, 0x83, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x042, 0x88, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x043, 0xa6, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x048, 0x74, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x04c, 0x5b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x04d, 0x83, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG(0x05c, 0x14, PWR_MODE_ANY),
+ END_UFS_PHY_CFG
+};
+
+/* Calibration for HS mode series A/B */
+static const struct samsung_ufs_phy_cfg exynos7_pre_pwr_hs_cfg[] = {
+ PHY_COMN_REG_CFG(0x00f, 0xfa, PWR_MODE_HS_ANY),
+ PHY_COMN_REG_CFG(0x010, 0x82, PWR_MODE_HS_ANY),
+ PHY_COMN_REG_CFG(0x011, 0x1e, PWR_MODE_HS_ANY),
+ /* Setting order: 1st(0x16), 2nd(0x15) */
+ PHY_COMN_REG_CFG(0x016, 0xff, PWR_MODE_HS_ANY),
+ PHY_COMN_REG_CFG(0x015, 0x80, PWR_MODE_HS_ANY),
+ PHY_COMN_REG_CFG(0x017, 0x94, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x036, 0x32, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x037, 0x43, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x038, 0x3f, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x042, 0x88, PWR_MODE_HS_G2_SER_A),
+ PHY_TRSV_REG_CFG(0x042, 0xbb, PWR_MODE_HS_G2_SER_B),
+ PHY_TRSV_REG_CFG(0x043, 0xa6, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x048, 0x74, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x034, 0x35, PWR_MODE_HS_G2_SER_A),
+ PHY_TRSV_REG_CFG(0x034, 0x36, PWR_MODE_HS_G2_SER_B),
+ PHY_TRSV_REG_CFG(0x035, 0x5b, PWR_MODE_HS_G2_SER_A),
+ PHY_TRSV_REG_CFG(0x035, 0x5c, PWR_MODE_HS_G2_SER_B),
+ END_UFS_PHY_CFG
+};
+
+/* Calibration for HS mode series A/B after PMC */
+static const struct samsung_ufs_phy_cfg exynos7_post_pwr_hs_cfg[] = {
+ PHY_COMN_REG_CFG(0x015, 0x00, PWR_MODE_HS_ANY),
+ PHY_TRSV_REG_CFG(0x04d, 0x83, PWR_MODE_HS_ANY),
+ END_UFS_PHY_CFG
+};
+
+static const struct samsung_ufs_phy_cfg *exynos7_ufs_phy_cfgs[CFG_TAG_MAX] = {
+ [CFG_PRE_INIT] = exynos7_pre_init_cfg,
+ [CFG_PRE_PWR_HS] = exynos7_pre_pwr_hs_cfg,
+ [CFG_POST_PWR_HS] = exynos7_post_pwr_hs_cfg,
+};
+
+static const struct samsung_ufs_phy_drvdata exynos7_ufs_phy = {
+ .cfg = exynos7_ufs_phy_cfgs,
+ .isol = {
+ .offset = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL,
+ .mask = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_MASK,
+ .en = EXYNOS7_EMBEDDED_COMBO_PHY_CTRL_EN,
+ },
+	.has_symbol_clk = true,
+};
+
+#endif /* _PHY_EXYNOS7_UFS_H_ */
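
As a reading aid, here is a worked expansion of one table entry, using the
PHY_TRSV_REG_CFG and PHY_APB_ADDR macros from phy-samsung-ufs.h (the offsets
are computed by hand and shown for illustration only):

	/* PHY_TRSV_REG_CFG(0x035, 0x58, PWR_MODE_ANY) expands to roughly: */
	{
		.off_0 = 0x035 << 2,		/* lane 0 APB offset: 0x0d4 */
		.off_1 = (0x035 + 0x30) << 2,	/* lane 1 APB offset: 0x194 */
		.val   = 0x58,
		.desc  = PWR_MODE_ANY,
		.id    = PHY_TRSV_BLK,
	}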
diff --git a/drivers/phy/samsung/phy-samsung-ufs.c b/drivers/phy/samsung/phy-samsung-ufs.c
new file mode 100644
index 000000000000..9832599a0283
--- /dev/null
+++ b/drivers/phy/samsung/phy-samsung-ufs.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UFS PHY driver for Samsung SoC
+ *
+ * Copyright (C) 2020 Samsung Electronics Co., Ltd.
+ * Author: Seungwon Jeon <essuuj@gmail.com>
+ * Author: Alim Akhtar <alim.akhtar@samsung.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "phy-samsung-ufs.h"
+
+#define for_each_phy_lane(phy, i) \
+ for (i = 0; i < (phy)->lane_cnt; i++)
+#define for_each_phy_cfg(cfg) \
+ for (; (cfg)->id; (cfg)++)
+
+#define PHY_DEF_LANE_CNT 1
+
+static void samsung_ufs_phy_config(struct samsung_ufs_phy *phy,
+ const struct samsung_ufs_phy_cfg *cfg,
+ u8 lane)
+{
+ enum {LANE_0, LANE_1}; /* lane index */
+
+ switch (lane) {
+ case LANE_0:
+ writel(cfg->val, (phy)->reg_pma + cfg->off_0);
+ break;
+ case LANE_1:
+ if (cfg->id == PHY_TRSV_BLK)
+ writel(cfg->val, (phy)->reg_pma + cfg->off_1);
+ break;
+ }
+}
+
+static int samsung_ufs_phy_wait_for_lock_acq(struct phy *phy)
+{
+ struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
+ const unsigned int timeout_us = 100000;
+ const unsigned int sleep_us = 10;
+ u32 val;
+ int err;
+
+ err = readl_poll_timeout(
+ ufs_phy->reg_pma + PHY_APB_ADDR(PHY_PLL_LOCK_STATUS),
+ val, (val & PHY_PLL_LOCK_BIT), sleep_us, timeout_us);
+ if (err) {
+		dev_err(ufs_phy->dev,
+			"failed to acquire phy pll lock: %d\n", err);
+ goto out;
+ }
+
+ err = readl_poll_timeout(
+ ufs_phy->reg_pma + PHY_APB_ADDR(PHY_CDR_LOCK_STATUS),
+ val, (val & PHY_CDR_LOCK_BIT), sleep_us, timeout_us);
+ if (err)
+		dev_err(ufs_phy->dev,
+			"failed to acquire phy cdr lock: %d\n", err);
+out:
+ return err;
+}
+
+static int samsung_ufs_phy_calibrate(struct phy *phy)
+{
+ struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
+	const struct samsung_ufs_phy_cfg **cfgs = ufs_phy->cfg;
+ const struct samsung_ufs_phy_cfg *cfg;
+ int err = 0;
+ int i;
+
+ if (unlikely(ufs_phy->ufs_phy_state < CFG_PRE_INIT ||
+ ufs_phy->ufs_phy_state >= CFG_TAG_MAX)) {
+ dev_err(ufs_phy->dev, "invalid phy config index %d\n", ufs_phy->ufs_phy_state);
+ return -EINVAL;
+ }
+
+ cfg = cfgs[ufs_phy->ufs_phy_state];
+ if (!cfg)
+ goto out;
+
+ for_each_phy_cfg(cfg) {
+ for_each_phy_lane(ufs_phy, i) {
+ samsung_ufs_phy_config(ufs_phy, cfg, i);
+ }
+ }
+
+ if (ufs_phy->ufs_phy_state == CFG_POST_PWR_HS)
+ err = samsung_ufs_phy_wait_for_lock_acq(phy);
+
+	/*
+	 * In the Samsung UFSHCI, the PHY needs to be calibrated at several
+	 * stages: before link startup, after link startup, before a power
+	 * mode change and after a power mode change. The state machine below
+	 * makes sure the PHY is calibrated in each state: after configuring
+	 * the PHY for the current state, it advances to the next state so
+	 * that the next calibration call programs the following stage's
+	 * values.
+	 */
+out:
+ switch (ufs_phy->ufs_phy_state) {
+ case CFG_PRE_INIT:
+ ufs_phy->ufs_phy_state = CFG_POST_INIT;
+ break;
+ case CFG_POST_INIT:
+ ufs_phy->ufs_phy_state = CFG_PRE_PWR_HS;
+ break;
+ case CFG_PRE_PWR_HS:
+ ufs_phy->ufs_phy_state = CFG_POST_PWR_HS;
+ break;
+ case CFG_POST_PWR_HS:
+ /* Change back to INIT state */
+ ufs_phy->ufs_phy_state = CFG_PRE_INIT;
+ break;
+ default:
+ dev_err(ufs_phy->dev, "wrong state for phy calibration\n");
+ }
+
+ return err;
+}
+
+static int samsung_ufs_phy_symbol_clk_init(struct samsung_ufs_phy *phy)
+{
+ int ret;
+
+ phy->tx0_symbol_clk = devm_clk_get(phy->dev, "tx0_symbol_clk");
+ if (IS_ERR(phy->tx0_symbol_clk)) {
+ dev_err(phy->dev, "failed to get tx0_symbol_clk clock\n");
+ return PTR_ERR(phy->tx0_symbol_clk);
+ }
+
+ phy->rx0_symbol_clk = devm_clk_get(phy->dev, "rx0_symbol_clk");
+ if (IS_ERR(phy->rx0_symbol_clk)) {
+ dev_err(phy->dev, "failed to get rx0_symbol_clk clock\n");
+ return PTR_ERR(phy->rx0_symbol_clk);
+ }
+
+ phy->rx1_symbol_clk = devm_clk_get(phy->dev, "rx1_symbol_clk");
+ if (IS_ERR(phy->rx1_symbol_clk)) {
+ dev_err(phy->dev, "failed to get rx1_symbol_clk clock\n");
+ return PTR_ERR(phy->rx1_symbol_clk);
+ }
+
+ ret = clk_prepare_enable(phy->tx0_symbol_clk);
+ if (ret) {
+ dev_err(phy->dev, "%s: tx0_symbol_clk enable failed %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(phy->rx0_symbol_clk);
+ if (ret) {
+ dev_err(phy->dev, "%s: rx0_symbol_clk enable failed %d\n", __func__, ret);
+ goto out_disable_tx0_clk;
+ }
+
+ ret = clk_prepare_enable(phy->rx1_symbol_clk);
+ if (ret) {
+ dev_err(phy->dev, "%s: rx1_symbol_clk enable failed %d\n", __func__, ret);
+ goto out_disable_rx0_clk;
+ }
+
+ return 0;
+
+out_disable_rx0_clk:
+ clk_disable_unprepare(phy->rx0_symbol_clk);
+out_disable_tx0_clk:
+ clk_disable_unprepare(phy->tx0_symbol_clk);
+out:
+ return ret;
+}
+
+static int samsung_ufs_phy_clks_init(struct samsung_ufs_phy *phy)
+{
+ int ret;
+
+ phy->ref_clk = devm_clk_get(phy->dev, "ref_clk");
+	if (IS_ERR(phy->ref_clk)) {
+		dev_err(phy->dev, "failed to get ref_clk clock\n");
+		return PTR_ERR(phy->ref_clk);
+	}
+
+ ret = clk_prepare_enable(phy->ref_clk);
+ if (ret) {
+ dev_err(phy->dev, "%s: ref_clk enable failed %d\n", __func__, ret);
+ return ret;
+ }
+
+	dev_dbg(phy->dev, "UFS MPHY ref_clk_rate = %lu\n", clk_get_rate(phy->ref_clk));
+
+ return 0;
+}
+
+static int samsung_ufs_phy_init(struct phy *phy)
+{
+ struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
+ int ret;
+
+ ss_phy->lane_cnt = phy->attrs.bus_width;
+ ss_phy->ufs_phy_state = CFG_PRE_INIT;
+
+	if (ss_phy->drvdata->has_symbol_clk) {
+		ret = samsung_ufs_phy_symbol_clk_init(ss_phy);
+		if (ret) {
+			dev_err(ss_phy->dev, "failed to set ufs phy symbol clocks\n");
+			return ret;
+		}
+	}
+
+	ret = samsung_ufs_phy_clks_init(ss_phy);
+	if (ret) {
+		dev_err(ss_phy->dev, "failed to set ufs phy clocks\n");
+		return ret;
+	}
+
+ ret = samsung_ufs_phy_calibrate(phy);
+ if (ret)
+ dev_err(ss_phy->dev, "ufs phy calibration failed\n");
+
+ return ret;
+}
+
+static int samsung_ufs_phy_power_on(struct phy *phy)
+{
+ struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
+
+ samsung_ufs_phy_ctrl_isol(ss_phy, false);
+ return 0;
+}
+
+static int samsung_ufs_phy_power_off(struct phy *phy)
+{
+ struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
+
+ samsung_ufs_phy_ctrl_isol(ss_phy, true);
+ return 0;
+}
+
+static int samsung_ufs_phy_set_mode(struct phy *generic_phy,
+ enum phy_mode mode, int submode)
+{
+ struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(generic_phy);
+
+ ss_phy->mode = PHY_MODE_INVALID;
+
+ if (mode > 0)
+ ss_phy->mode = mode;
+
+ return 0;
+}
+
+static int samsung_ufs_phy_exit(struct phy *phy)
+{
+ struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
+
+ clk_disable_unprepare(ss_phy->ref_clk);
+
+ if (ss_phy->drvdata->has_symbol_clk) {
+ clk_disable_unprepare(ss_phy->tx0_symbol_clk);
+ clk_disable_unprepare(ss_phy->rx0_symbol_clk);
+ clk_disable_unprepare(ss_phy->rx1_symbol_clk);
+ }
+
+ return 0;
+}
+
+static const struct phy_ops samsung_ufs_phy_ops = {
+ .init = samsung_ufs_phy_init,
+ .exit = samsung_ufs_phy_exit,
+ .power_on = samsung_ufs_phy_power_on,
+ .power_off = samsung_ufs_phy_power_off,
+ .calibrate = samsung_ufs_phy_calibrate,
+ .set_mode = samsung_ufs_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id samsung_ufs_phy_match[];
+
+static int samsung_ufs_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ struct samsung_ufs_phy *phy;
+ struct phy *gen_phy;
+ struct phy_provider *phy_provider;
+ const struct samsung_ufs_phy_drvdata *drvdata;
+ int err = 0;
+
+ match = of_match_node(samsung_ufs_phy_match, dev->of_node);
+ if (!match) {
+ err = -EINVAL;
+ dev_err(dev, "failed to get match_node\n");
+ goto out;
+ }
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ phy->reg_pma = devm_platform_ioremap_resource_byname(pdev, "phy-pma");
+ if (IS_ERR(phy->reg_pma)) {
+ err = PTR_ERR(phy->reg_pma);
+ goto out;
+ }
+
+ phy->reg_pmu = syscon_regmap_lookup_by_phandle(
+ dev->of_node, "samsung,pmu-syscon");
+ if (IS_ERR(phy->reg_pmu)) {
+ err = PTR_ERR(phy->reg_pmu);
+ dev_err(dev, "failed syscon remap for pmu\n");
+ goto out;
+ }
+
+ gen_phy = devm_phy_create(dev, NULL, &samsung_ufs_phy_ops);
+ if (IS_ERR(gen_phy)) {
+ err = PTR_ERR(gen_phy);
+ dev_err(dev, "failed to create PHY for ufs-phy\n");
+ goto out;
+ }
+
+ drvdata = match->data;
+ phy->dev = dev;
+ phy->drvdata = drvdata;
+	phy->cfg = drvdata->cfg;
+ phy->isol = &drvdata->isol;
+ phy->lane_cnt = PHY_DEF_LANE_CNT;
+
+ phy_set_drvdata(gen_phy, phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ err = PTR_ERR(phy_provider);
+ dev_err(dev, "failed to register phy-provider\n");
+ goto out;
+ }
+out:
+ return err;
+}
+
+static const struct of_device_id samsung_ufs_phy_match[] = {
+ {
+ .compatible = "samsung,exynos7-ufs-phy",
+ .data = &exynos7_ufs_phy,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, samsung_ufs_phy_match);
+
+static struct platform_driver samsung_ufs_phy_driver = {
+ .probe = samsung_ufs_phy_probe,
+ .driver = {
+ .name = "samsung-ufs-phy",
+ .of_match_table = samsung_ufs_phy_match,
+ },
+};
+module_platform_driver(samsung_ufs_phy_driver);
+MODULE_DESCRIPTION("Samsung SoC UFS PHY Driver");
+MODULE_AUTHOR("Seungwon Jeon <essuuj@gmail.com>");
+MODULE_AUTHOR("Alim Akhtar <alim.akhtar@samsung.com>");
+MODULE_LICENSE("GPL v2");
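
The state machine in samsung_ufs_phy_calibrate() assumes the UFS host driver
calls into the PHY once per stage. A hedged sketch of the expected call
sequence (the host-side ordering is illustrative, not taken from this patch):

	phy_init(phy);		/* programs CFG_PRE_INIT, advances to CFG_POST_INIT */
	/* ... link startup ... */
	phy_calibrate(phy);	/* CFG_POST_INIT has no table, advances to CFG_PRE_PWR_HS */
	phy_calibrate(phy);	/* programs CFG_PRE_PWR_HS, advances to CFG_POST_PWR_HS */
	/* ... power mode change to an HS gear ... */
	phy_calibrate(phy);	/* programs CFG_POST_PWR_HS, then polls PLL/CDR lock */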
diff --git a/drivers/phy/samsung/phy-samsung-ufs.h b/drivers/phy/samsung/phy-samsung-ufs.h
new file mode 100644
index 000000000000..5de78710524c
--- /dev/null
+++ b/drivers/phy/samsung/phy-samsung-ufs.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UFS PHY driver for Samsung Exynos SoC
+ *
+ * Copyright (C) 2020 Samsung Electronics Co., Ltd.
+ * Author: Seungwon Jeon <essuuj@gmail.com>
+ * Author: Alim Akhtar <alim.akhtar@samsung.com>
+ *
+ */
+#ifndef _PHY_SAMSUNG_UFS_
+#define _PHY_SAMSUNG_UFS_
+
+#define PHY_COMN_BLK 1
+#define PHY_TRSV_BLK 2
+#define END_UFS_PHY_CFG { 0 }
+#define PHY_TRSV_CH_OFFSET 0x30
+#define PHY_APB_ADDR(off) ((off) << 2)
+
+#define PHY_COMN_REG_CFG(o, v, d) { \
+ .off_0 = PHY_APB_ADDR((o)), \
+ .off_1 = 0, \
+ .val = (v), \
+ .desc = (d), \
+ .id = PHY_COMN_BLK, \
+}
+
+#define PHY_TRSV_REG_CFG(o, v, d) { \
+ .off_0 = PHY_APB_ADDR((o)), \
+ .off_1 = PHY_APB_ADDR((o) + PHY_TRSV_CH_OFFSET), \
+ .val = (v), \
+ .desc = (d), \
+ .id = PHY_TRSV_BLK, \
+}
+
+/* UFS PHY registers */
+#define PHY_PLL_LOCK_STATUS 0x1e
+#define PHY_CDR_LOCK_STATUS 0x5e
+
+#define PHY_PLL_LOCK_BIT BIT(5)
+#define PHY_CDR_LOCK_BIT BIT(4)
+
+/* description for PHY calibration */
+enum {
+ /* applicable to any */
+ PWR_DESC_ANY = 0,
+ /* mode */
+ PWR_DESC_PWM = 1,
+ PWR_DESC_HS = 2,
+ /* series */
+ PWR_DESC_SER_A = 1,
+ PWR_DESC_SER_B = 2,
+ /* gear */
+ PWR_DESC_G1 = 1,
+ PWR_DESC_G2 = 2,
+ PWR_DESC_G3 = 3,
+ /* field mask */
+ MD_MASK = 0x3,
+ SR_MASK = 0x3,
+ GR_MASK = 0x7,
+};
+
+#define PWR_MODE_HS_G1_ANY PWR_MODE_HS(PWR_DESC_G1, PWR_DESC_ANY)
+#define PWR_MODE_HS_G1_SER_A PWR_MODE_HS(PWR_DESC_G1, PWR_DESC_SER_A)
+#define PWR_MODE_HS_G1_SER_B PWR_MODE_HS(PWR_DESC_G1, PWR_DESC_SER_B)
+#define PWR_MODE_HS_G2_ANY PWR_MODE_HS(PWR_DESC_G2, PWR_DESC_ANY)
+#define PWR_MODE_HS_G2_SER_A PWR_MODE_HS(PWR_DESC_G2, PWR_DESC_SER_A)
+#define PWR_MODE_HS_G2_SER_B PWR_MODE_HS(PWR_DESC_G2, PWR_DESC_SER_B)
+#define PWR_MODE_HS_G3_ANY PWR_MODE_HS(PWR_DESC_G3, PWR_DESC_ANY)
+#define PWR_MODE_HS_G3_SER_A PWR_MODE_HS(PWR_DESC_G3, PWR_DESC_SER_A)
+#define PWR_MODE_HS_G3_SER_B PWR_MODE_HS(PWR_DESC_G3, PWR_DESC_SER_B)
+#define PWR_MODE(g, s, m) ((((g) & GR_MASK) << 4) |\
+ (((s) & SR_MASK) << 2) | ((m) & MD_MASK))
+#define PWR_MODE_PWM_ANY PWR_MODE(PWR_DESC_ANY,\
+ PWR_DESC_ANY, PWR_DESC_PWM)
+#define PWR_MODE_HS(g, s) ((((g) & GR_MASK) << 4) |\
+ (((s) & SR_MASK) << 2) | PWR_DESC_HS)
+#define PWR_MODE_HS_ANY PWR_MODE(PWR_DESC_ANY,\
+ PWR_DESC_ANY, PWR_DESC_HS)
+#define PWR_MODE_ANY PWR_MODE(PWR_DESC_ANY,\
+ PWR_DESC_ANY, PWR_DESC_ANY)
+/* PHY calibration point/state */
+enum {
+ CFG_PRE_INIT,
+ CFG_POST_INIT,
+ CFG_PRE_PWR_HS,
+ CFG_POST_PWR_HS,
+ CFG_TAG_MAX,
+};
+
+struct samsung_ufs_phy_cfg {
+ u32 off_0;
+ u32 off_1;
+ u32 val;
+ u8 desc;
+ u8 id;
+};
+
+struct samsung_ufs_phy_drvdata {
+ const struct samsung_ufs_phy_cfg **cfg;
+ struct pmu_isol {
+ u32 offset;
+ u32 mask;
+ u32 en;
+ } isol;
+ bool has_symbol_clk;
+};
+
+struct samsung_ufs_phy {
+ struct device *dev;
+ void __iomem *reg_pma;
+ struct regmap *reg_pmu;
+ struct clk *ref_clk;
+ struct clk *ref_clk_parent;
+ struct clk *tx0_symbol_clk;
+ struct clk *rx0_symbol_clk;
+ struct clk *rx1_symbol_clk;
+ const struct samsung_ufs_phy_drvdata *drvdata;
+	const struct samsung_ufs_phy_cfg **cfg;
+ const struct pmu_isol *isol;
+ u8 lane_cnt;
+ int ufs_phy_state;
+ enum phy_mode mode;
+};
+
+static inline struct samsung_ufs_phy *get_samsung_ufs_phy(struct phy *phy)
+{
+ return (struct samsung_ufs_phy *)phy_get_drvdata(phy);
+}
+
+static inline void samsung_ufs_phy_ctrl_isol(
+		struct samsung_ufs_phy *phy, bool isol)
+{
+ regmap_update_bits(phy->reg_pmu, phy->isol->offset,
+ phy->isol->mask, isol ? 0 : phy->isol->en);
+}
+
+#include "phy-exynos7-ufs.h"
+
+#endif /* _PHY_SAMSUNG_UFS_ */
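
The desc byte packs the gear into bits [6:4], the rate series into [3:2] and
the mode into [1:0], with zero meaning "any". A minimal sketch of a matching
helper built on that layout (hypothetical - the driver above applies every
entry of the active table rather than filtering by desc):

	static bool desc_matches(u8 desc, u8 gear, u8 series, u8 mode)
	{
		u8 dg = (desc >> 4) & GR_MASK;
		u8 ds = (desc >> 2) & SR_MASK;
		u8 dm = desc & MD_MASK;

		return (!dg || dg == gear) && (!ds || ds == series) &&
		       (!dm || dm == mode);
	}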
diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c
index 090aa02e02de..a3ed3ff04690 100644
--- a/drivers/phy/samsung/phy-samsung-usb2.c
+++ b/drivers/phy/samsung/phy-samsung-usb2.c
@@ -255,7 +255,7 @@ static struct platform_driver samsung_usb2_phy_driver = {
};
module_platform_driver(samsung_usb2_phy_driver);
-MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC USB PHY driver");
+MODULE_DESCRIPTION("Samsung S5P/Exynos SoC USB PHY driver");
MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:samsung-usb2-phy");
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 56bdea4b0bd9..2b3639cba51a 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -327,7 +327,7 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
if (IS_ERR(usbphyc->base))
return PTR_ERR(usbphyc->base);
- usbphyc->clk = devm_clk_get(dev, 0);
+ usbphyc->clk = devm_clk_get(dev, NULL);
if (IS_ERR(usbphyc->clk)) {
ret = PTR_ERR(usbphyc->clk);
dev_err(dev, "clk get failed: %d\n", ret);
@@ -340,7 +340,7 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
return ret;
}
- usbphyc->rst = devm_reset_control_get(dev, 0);
+ usbphyc->rst = devm_reset_control_get(dev, NULL);
if (!IS_ERR(usbphyc->rst)) {
reset_control_assert(usbphyc->rst);
udelay(2);
diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
index 26f194779064..57adc08a89b2 100644
--- a/drivers/phy/ti/phy-dm816x-usb.c
+++ b/drivers/phy/ti/phy-dm816x-usb.c
@@ -82,17 +82,16 @@ static int dm816x_usb_phy_init(struct phy *x)
{
struct dm816x_usb_phy *phy = phy_get_drvdata(x);
unsigned int val;
- int error;
if (clk_get_rate(phy->refclk) != 24000000)
dev_warn(phy->dev, "nonstandard phy refclk\n");
/* Set PLL ref clock and put phys to sleep */
- error = regmap_update_bits(phy->syscon, phy->usb_ctrl,
- DM816X_USB_CTRL_PHYCLKSRC |
- DM816X_USB_CTRL_PHYSLEEP1 |
- DM816X_USB_CTRL_PHYSLEEP0,
- 0);
+ regmap_update_bits(phy->syscon, phy->usb_ctrl,
+ DM816X_USB_CTRL_PHYCLKSRC |
+ DM816X_USB_CTRL_PHYSLEEP1 |
+ DM816X_USB_CTRL_PHYSLEEP0,
+ 0);
regmap_read(phy->syscon, phy->usb_ctrl, &val);
if ((val & 3) != 0)
dev_info(phy->dev,
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index a87946589eb7..e9332c90f75f 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -337,7 +337,6 @@ static int ti_pipe3_power_on(struct phy *x)
{
u32 val;
u32 mask;
- int ret;
unsigned long rate;
struct ti_pipe3 *phy = phy_get_drvdata(x);
bool rx_pending = false;
@@ -355,8 +354,8 @@ static int ti_pipe3_power_on(struct phy *x)
rate = rate / 1000000;
mask = OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_MASK;
val = rate << OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT;
- ret = regmap_update_bits(phy->phy_power_syscon, phy->power_reg,
- mask, val);
+ regmap_update_bits(phy->phy_power_syscon, phy->power_reg,
+ mask, val);
/*
* For PCIe, TX and RX must be powered on simultaneously.
* For USB and SATA, TX must be powered on before RX
diff --git a/drivers/phy/xilinx/Kconfig b/drivers/phy/xilinx/Kconfig
new file mode 100644
index 000000000000..d8b0d46b2b4d
--- /dev/null
+++ b/drivers/phy/xilinx/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+#
+# PHY drivers for Xilinx platforms
+#
+
+config PHY_XILINX_ZYNQMP
+ tristate "Xilinx ZynqMP PHY driver"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select GENERIC_PHY
+ help
+ Enable this to support ZynqMP High Speed Gigabit Transceiver
+ that is part of ZynqMP SoC.
diff --git a/drivers/phy/xilinx/Makefile b/drivers/phy/xilinx/Makefile
new file mode 100644
index 000000000000..3f1f6a2a9b45
--- /dev/null
+++ b/drivers/phy/xilinx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PHY_XILINX_ZYNQMP) += phy-zynqmp.o
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
new file mode 100644
index 000000000000..2b0f921b6ee3
--- /dev/null
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -0,0 +1,993 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-zynqmp.c - PHY driver for Xilinx ZynqMP GT.
+ *
+ * Copyright (C) 2018-2020 Xilinx Inc.
+ *
+ * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
+ * Author: Subbaraya Sundeep <sundeep.lkml@gmail.com>
+ * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This driver is currently tested with USB, SATA and DisplayPort. The
+ * PCIe and SGMII controllers should also work, but their support is
+ * still experimental.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+/*
+ * Lane Registers
+ */
+
+/* TX De-emphasis parameters */
+#define L0_TX_ANA_TM_18 0x0048
+#define L0_TX_ANA_TM_118 0x01d8
+#define L0_TX_ANA_TM_118_FORCE_17_0 BIT(0)
+
+/* DN Resistor calibration code parameters */
+#define L0_TXPMA_ST_3 0x0b0c
+#define L0_DN_CALIB_CODE 0x3f
+
+/* PMA control parameters */
+#define L0_TXPMD_TM_45 0x0cb4
+#define L0_TXPMD_TM_48 0x0cc0
+#define L0_TXPMD_TM_45_OVER_DP_MAIN BIT(0)
+#define L0_TXPMD_TM_45_ENABLE_DP_MAIN BIT(1)
+#define L0_TXPMD_TM_45_OVER_DP_POST1 BIT(2)
+#define L0_TXPMD_TM_45_ENABLE_DP_POST1 BIT(3)
+#define L0_TXPMD_TM_45_OVER_DP_POST2 BIT(4)
+#define L0_TXPMD_TM_45_ENABLE_DP_POST2 BIT(5)
+
+/* PCS control parameters */
+#define L0_TM_DIG_6 0x106c
+#define L0_TM_DIS_DESCRAMBLE_DECODER 0x0f
+#define L0_TX_DIG_61 0x00f4
+#define L0_TM_DISABLE_SCRAMBLE_ENCODER 0x0f
+
+/* PLL Test Mode register parameters */
+#define L0_TM_PLL_DIG_37 0x2094
+#define L0_TM_COARSE_CODE_LIMIT 0x10
+
+/* PLL SSC step size offsets */
+#define L0_PLL_SS_STEPS_0_LSB 0x2368
+#define L0_PLL_SS_STEPS_1_MSB 0x236c
+#define L0_PLL_SS_STEP_SIZE_0_LSB 0x2370
+#define L0_PLL_SS_STEP_SIZE_1 0x2374
+#define L0_PLL_SS_STEP_SIZE_2 0x2378
+#define L0_PLL_SS_STEP_SIZE_3_MSB 0x237c
+#define L0_PLL_STATUS_READ_1 0x23e4
+
+/* SSC step size parameters */
+#define STEP_SIZE_0_MASK 0xff
+#define STEP_SIZE_1_MASK 0xff
+#define STEP_SIZE_2_MASK 0xff
+#define STEP_SIZE_3_MASK 0x3
+#define STEP_SIZE_SHIFT 8
+#define FORCE_STEP_SIZE 0x10
+#define FORCE_STEPS 0x20
+#define STEPS_0_MASK 0xff
+#define STEPS_1_MASK 0x07
+
+/* Reference clock selection parameters */
+#define L0_Ln_REF_CLK_SEL(n) (0x2860 + (n) * 4)
+#define L0_REF_CLK_SEL_MASK 0x8f
+
+/* Calibration digital logic parameters */
+#define L3_TM_CALIB_DIG19 0xec4c
+#define L3_CALIB_DONE_STATUS 0xef14
+#define L3_TM_CALIB_DIG18 0xec48
+#define L3_TM_CALIB_DIG19_NSW 0x07
+#define L3_TM_CALIB_DIG18_NSW 0xe0
+#define L3_TM_OVERRIDE_NSW_CODE 0x20
+#define L3_CALIB_DONE 0x02
+#define L3_NSW_SHIFT 5
+#define L3_NSW_PIPE_SHIFT 4
+#define L3_NSW_CALIB_SHIFT 3
+
+#define PHY_REG_OFFSET 0x4000
+
+/*
+ * Global Registers
+ */
+
+/* Refclk selection parameters */
+#define PLL_REF_SEL(n) (0x10000 + (n) * 4)
+#define PLL_FREQ_MASK 0x1f
+#define PLL_STATUS_LOCKED 0x10
+
+/* Inter Connect Matrix parameters */
+#define ICM_CFG0 0x10010
+#define ICM_CFG1 0x10014
+#define ICM_CFG0_L0_MASK 0x07
+#define ICM_CFG0_L1_MASK 0x70
+#define ICM_CFG1_L2_MASK 0x07
+#define ICM_CFG2_L3_MASK 0x70
+#define ICM_CFG_SHIFT 4
+
+/* Inter Connect Matrix allowed protocols */
+#define ICM_PROTOCOL_PD 0x0
+#define ICM_PROTOCOL_PCIE 0x1
+#define ICM_PROTOCOL_SATA 0x2
+#define ICM_PROTOCOL_USB 0x3
+#define ICM_PROTOCOL_DP 0x4
+#define ICM_PROTOCOL_SGMII 0x5
+
+/* Test Mode common reset control parameters */
+#define TM_CMN_RST 0x10018
+#define TM_CMN_RST_EN 0x1
+#define TM_CMN_RST_SET 0x2
+#define TM_CMN_RST_MASK 0x3
+
+/* Bus width parameters */
+#define TX_PROT_BUS_WIDTH 0x10040
+#define RX_PROT_BUS_WIDTH 0x10044
+#define PROT_BUS_WIDTH_10 0x0
+#define PROT_BUS_WIDTH_20 0x1
+#define PROT_BUS_WIDTH_40 0x2
+#define PROT_BUS_WIDTH_SHIFT 2
+
+/* Number of GT lanes */
+#define NUM_LANES 4
+
+/* SIOU SATA control register */
+#define SATA_CONTROL_OFFSET 0x0100
+
+/* Number of controllers that can be mapped to each lane */
+#define CONTROLLERS_PER_LANE 5
+
+/* Protocol Type parameters */
+#define XPSGTR_TYPE_USB0 0 /* USB controller 0 */
+#define XPSGTR_TYPE_USB1 1 /* USB controller 1 */
+#define XPSGTR_TYPE_SATA_0 2 /* SATA controller lane 0 */
+#define XPSGTR_TYPE_SATA_1 3 /* SATA controller lane 1 */
+#define XPSGTR_TYPE_PCIE_0 4 /* PCIe controller lane 0 */
+#define XPSGTR_TYPE_PCIE_1 5 /* PCIe controller lane 1 */
+#define XPSGTR_TYPE_PCIE_2 6 /* PCIe controller lane 2 */
+#define XPSGTR_TYPE_PCIE_3 7 /* PCIe controller lane 3 */
+#define XPSGTR_TYPE_DP_0 8 /* Display Port controller lane 0 */
+#define XPSGTR_TYPE_DP_1 9 /* Display Port controller lane 1 */
+#define XPSGTR_TYPE_SGMII0 10 /* Ethernet SGMII controller 0 */
+#define XPSGTR_TYPE_SGMII1 11 /* Ethernet SGMII controller 1 */
+#define XPSGTR_TYPE_SGMII2 12 /* Ethernet SGMII controller 2 */
+#define XPSGTR_TYPE_SGMII3 13 /* Ethernet SGMII controller 3 */
+
+/* Timeout values */
+#define TIMEOUT_US 1000
+
+struct xpsgtr_dev;
+
+/**
+ * struct xpsgtr_ssc - structure to hold SSC settings for a lane
+ * @refclk_rate: PLL reference clock frequency
+ * @pll_ref_clk: value to be written to register for corresponding ref clk rate
+ * @steps: number of steps of SSC (Spread Spectrum Clock)
+ * @step_size: step size of each step
+ */
+struct xpsgtr_ssc {
+ u32 refclk_rate;
+ u8 pll_ref_clk;
+ u32 steps;
+ u32 step_size;
+};
+
+/**
+ * struct xpsgtr_phy - representation of a lane
+ * @phy: pointer to the kernel PHY device
+ * @type: controller which uses this lane
+ * @lane: lane number
+ * @protocol: protocol in which the lane operates
+ * @skip_phy_init: skip phy_init() if true
+ * @dev: pointer to the xpsgtr_dev instance
+ * @refclk: reference clock index
+ */
+struct xpsgtr_phy {
+ struct phy *phy;
+ u8 type;
+ u8 lane;
+ u8 protocol;
+ bool skip_phy_init;
+ struct xpsgtr_dev *dev;
+ unsigned int refclk;
+};
+
+/**
+ * struct xpsgtr_dev - representation of a ZynqMP GT device
+ * @dev: pointer to device
+ * @serdes: serdes base address
+ * @siou: siou base address
+ * @gtr_mutex: mutex for locking
+ * @phys: PHY lanes
+ * @refclk_sscs: spread spectrum settings for the reference clocks
+ * @tx_term_fix: fix for GT issue
+ * @saved_icm_cfg0: stored value of ICM CFG0 register
+ * @saved_icm_cfg1: stored value of ICM CFG1 register
+ */
+struct xpsgtr_dev {
+ struct device *dev;
+ void __iomem *serdes;
+ void __iomem *siou;
+ struct mutex gtr_mutex; /* mutex for locking */
+ struct xpsgtr_phy phys[NUM_LANES];
+ const struct xpsgtr_ssc *refclk_sscs[NUM_LANES];
+ bool tx_term_fix;
+ unsigned int saved_icm_cfg0;
+ unsigned int saved_icm_cfg1;
+};
+
+/*
+ * Configuration Data
+ */
+
+/* lookup table to hold all settings needed for a ref clock frequency */
+static const struct xpsgtr_ssc ssc_lookup[] = {
+ { 19200000, 0x05, 608, 264020 },
+ { 20000000, 0x06, 634, 243454 },
+ { 24000000, 0x07, 760, 168973 },
+ { 26000000, 0x08, 824, 143860 },
+ { 27000000, 0x09, 856, 86551 },
+ { 38400000, 0x0a, 1218, 65896 },
+ { 40000000, 0x0b, 634, 243454 },
+ { 52000000, 0x0c, 824, 143860 },
+ { 100000000, 0x0d, 1058, 87533 },
+ { 108000000, 0x0e, 856, 86551 },
+ { 125000000, 0x0f, 992, 119497 },
+ { 135000000, 0x10, 1070, 55393 },
+ { 150000000, 0x11, 792, 187091 }
+};
+
+/*
+ * I/O Accessors
+ */
+
+static inline u32 xpsgtr_read(struct xpsgtr_dev *gtr_dev, u32 reg)
+{
+ return readl(gtr_dev->serdes + reg);
+}
+
+static inline void xpsgtr_write(struct xpsgtr_dev *gtr_dev, u32 reg, u32 value)
+{
+ writel(value, gtr_dev->serdes + reg);
+}
+
+static inline void xpsgtr_clr_set(struct xpsgtr_dev *gtr_dev, u32 reg,
+ u32 clr, u32 set)
+{
+ u32 value = xpsgtr_read(gtr_dev, reg);
+
+ value &= ~clr;
+ value |= set;
+ xpsgtr_write(gtr_dev, reg, value);
+}
+
+static inline u32 xpsgtr_read_phy(struct xpsgtr_phy *gtr_phy, u32 reg)
+{
+ void __iomem *addr = gtr_phy->dev->serdes
+ + gtr_phy->lane * PHY_REG_OFFSET + reg;
+
+ return readl(addr);
+}
+
+static inline void xpsgtr_write_phy(struct xpsgtr_phy *gtr_phy,
+ u32 reg, u32 value)
+{
+ void __iomem *addr = gtr_phy->dev->serdes
+ + gtr_phy->lane * PHY_REG_OFFSET + reg;
+
+ writel(value, addr);
+}
+
+static inline void xpsgtr_clr_set_phy(struct xpsgtr_phy *gtr_phy,
+ u32 reg, u32 clr, u32 set)
+{
+ void __iomem *addr = gtr_phy->dev->serdes
+ + gtr_phy->lane * PHY_REG_OFFSET + reg;
+
+ writel((readl(addr) & ~clr) | set, addr);
+}
+
+/*
+ * Hardware Configuration
+ */
+
+/* Wait for the PLL to lock (with a timeout). */
+static int xpsgtr_wait_pll_lock(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ unsigned int timeout = TIMEOUT_US;
+ int ret;
+
+ dev_dbg(gtr_dev->dev, "Waiting for PLL lock\n");
+
+ while (1) {
+ u32 reg = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1);
+
+ if ((reg & PLL_STATUS_LOCKED) == PLL_STATUS_LOCKED) {
+ ret = 0;
+ break;
+ }
+
+ if (--timeout == 0) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ udelay(1);
+ }
+
+ if (ret == -ETIMEDOUT)
+ dev_err(gtr_dev->dev,
+ "lane %u (type %u, protocol %u): PLL lock timeout\n",
+ gtr_phy->lane, gtr_phy->type, gtr_phy->protocol);
+
+ return ret;
+}
+
+/* Configure PLL and spread-spectrum clock. */
+static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
+{
+ const struct xpsgtr_ssc *ssc;
+ u32 step_size;
+
+ ssc = gtr_phy->dev->refclk_sscs[gtr_phy->refclk];
+ step_size = ssc->step_size;
+
+ xpsgtr_clr_set(gtr_phy->dev, PLL_REF_SEL(gtr_phy->lane),
+ PLL_FREQ_MASK, ssc->pll_ref_clk);
+
+ /* Enable lane clock sharing, if required */
+ if (gtr_phy->refclk != gtr_phy->lane) {
+		/* Per-lane reference clock selection register */
+ xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
+ L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk);
+ }
+
+ /* SSC step size [7:0] */
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB,
+ STEP_SIZE_0_MASK, step_size & STEP_SIZE_0_MASK);
+
+ /* SSC step size [15:8] */
+ step_size >>= STEP_SIZE_SHIFT;
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_1,
+ STEP_SIZE_1_MASK, step_size & STEP_SIZE_1_MASK);
+
+ /* SSC step size [23:16] */
+ step_size >>= STEP_SIZE_SHIFT;
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_2,
+ STEP_SIZE_2_MASK, step_size & STEP_SIZE_2_MASK);
+
+ /* SSC steps [7:0] */
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_0_LSB,
+ STEPS_0_MASK, ssc->steps & STEPS_0_MASK);
+
+ /* SSC steps [10:8] */
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_1_MSB,
+ STEPS_1_MASK,
+ (ssc->steps >> STEP_SIZE_SHIFT) & STEPS_1_MASK);
+
+	/* SSC step size [25:24] */
+ step_size >>= STEP_SIZE_SHIFT;
+ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_3_MSB,
+ STEP_SIZE_3_MASK, (step_size & STEP_SIZE_3_MASK) |
+ FORCE_STEP_SIZE | FORCE_STEPS);
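+
+	/*
+	 * Worked example (illustrative): for the 100 MHz entry in
+	 * ssc_lookup, steps = 1058 and step_size = 87533 = 0x155ed, so the
+	 * writes above scatter 0xed, 0x55, 0x01 and 0x00 into the four
+	 * STEP_SIZE registers (the last ORed with FORCE_STEP_SIZE |
+	 * FORCE_STEPS), and 0x22 / 0x04 into the two STEPS registers.
+	 */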
+}
+
+/* Configure the lane protocol. */
+static void xpsgtr_lane_set_protocol(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ u8 protocol = gtr_phy->protocol;
+
+ switch (gtr_phy->lane) {
+ case 0:
+ xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L0_MASK, protocol);
+ break;
+ case 1:
+ xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L1_MASK,
+ protocol << ICM_CFG_SHIFT);
+ break;
+ case 2:
+ xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L0_MASK, protocol);
+ break;
+ case 3:
+ xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L1_MASK,
+ protocol << ICM_CFG_SHIFT);
+ break;
+ default:
+ /* We already checked 0 <= lane <= 3 */
+ break;
+ }
+}
+
+/* Bypass (de)scrambler and 8b/10b decoder and encoder. */
+static void xpsgtr_bypass_scrambler_8b10b(struct xpsgtr_phy *gtr_phy)
+{
+ xpsgtr_write_phy(gtr_phy, L0_TM_DIG_6, L0_TM_DIS_DESCRAMBLE_DECODER);
+ xpsgtr_write_phy(gtr_phy, L0_TX_DIG_61, L0_TM_DISABLE_SCRAMBLE_ENCODER);
+}
+
+/* DP-specific initialization. */
+static void xpsgtr_phy_init_dp(struct xpsgtr_phy *gtr_phy)
+{
+ xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_45,
+ L0_TXPMD_TM_45_OVER_DP_MAIN |
+ L0_TXPMD_TM_45_ENABLE_DP_MAIN |
+ L0_TXPMD_TM_45_OVER_DP_POST1 |
+ L0_TXPMD_TM_45_OVER_DP_POST2 |
+ L0_TXPMD_TM_45_ENABLE_DP_POST2);
+ xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_118,
+ L0_TX_ANA_TM_118_FORCE_17_0);
+}
+
+/* SATA-specific initialization. */
+static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+
+ xpsgtr_bypass_scrambler_8b10b(gtr_phy);
+
+ writel(gtr_phy->lane, gtr_dev->siou + SATA_CONTROL_OFFSET);
+}
+
+/* SGMII-specific initialization. */
+static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+
+ /* Set SGMII protocol TX and RX bus width to 10 bits. */
+ xpsgtr_write(gtr_dev, TX_PROT_BUS_WIDTH,
+ PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
+ xpsgtr_write(gtr_dev, RX_PROT_BUS_WIDTH,
+ PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
+
+ xpsgtr_bypass_scrambler_8b10b(gtr_phy);
+}
+
+/* Configure TX de-emphasis and margining for DP. */
+static void xpsgtr_phy_configure_dp(struct xpsgtr_phy *gtr_phy, unsigned int pre,
+ unsigned int voltage)
+{
+ static const u8 voltage_swing[4][4] = {
+ { 0x2a, 0x27, 0x24, 0x20 },
+ { 0x27, 0x23, 0x20, 0xff },
+ { 0x24, 0x20, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff }
+ };
+ static const u8 pre_emphasis[4][4] = {
+ { 0x02, 0x02, 0x02, 0x02 },
+ { 0x01, 0x01, 0x01, 0xff },
+ { 0x00, 0x00, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff }
+ };
+
+ xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_48, voltage_swing[pre][voltage]);
+ xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_18, pre_emphasis[pre][voltage]);
+}
+
+/*
+ * PHY Operations
+ */
+
+static bool xpsgtr_phy_init_required(struct xpsgtr_phy *gtr_phy)
+{
+ /*
+	 * As USB may save a snapshot of its state during hibernation, calling
+	 * phy_init() would put the USB controller into reset and lose that
+	 * saved snapshot. So avoid phy_init() for USB except when
+	 * gtr_phy->skip_phy_init is false (which happens when the FPD is shut
+	 * down during suspend or when the GT lane is changed from the current
+	 * one).
+ */
+ if (gtr_phy->protocol == ICM_PROTOCOL_USB && gtr_phy->skip_phy_init)
+ return false;
+ else
+ return true;
+}
+
+/*
+ * There is a functional issue in the GT. The TX termination resistance can be
+ * out of spec due to an issue in the calibration logic. This is the workaround
+ * to fix it, required for XCZU9EG silicon.
+ */
+static int xpsgtr_phy_tx_term_fix(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ u32 timeout = TIMEOUT_US;
+ u32 nsw;
+
+	/* Enable Test Mode control for CMN Reset */
+ xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
+
+ /* Set Test Mode reset */
+ xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);
+
+ xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18, 0x00);
+ xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, L3_TM_OVERRIDE_NSW_CODE);
+
+ /*
+	 * As part of the workaround sequence for the PMOS calibration fix,
+	 * we need to configure any lane's ICM_CFG to a valid protocol. This
+	 * deasserts the CMN_Resetn signal.
+ */
+ xpsgtr_lane_set_protocol(gtr_phy);
+
+ /* Clear Test Mode reset */
+ xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
+
+ dev_dbg(gtr_dev->dev, "calibrating...\n");
+
+ do {
+ u32 reg = xpsgtr_read(gtr_dev, L3_CALIB_DONE_STATUS);
+
+ if ((reg & L3_CALIB_DONE) == L3_CALIB_DONE)
+ break;
+
+ if (!--timeout) {
+			dev_err(gtr_dev->dev, "calibration timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ udelay(1);
+ } while (timeout > 0);
+
+ dev_dbg(gtr_dev->dev, "calibration done\n");
+
+ /* Reading NMOS Register Code */
+ nsw = xpsgtr_read(gtr_dev, L0_TXPMA_ST_3) & L0_DN_CALIB_CODE;
+
+ /* Set Test Mode reset */
+ xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);
+
+ /* Writing NMOS register values back [5:3] */
+ xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, nsw >> L3_NSW_CALIB_SHIFT);
+
+ /* Writing NMOS register value [2:0] */
+ xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18,
+ ((nsw & L3_TM_CALIB_DIG19_NSW) << L3_NSW_SHIFT) |
+ (1 << L3_NSW_PIPE_SHIFT));
+
+ /* Clear Test Mode reset */
+ xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
+
+ return 0;
+}
+
+static int xpsgtr_phy_init(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ int ret = 0;
+
+ mutex_lock(&gtr_dev->gtr_mutex);
+
+ /* Skip initialization if not required. */
+ if (!xpsgtr_phy_init_required(gtr_phy))
+ goto out;
+
+ if (gtr_dev->tx_term_fix) {
+ ret = xpsgtr_phy_tx_term_fix(gtr_phy);
+ if (ret < 0)
+ goto out;
+
+ gtr_dev->tx_term_fix = false;
+ }
+
+ /* Enable coarse code saturation limiting logic. */
+ xpsgtr_write_phy(gtr_phy, L0_TM_PLL_DIG_37, L0_TM_COARSE_CODE_LIMIT);
+
+ /*
+ * Configure the PLL, the lane protocol, and perform protocol-specific
+ * initialization.
+ */
+ xpsgtr_configure_pll(gtr_phy);
+ xpsgtr_lane_set_protocol(gtr_phy);
+
+ switch (gtr_phy->protocol) {
+ case ICM_PROTOCOL_DP:
+ xpsgtr_phy_init_dp(gtr_phy);
+ break;
+
+ case ICM_PROTOCOL_SATA:
+ xpsgtr_phy_init_sata(gtr_phy);
+ break;
+
+ case ICM_PROTOCOL_SGMII:
+ xpsgtr_phy_init_sgmii(gtr_phy);
+ break;
+ }
+
+out:
+ mutex_unlock(&gtr_dev->gtr_mutex);
+ return ret;
+}
+
+static int xpsgtr_phy_exit(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+
+ gtr_phy->skip_phy_init = false;
+
+ return 0;
+}
+
+static int xpsgtr_phy_power_on(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ int ret = 0;
+
+ /*
+ * Wait for the PLL to lock. For DP, only wait on DP0 to avoid
+	 * accumulating waits on both lanes. The user is expected to initialize
+ * lane 0 last.
+ */
+ if (gtr_phy->protocol != ICM_PROTOCOL_DP ||
+ gtr_phy->type == XPSGTR_TYPE_DP_0)
+ ret = xpsgtr_wait_pll_lock(phy);
+
+ return ret;
+}
+
+static int xpsgtr_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+
+ if (gtr_phy->protocol != ICM_PROTOCOL_DP)
+ return 0;
+
+ xpsgtr_phy_configure_dp(gtr_phy, opts->dp.pre[0], opts->dp.voltage[0]);
+
+ return 0;
+}
+
+static const struct phy_ops xpsgtr_phyops = {
+ .init = xpsgtr_phy_init,
+ .exit = xpsgtr_phy_exit,
+ .power_on = xpsgtr_phy_power_on,
+ .configure = xpsgtr_phy_configure,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * OF Xlate Support
+ */
+
+/* Set the lane type and protocol based on the PHY type and instance number. */
+static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type,
+ unsigned int phy_instance)
+{
+ unsigned int num_phy_types;
+ const int *phy_types;
+
+ switch (phy_type) {
+ case PHY_TYPE_SATA: {
+ static const int types[] = {
+ XPSGTR_TYPE_SATA_0,
+ XPSGTR_TYPE_SATA_1,
+ };
+
+ phy_types = types;
+ num_phy_types = ARRAY_SIZE(types);
+ gtr_phy->protocol = ICM_PROTOCOL_SATA;
+ break;
+ }
+ case PHY_TYPE_USB3: {
+ static const int types[] = {
+ XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_USB1,
+ };
+
+ phy_types = types;
+ num_phy_types = ARRAY_SIZE(types);
+ gtr_phy->protocol = ICM_PROTOCOL_USB;
+ break;
+ }
+ case PHY_TYPE_DP: {
+ static const int types[] = {
+ XPSGTR_TYPE_DP_0,
+ XPSGTR_TYPE_DP_1,
+ };
+
+ phy_types = types;
+ num_phy_types = ARRAY_SIZE(types);
+ gtr_phy->protocol = ICM_PROTOCOL_DP;
+ break;
+ }
+ case PHY_TYPE_PCIE: {
+ static const int types[] = {
+ XPSGTR_TYPE_PCIE_0,
+ XPSGTR_TYPE_PCIE_1,
+ XPSGTR_TYPE_PCIE_2,
+ XPSGTR_TYPE_PCIE_3,
+ };
+
+ phy_types = types;
+ num_phy_types = ARRAY_SIZE(types);
+ gtr_phy->protocol = ICM_PROTOCOL_PCIE;
+ break;
+ }
+ case PHY_TYPE_SGMII: {
+ static const int types[] = {
+ XPSGTR_TYPE_SGMII0,
+ XPSGTR_TYPE_SGMII1,
+ XPSGTR_TYPE_SGMII2,
+ XPSGTR_TYPE_SGMII3,
+ };
+
+ phy_types = types;
+ num_phy_types = ARRAY_SIZE(types);
+ gtr_phy->protocol = ICM_PROTOCOL_SGMII;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ if (phy_instance >= num_phy_types)
+ return -EINVAL;
+
+ gtr_phy->type = phy_types[phy_instance];
+ return 0;
+}
+
+/*
+ * Valid combinations of controllers and lanes (Interconnect Matrix).
+ */
+static const unsigned int icm_matrix[NUM_LANES][CONTROLLERS_PER_LANE] = {
+ { XPSGTR_TYPE_PCIE_0, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII0 },
+ { XPSGTR_TYPE_PCIE_1, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII1 },
+ { XPSGTR_TYPE_PCIE_2, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII2 },
+ { XPSGTR_TYPE_PCIE_3, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB1,
+ XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII3 }
+};
+
+/* Translate OF phandle and args to PHY instance. */
+static struct phy *xpsgtr_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+ struct xpsgtr_phy *gtr_phy;
+ unsigned int phy_instance;
+ unsigned int phy_lane;
+ unsigned int phy_type;
+ unsigned int refclk;
+ unsigned int i;
+ int ret;
+
+ if (args->args_count != 4) {
+ dev_err(dev, "Invalid number of cells in 'phy' property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Get the PHY parameters from the OF arguments and derive the lane
+ * type.
+ */
+ phy_lane = args->args[0];
+ if (phy_lane >= ARRAY_SIZE(gtr_dev->phys)) {
+ dev_err(dev, "Invalid lane number %u\n", phy_lane);
+ return ERR_PTR(-ENODEV);
+ }
+
+ gtr_phy = &gtr_dev->phys[phy_lane];
+ phy_type = args->args[1];
+ phy_instance = args->args[2];
+
+ ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
+ if (ret < 0) {
+ dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
+ return ERR_PTR(ret);
+ }
+
+ refclk = args->args[3];
+ if (refclk >= ARRAY_SIZE(gtr_dev->refclk_sscs) ||
+ !gtr_dev->refclk_sscs[refclk]) {
+ dev_err(dev, "Invalid reference clock number %u\n", refclk);
+ return ERR_PTR(-EINVAL);
+ }
+
+ gtr_phy->refclk = refclk;
+
+ /*
+ * Ensure that the Interconnect Matrix is obeyed, i.e a given lane type
+ * is allowed to operate on the lane.
+ */
+ for (i = 0; i < CONTROLLERS_PER_LANE; i++) {
+ if (icm_matrix[phy_lane][i] == gtr_phy->type)
+ return gtr_phy->phy;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Power Management
+ */
+
+static int __maybe_unused xpsgtr_suspend(struct device *dev)
+{
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+
+ /* Save the snapshot ICM_CFG registers. */
+ gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
+ gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+
+ return 0;
+}
+
+static int __maybe_unused xpsgtr_resume(struct device *dev)
+{
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+ unsigned int icm_cfg0, icm_cfg1;
+ unsigned int i;
+ bool skip_phy_init;
+
+ icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
+ icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+
+ /* Return if no GT lanes got configured before suspend. */
+ if (!gtr_dev->saved_icm_cfg0 && !gtr_dev->saved_icm_cfg1)
+ return 0;
+
+ /* Check if the ICM configurations changed after suspend. */
+ if (icm_cfg0 == gtr_dev->saved_icm_cfg0 &&
+ icm_cfg1 == gtr_dev->saved_icm_cfg1)
+ skip_phy_init = true;
+ else
+ skip_phy_init = false;
+
+ /* Update the skip_phy_init for all gtr_phy instances. */
+ for (i = 0; i < ARRAY_SIZE(gtr_dev->phys); i++)
+ gtr_dev->phys[i].skip_phy_init = skip_phy_init;
+
+ return 0;
+}
+
+static const struct dev_pm_ops xpsgtr_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xpsgtr_suspend, xpsgtr_resume)
+};
+
+/*
+ * Probe & Platform Driver
+ */
+
+static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
+{
+ unsigned int refclk;
+
+ for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
+ unsigned long rate;
+ unsigned int i;
+ struct clk *clk;
+ char name[8];
+
+ snprintf(name, sizeof(name), "ref%u", refclk);
+ clk = devm_clk_get_optional(gtr_dev->dev, name);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(gtr_dev->dev,
+ "Failed to get reference clock %u: %ld\n",
+ refclk, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+
+ if (!clk)
+ continue;
+
+ /*
+ * Get the spread spectrum (SSC) settings for the reference
+ * clock rate.
+ */
+ rate = clk_get_rate(clk);
+
+		for (i = 0; i < ARRAY_SIZE(ssc_lookup); i++) {
+ if (rate == ssc_lookup[i].refclk_rate) {
+ gtr_dev->refclk_sscs[refclk] = &ssc_lookup[i];
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(ssc_lookup)) {
+ dev_err(gtr_dev->dev,
+ "Invalid rate %lu for reference clock %u\n",
+ rate, refclk);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int xpsgtr_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct xpsgtr_dev *gtr_dev;
+ struct phy_provider *provider;
+ unsigned int port;
+ int ret;
+
+ gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
+ if (!gtr_dev)
+ return -ENOMEM;
+
+ gtr_dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, gtr_dev);
+
+ mutex_init(&gtr_dev->gtr_mutex);
+
+ if (of_device_is_compatible(np, "xlnx,zynqmp-psgtr"))
+ gtr_dev->tx_term_fix =
+ of_property_read_bool(np, "xlnx,tx-termination-fix");
+
+ /* Acquire resources. */
+ gtr_dev->serdes = devm_platform_ioremap_resource_byname(pdev, "serdes");
+ if (IS_ERR(gtr_dev->serdes))
+ return PTR_ERR(gtr_dev->serdes);
+
+ gtr_dev->siou = devm_platform_ioremap_resource_byname(pdev, "siou");
+ if (IS_ERR(gtr_dev->siou))
+ return PTR_ERR(gtr_dev->siou);
+
+ ret = xpsgtr_get_ref_clocks(gtr_dev);
+ if (ret)
+ return ret;
+
+ /* Create PHYs. */
+ for (port = 0; port < ARRAY_SIZE(gtr_dev->phys); ++port) {
+ struct xpsgtr_phy *gtr_phy = &gtr_dev->phys[port];
+ struct phy *phy;
+
+ gtr_phy->lane = port;
+ gtr_phy->dev = gtr_dev;
+
+ phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ gtr_phy->phy = phy;
+ phy_set_drvdata(phy, gtr_phy);
+ }
+
+ /* Register the PHY provider. */
+ provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(&pdev->dev, "registering provider failed\n");
+ return PTR_ERR(provider);
+ }
+ return 0;
+}
+
+static const struct of_device_id xpsgtr_of_match[] = {
+ { .compatible = "xlnx,zynqmp-psgtr", },
+ { .compatible = "xlnx,zynqmp-psgtr-v1.1", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
+
+static struct platform_driver xpsgtr_driver = {
+ .probe = xpsgtr_probe,
+ .driver = {
+ .name = "xilinx-psgtr",
+ .of_match_table = xpsgtr_of_match,
+ .pm = &xpsgtr_pm_ops,
+ },
+};
+
+module_platform_driver(xpsgtr_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Xilinx ZynqMP High speed Gigabit Transceiver");
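
The #phy-cells convention implied by xpsgtr_xlate() is <lane type instance
refclk>. A hedged sketch of how one specifier is vetted against the
Interconnect Matrix (the values are illustrative):

	/* <&psgtr 1 PHY_TYPE_SATA 1 1>: lane 1, SATA instance 1, refclk 1 */
	u8 type = XPSGTR_TYPE_SATA_1;	/* from xpsgtr_set_lane_type() */
	unsigned int lane = 1, i;
	bool valid = false;

	for (i = 0; i < CONTROLLERS_PER_LANE; i++)
		if (icm_matrix[lane][i] == type)
			valid = true;	/* lane 1 row lists XPSGTR_TYPE_SATA_1 */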
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index ff1ee159dca2..f8ff30cdafa6 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -7,6 +7,8 @@ config PINCTRL_MSM
select PINCONF
select GENERIC_PINCONF
select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
config PINCTRL_APQ8064
tristate "Qualcomm APQ8064 pin controller driver"
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 83b7d64bc4c1..c322f30a2064 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -832,6 +832,52 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
msm_gpio_irq_clear_unmask(d, false);
}
+/**
+ * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
+ * @d: The irq data.
+ *
+ * This is much like msm_gpio_update_dual_edge_pos() but for IRQs that are
+ * normally handled by the parent irqchip. The logic here is slightly
+ * different due to what's easy to do with our parent, but in principle it's
+ * the same.
+ */
+static void msm_gpio_update_dual_edge_parent(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct msm_pingroup *g = &pctrl->soc->groups[d->hwirq];
+ int loop_limit = 100;
+ unsigned int val;
+ unsigned int type;
+
+ /* Read the value and make a guess about what edge we need to catch */
+ val = msm_readl_io(pctrl, g) & BIT(g->in_bit);
+ type = val ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
+
+ do {
+ /* Set the parent to catch the next edge */
+ irq_chip_set_type_parent(d, type);
+
+ /*
+ * Possibly the line changed between when we last read "val"
+		 * (and decided what edge we needed) and when we set the edge.
+ * If the value didn't change (or changed and then changed
+ * back) then we're done.
+ */
+ val = msm_readl_io(pctrl, g) & BIT(g->in_bit);
+ if (type == IRQ_TYPE_EDGE_RISING) {
+ if (!val)
+ return;
+ type = IRQ_TYPE_EDGE_FALLING;
+ } else if (type == IRQ_TYPE_EDGE_FALLING) {
+ if (val)
+ return;
+ type = IRQ_TYPE_EDGE_RISING;
+ }
+ } while (loop_limit-- > 0);
+ dev_warn_once(pctrl->dev, "dual-edge irq failed to stabilize\n");
+}
+
static void msm_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -840,8 +886,11 @@ static void msm_gpio_irq_ack(struct irq_data *d)
unsigned long flags;
u32 val;
- if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
+ if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
+ msm_gpio_update_dual_edge_parent(d);
return;
+ }
g = &pctrl->soc->groups[d->hwirq];
@@ -860,6 +909,17 @@ static void msm_gpio_irq_ack(struct irq_data *d)
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
+static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d,
+ unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ return type == IRQ_TYPE_EDGE_BOTH &&
+ pctrl->soc->wakeirq_dual_edge_errata && d->parent_data &&
+ test_bit(d->hwirq, pctrl->skip_wake_irqs);
+}
+
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -868,11 +928,21 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
unsigned long flags;
u32 val;
+ if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
+ set_bit(d->hwirq, pctrl->dual_edge_irqs);
+ irq_set_handler_locked(d, handle_fasteoi_ack_irq);
+ msm_gpio_update_dual_edge_parent(d);
+ return 0;
+ }
+
if (d->parent_data)
irq_chip_set_type_parent(d, type);
- if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
+ clear_bit(d->hwirq, pctrl->dual_edge_irqs);
+ irq_set_handler_locked(d, handle_fasteoi_irq);
return 0;
+ }
g = &pctrl->soc->groups[d->hwirq];
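
The dual-edge emulation above hinges on a race check: after arming the parent
for one edge, re-read the line; if the level has already moved past the armed
edge, flip the programmed edge and retry. A compact model of the stability
test the loop applies (the helper name is illustrative):

	static bool armed_edge_is_stable(unsigned int type, bool level)
	{
		/* rising armed: line must still be low; falling: still high */
		return type == IRQ_TYPE_EDGE_RISING ? !level : level;
	}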
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 9452da18a78b..7486fe08eb9b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -113,6 +113,9 @@ struct msm_gpio_wakeirq_map {
* @pull_no_keeper: The SoC does not support keeper bias.
* @wakeirq_map: The map of wakeup capable GPIOs and the pin at PDC/MPM
* @nwakeirq_map: The number of entries in @wakeirq_map
+ * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
+ * to be aware that their parent can't handle dual
+ * edge interrupts.
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -128,6 +131,7 @@ struct msm_pinctrl_soc_data {
const int *reserved_gpios;
const struct msm_gpio_wakeirq_map *wakeirq_map;
unsigned int nwakeirq_map;
+ bool wakeirq_dual_edge_errata;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index 1b6465a882f2..1d9acad3c1ce 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -1147,6 +1147,7 @@ static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
.ntiles = ARRAY_SIZE(sc7180_tiles),
.wakeirq_map = sc7180_pdc_map,
.nwakeirq_map = ARRAY_SIZE(sc7180_pdc_map),
+ .wakeirq_dual_edge_errata = true,
};
static int sc7180_pinctrl_probe(struct platform_device *pdev)
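
Other SoCs whose wakeup parent has the same limitation can opt in the same
way. A hedged sketch for a hypothetical SoC (all names illustrative):

	static const struct msm_pinctrl_soc_data foo_pinctrl = {
		/* pins, groups and wakeirq_map filled in as for sc7180 */
		.wakeirq_dual_edge_errata = true,	/* parent can't latch EDGE_BOTH */
	};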
diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c
index 523a39bd0ff6..425e9441b7ca 100644
--- a/drivers/platform/chrome/cros_ec_trace.c
+++ b/drivers/platform/chrome/cros_ec_trace.c
@@ -161,6 +161,11 @@
TRACE_SYMBOL(EC_CMD_ADC_READ), \
TRACE_SYMBOL(EC_CMD_ROLLBACK_INFO), \
TRACE_SYMBOL(EC_CMD_AP_RESET), \
+ TRACE_SYMBOL(EC_CMD_REGULATOR_GET_INFO), \
+ TRACE_SYMBOL(EC_CMD_REGULATOR_ENABLE), \
+ TRACE_SYMBOL(EC_CMD_REGULATOR_IS_ENABLED), \
+ TRACE_SYMBOL(EC_CMD_REGULATOR_SET_VOLTAGE), \
+ TRACE_SYMBOL(EC_CMD_REGULATOR_GET_VOLTAGE), \
TRACE_SYMBOL(EC_CMD_CR51_BASE), \
TRACE_SYMBOL(EC_CMD_CR51_LAST), \
TRACE_SYMBOL(EC_CMD_FP_PASSTHRU), \
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index ed48917af162..b013445147dd 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -1,34 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016-2018 Vadim Pasternak <vadimp@mellanox.com>
+ * Mellanox hotplug driver
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (C) 2016-2020 Mellanox Technologies
*/
#include <linux/bitops.h>
@@ -42,6 +16,7 @@
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
+#include <linux/string_helpers.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>
@@ -97,6 +72,23 @@ struct mlxreg_hotplug_priv_data {
u8 not_asserted;
};
+/* Environment variables array for udev. */
+static char *mlxreg_hotplug_udev_envp[] = { NULL, NULL };
+
+static int
+mlxreg_hotplug_udev_event_send(struct kobject *kobj,
+ struct mlxreg_core_data *data, bool action)
+{
+ char event_str[MLXREG_CORE_LABEL_MAX_SIZE + 2];
+ char label[MLXREG_CORE_LABEL_MAX_SIZE] = { 0 };
+
+ mlxreg_hotplug_udev_envp[0] = event_str;
+ string_upper(label, data->label);
+	snprintf(event_str, sizeof(event_str), "%s=%d", label, !!action);
+
+ return kobject_uevent_env(kobj, KOBJ_CHANGE, mlxreg_hotplug_udev_envp);
+}
+
static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
struct mlxreg_core_data *data)
{
@@ -104,7 +96,7 @@ static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
struct i2c_client *client;
/* Notify user by sending hwmon uevent. */
- kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);
+ mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, true);
/*
* Return if adapter number is negative. It could be in case hotplug
@@ -144,7 +136,7 @@ mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
struct mlxreg_core_data *data)
{
/* Notify user by sending hwmon uevent. */
- kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);
+ mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, false);
if (data->hpdev.client) {
i2c_unregister_device(data->hpdev.client);
@@ -199,17 +191,49 @@ static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
struct mlxreg_core_hotplug_platform_data *pdata;
struct mlxreg_core_item *item;
struct mlxreg_core_data *data;
- int num_attrs = 0, id = 0, i, j;
+ unsigned long mask;
+ u32 regval;
+ int num_attrs = 0, id = 0, i, j, k, ret;
pdata = dev_get_platdata(&priv->pdev->dev);
item = pdata->items;
/* Go over all kinds of items - psu, pwr, fan. */
for (i = 0; i < pdata->counter; i++, item++) {
- num_attrs += item->count;
+ if (item->capability) {
+ /*
+ * Read group capability register to get actual number
+ * of interrupt capable components and set group mask
+ * accordingly.
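+			 * E.g. a reported count of 4 yields
+			 * item->mask = GENMASK(3, 0) = 0xf.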
+ */
+ ret = regmap_read(priv->regmap, item->capability,
+ &regval);
+ if (ret)
+ return ret;
+
+ item->mask = GENMASK((regval & item->mask) - 1, 0);
+ }
+
data = item->data;
- /* Go over all units within the item. */
- for (j = 0; j < item->count; j++, data++, id++) {
+
+ /* Go over all unmasked units within item. */
+ mask = item->mask;
+ k = 0;
+ for_each_set_bit(j, &mask, item->count) {
+ if (data->capability) {
+ /*
+ * Read capability register and skip non
+ * relevant attributes.
+ */
+ ret = regmap_read(priv->regmap,
+ data->capability, &regval);
+ if (ret)
+ return ret;
+ if (!(regval & data->bit)) {
+ data++;
+ continue;
+ }
+ }
PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
GFP_KERNEL,
@@ -227,9 +251,13 @@ static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
PRIV_DEV_ATTR(id).dev_attr.show =
mlxreg_hotplug_attr_show;
PRIV_DEV_ATTR(id).nr = i;
- PRIV_DEV_ATTR(id).index = j;
+ PRIV_DEV_ATTR(id).index = k;
sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
+ data++;
+ id++;
+ k++;
}
+ num_attrs += k;
}
priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
@@ -507,20 +535,6 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
item = pdata->items;
for (i = 0; i < pdata->counter; i++, item++) {
- if (item->capability) {
- /*
- * Read group capability register to get actual number
- * of interrupt capable components and set group mask
- * accordingly.
- */
- ret = regmap_read(priv->regmap, item->capability,
- &regval);
- if (ret)
- goto out;
-
- item->mask = GENMASK((regval & item->mask) - 1, 0);
- }
-
	/* Clear group presence event. */
ret = regmap_write(priv->regmap, item->reg +
MLXREG_HOTPLUG_EVENT_OFF, 0);
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
index acfaf64ffde6..7646708d57e4 100644
--- a/drivers/platform/mellanox/mlxreg-io.c
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -30,6 +30,7 @@
* @mlxreg_io_dev_attr: sysfs sensor device attribute array;
* @group: sysfs attribute group;
* @groups: list of sysfs attribute group for hwmon registration;
+ * @regsize: size of a register value;
*/
struct mlxreg_io_priv_data {
struct platform_device *pdev;
@@ -39,27 +40,30 @@ struct mlxreg_io_priv_data {
struct sensor_device_attribute mlxreg_io_dev_attr[MLXREG_IO_ATT_NUM];
struct attribute_group group;
const struct attribute_group *groups[2];
+ int regsize;
};
static int
mlxreg_io_get_reg(void *regmap, struct mlxreg_core_data *data, u32 in_val,
- bool rw_flag, u32 *regval)
+ bool rw_flag, int regsize, u32 *regval)
{
- int ret;
+ int i, val, ret;
ret = regmap_read(regmap, data->reg, regval);
if (ret)
goto access_error;
/*
- * There are three kinds of attributes: single bit, full register's
- * bits and bit sequence. For the first kind field mask indicates which
- * bits are not related and field bit is set zero. For the second kind
- * field mask is set to zero and field bit is set with all bits one.
- * No special handling for such kind of attributes - pass value as is.
- * For the third kind, field mask indicates which bits are related and
- * field bit is set to the first bit number (from 1 to 32) is the bit
- * sequence.
+	 * There are four kinds of attributes: single bit, full register's
+	 * bits, bit sequence and bits spanning several registers. For the
+	 * first kind the field mask indicates which bits are not related and
+	 * the field bit is set to zero. For the second kind the field mask is
+	 * set to zero and the field bit is set with all bits one; no special
+	 * handling is needed for such attributes - the value is passed as is.
+	 * For the third kind the field mask indicates which bits are related
+	 * and the field bit is set to the first bit number (from 1 to 32) in
+	 * the bit sequence. For the fourth kind the number of registers to be
+	 * read for composing an attribute is specified through the
+	 * 'data->regnum' field.
*/
if (!data->bit) {
/* Single bit. */
@@ -83,6 +87,19 @@ mlxreg_io_get_reg(void *regmap, struct mlxreg_core_data *data, u32 in_val,
/* Clear relevant bits and set them to new value. */
*regval = (*regval & ~data->mask) | in_val;
}
+ } else {
+ /*
+		 * Some attributes could occupy a few registers in case the
+		 * regmap bit size is 8 or 16. Compose such attributes from
+		 * 'regnum' registers. Such attributes contain read-only data.
+ */
+ for (i = 1; i < data->regnum; i++) {
+ ret = regmap_read(regmap, data->reg + i, &val);
+ if (ret)
+ goto access_error;
+
+ *regval |= rol32(val, regsize * i);
+ }
}
access_error:
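
A standalone sketch of the composition above, assuming an 8-bit regmap and
a two-register attribute (regnum == 2); the register contents are
illustrative, and a plain byte shift stands in for the driver's rol32()
combine:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t regs[2] = { 0x34, 0x12 }; /* data->reg, data->reg + 1 */
            int regsize = 1;                   /* regmap value width, bytes */
            uint32_t regval = regs[0];
            int i;

            for (i = 1; i < 2; i++)            /* data->regnum == 2 */
                    regval |= regs[i] << (regsize * i * 8);

            printf("composed: 0x%04x\n", regval); /* prints 0x1234 */
            return 0;
    }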
@@ -99,7 +116,8 @@ mlxreg_io_attr_show(struct device *dev, struct device_attribute *attr,
u32 regval = 0;
int ret;
- ret = mlxreg_io_get_reg(priv->pdata->regmap, data, 0, true, &regval);
+ ret = mlxreg_io_get_reg(priv->pdata->regmap, data, 0, true,
+ priv->regsize, &regval);
if (ret)
goto access_error;
@@ -128,7 +146,7 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
return ret;
ret = mlxreg_io_get_reg(priv->pdata->regmap, data, input_val, false,
- &regval);
+ priv->regsize, &regval);
if (ret)
goto access_error;
@@ -207,6 +225,9 @@ static int mlxreg_io_probe(struct platform_device *pdev)
}
priv->pdev = pdev;
+ priv->regsize = regmap_get_val_bytes(priv->pdata->regmap);
+ if (priv->regsize < 0)
+ return priv->regsize;
err = mlxreg_io_attr_init(priv);
if (err) {
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0581a54cf562..40219bba6801 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -140,7 +140,7 @@ config ACERHDF
in the same node directory will tell you if it is "acerhdf".
For more information about this driver see
- <http://piie.net/files/acerhdf_README.txt>
+ <https://piie.net/files/acerhdf_README.txt>
If you have an Acer Aspire One netbook, say Y or M
here.
@@ -748,6 +748,27 @@ config THINKPAD_ACPI_HOTKEY_POLL
If you are not sure, say Y here. The driver enables polling only if
it is strictly necessary to do so.
+config INTEL_ATOMISP2_LED
+ tristate "Intel AtomISP2 camera LED driver"
+ depends on GPIOLIB && LEDS_GPIO
+ help
+ Many Bay Trail and Cherry Trail devices come with a camera attached
+ to Intel's Image Signal Processor. Linux currently does not have a
+	  driver for these, so they do not work as a camera. Some of these
+	  cameras have an LED which is controlled through a GPIO.
+
+	  Some of these devices have a firmware issue where the LED gets turned
+	  on at boot. This driver turns the LED off at boot and also allows
+	  controlling the LED (repurposing it) through the sysfs LED interface.
+
+	  Which GPIO is attached to the LED is usually not described in the
+	  ACPI tables, so the driver contains per-system GPIO information.
+	  This means the driver only works on systems it knows about.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_atomisp2_led.
+
config INTEL_ATOMISP2_PM
tristate "Intel AtomISP2 dummy / power-management driver"
depends on PCI && IOSF_MBI && PM
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 2b85852a1a87..5f823f7eff45 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
# Intel
+obj-$(CONFIG_INTEL_ATOMISP2_LED) += intel_atomisp2_led.o
obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
obj-$(CONFIG_INTEL_CHT_INT33FE) += intel_cht_int33fe.o
intel_cht_int33fe-objs := intel_cht_int33fe_common.o \
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 4df7609b4aa9..a7a0b2e0ceb9 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -5,7 +5,7 @@
* as soon as the upper/lower threshold is reached.
*
* (C) 2009 - Peter Kaestle peter (a) piie.net
- * http://piie.net
+ * https://piie.net
* 2009 Borislav Petkov bp (a) alien8.de
*
* Inspired by and many thanks to:
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 7e3083deb1c5..9aae45a45200 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -277,8 +277,8 @@ static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
* MBP5 2008/09 uses a `TI LP8543`_ backlight driver. All newer models
* use a `TI LP8545`_.
*
- * .. _TI LP8543: http://www.ti.com/lit/ds/symlink/lp8543.pdf
- * .. _TI LP8545: http://www.ti.com/lit/ds/symlink/lp8545.pdf
+ * .. _TI LP8543: https://www.ti.com/lit/ds/symlink/lp8543.pdf
+ * .. _TI LP8545: https://www.ti.com/lit/ds/symlink/lp8545.pdf
*/
static int gmux_get_brightness(struct backlight_device *bd)
@@ -373,14 +373,14 @@ static const struct backlight_ops gmux_bl_ops = {
* switch the panel and the external DP connector and allocates a framebuffer
* for the selected GPU.
*
- * .. _US 8,687,007 B2: http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
- * .. _NXP CBTL06141: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
- * .. _NXP CBTL06142: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
- * .. _TI HD3SS212: http://www.ti.com/lit/ds/symlink/hd3ss212.pdf
+ * .. _US 8,687,007 B2: https://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
+ * .. _NXP CBTL06141: https://www.nxp.com/documents/data_sheet/CBTL06141.pdf
+ * .. _NXP CBTL06142: https://www.nxp.com/documents/data_sheet/CBTL06141.pdf
+ * .. _TI HD3SS212: https://www.ti.com/lit/ds/symlink/hd3ss212.pdf
* .. _Pericom PI3VDP12412: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf
- * .. _TI SN74LV4066A: http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
+ * .. _TI SN74LV4066A: https://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
* .. _NXP CBTL03062: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf
- * .. _TI TS3DS10224: http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
+ * .. _TI TS3DS10224: https://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
*/
static void gmux_read_switch_state(struct apple_gmux_data *gmux_data)
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 8c4d00482ef0..b2e3d1e3b3e9 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -110,6 +110,11 @@ static struct quirk_entry quirk_asus_forceals = {
.wmi_force_als_set = true,
};
+static struct quirk_entry quirk_asus_vendor_backlight = {
+ .wmi_backlight_power = true,
+ .wmi_backlight_set_devstate = true,
+};
+
static int dmi_matched(const struct dmi_system_id *dmi)
{
pr_info("Identified laptop model '%s'\n", dmi->ident);
@@ -411,6 +416,78 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_forceals,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. GA401IH",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA401IH"),
+ },
+ .driver_data = &quirk_asus_vendor_backlight,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. GA401II",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA401II"),
+ },
+ .driver_data = &quirk_asus_vendor_backlight,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. GA401IU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA401IU"),
+ },
+ .driver_data = &quirk_asus_vendor_backlight,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. GA401IV",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA401IV"),
+ },
+ .driver_data = &quirk_asus_vendor_backlight,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. GA401IVC",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA401IVC"),
+ },
+ .driver_data = &quirk_asus_vendor_backlight,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. GA502II",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA502II"),
+ },
+ .driver_data = &quirk_asus_vendor_backlight,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. GA502IU",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA502IU"),
+ },
+ .driver_data = &quirk_asus_vendor_backlight,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. GA502IV",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA502IV"),
+ },
+ .driver_data = &quirk_asus_vendor_backlight,
+ },
{},
};
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index c25a4286d766..bbdb3e860892 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -255,6 +255,10 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = {
/* Keyboard backlight change notification */
{ KE_IGNORE, 0x3f, { KEY_RESERVED } },
+ /* Backlight brightness level */
+ { KE_KEY, 0x57, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x58, { KEY_BRIGHTNESSUP } },
+
/* Mic mute */
{ KE_KEY, 0x150, { KEY_MICMUTE } },
@@ -330,6 +334,15 @@ static const struct key_entry dell_wmi_keymap_type_0011[] = {
{ KE_IGNORE, KBD_LED_AUTO_100_TOKEN, { KEY_RESERVED } },
};
+/*
+ * Keymap for WMI events of type 0x0012
+ * They are events with extended data
+ */
+static const struct key_entry dell_wmi_keymap_type_0012[] = {
+ /* Fn-lock button pressed */
+ { KE_IGNORE, 0xe035, { KEY_RESERVED } },
+};
+
static void dell_wmi_process_key(struct wmi_device *wdev, int type, int code)
{
struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
@@ -414,10 +427,11 @@ static void dell_wmi_notify(struct wmi_device *wdev,
switch (buffer_entry[1]) {
case 0x0000: /* One key pressed or event occurred */
+ case 0x0012: /* Event with extended data occurred */
if (len > 2)
- dell_wmi_process_key(wdev, 0x0000,
+ dell_wmi_process_key(wdev, buffer_entry[1],
buffer_entry[2]);
- /* Other entries could contain additional information */
+ /* Extended data is currently ignored */
break;
case 0x0010: /* Sequence of keys pressed */
case 0x0011: /* Sequence of events occurred */
@@ -492,7 +506,7 @@ static void handle_dmi_entry(const struct dmi_header *dm, void *opaque)
u16 keycode = (bios_entry->keycode <
ARRAY_SIZE(bios_to_linux_keycode)) ?
bios_to_linux_keycode[bios_entry->keycode] :
- KEY_RESERVED;
+ (bios_entry->keycode == 0xffff ? KEY_UNKNOWN : KEY_RESERVED);
/*
* Log if we find an entry in the DMI table that we don't
@@ -552,6 +566,7 @@ static int dell_wmi_input_setup(struct wmi_device *wdev)
ARRAY_SIZE(dell_wmi_keymap_type_0000) +
ARRAY_SIZE(dell_wmi_keymap_type_0010) +
ARRAY_SIZE(dell_wmi_keymap_type_0011) +
+ ARRAY_SIZE(dell_wmi_keymap_type_0012) +
1,
sizeof(struct key_entry), GFP_KERNEL);
if (!keymap) {
@@ -596,6 +611,13 @@ static int dell_wmi_input_setup(struct wmi_device *wdev)
pos++;
}
+ /* Append table with events of type 0x0012 */
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
+ keymap[pos] = dell_wmi_keymap_type_0012[i];
+ keymap[pos].code |= (0x0012 << 16);
+ pos++;
+ }
+
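+	/*
+	 * Worked example: the WMI event type is folded into the upper
+	 * 16 bits of each keymap code, so the Fn-lock entry above is
+	 * matched as (0x0012 << 16) | 0xe035 == 0x0012e035 when
+	 * dell_wmi_process_key() looks the scancode up.
+	 */
+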
/*
* Now append also table with "legacy" events of type 0x0000. Some of
* them are reported also on laptops which have scancodes in DMI.
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 04c4da6692d7..a72270932ec3 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -365,7 +365,7 @@ static ssize_t hdaps_variance_show(struct device *dev,
static ssize_t hdaps_temp1_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u8 uninitialized_var(temp);
+ u8 temp;
int ret;
ret = hdaps_readb_one(HDAPS_PORT_TEMP1, &temp);
@@ -378,7 +378,7 @@ static ssize_t hdaps_temp1_show(struct device *dev,
static ssize_t hdaps_temp2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u8 uninitialized_var(temp);
+ u8 temp;
int ret;
ret = hdaps_readb_one(HDAPS_PORT_TEMP2, &temp);
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 9ee79b74311c..86261970bd8f 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -571,7 +571,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
if (acpi_match_device_ids(dev, ids) == 0)
- if (acpi_create_platform_device(dev, NULL))
+ if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL)))
dev_info(&dev->dev,
"intel-hid: created platform device\n");
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 0487b606a274..e85d8e58320c 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -299,7 +299,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
if (acpi_match_device_ids(dev, ids) == 0)
- if (acpi_create_platform_device(dev, NULL))
+ if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL)))
dev_info(&dev->dev,
"intel-vbtn: created platform device\n");
diff --git a/drivers/platform/x86/intel_atomisp2_led.c b/drivers/platform/x86/intel_atomisp2_led.c
new file mode 100644
index 000000000000..5935dfca166f
--- /dev/null
+++ b/drivers/platform/x86/intel_atomisp2_led.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for controlling LEDs for cameras connected to the Intel atomisp2
+ * The main purpose of this driver is to turn off LEDs which are on at boot.
+ *
+ * Copyright (C) 2020 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+/* This must be leds-gpio as the leds-gpio driver binds to the name */
+#define DEV_NAME "leds-gpio"
+
+static const struct gpio_led atomisp2_leds[] = {
+ {
+ .name = "atomisp2::camera",
+ .default_state = LEDS_GPIO_DEFSTATE_OFF,
+ },
+};
+
+static const struct gpio_led_platform_data atomisp2_leds_pdata = {
+ .num_leds = ARRAY_SIZE(atomisp2_leds),
+ .leds = atomisp2_leds,
+};
+
+static struct gpiod_lookup_table asus_t100ta_lookup = {
+ .dev_id = DEV_NAME,
+ .table = {
+ GPIO_LOOKUP_IDX("INT33FC:02", 8, NULL, 0, GPIO_ACTIVE_HIGH),
+ { }
+ }
+};
+
+static struct gpiod_lookup_table asus_t100chi_lookup = {
+ .dev_id = DEV_NAME,
+ .table = {
+ GPIO_LOOKUP_IDX("INT33FC:01", 24, NULL, 0, GPIO_ACTIVE_HIGH),
+ { }
+ }
+};
+
+static const struct dmi_system_id atomisp2_led_systems[] __initconst = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ },
+ .driver_data = &asus_t100ta_lookup,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ .driver_data = &asus_t100ta_lookup,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100CHI"),
+ },
+ .driver_data = &asus_t100chi_lookup,
+ },
+ {} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(dmi, atomisp2_led_systems);
+
+static struct gpiod_lookup_table *gpio_lookup;
+static struct platform_device *pdev;
+
+static int __init atomisp2_led_init(void)
+{
+ const struct dmi_system_id *system;
+
+ system = dmi_first_match(atomisp2_led_systems);
+ if (!system)
+ return -ENODEV;
+
+ gpio_lookup = system->driver_data;
+ gpiod_add_lookup_table(gpio_lookup);
+
+ pdev = platform_device_register_resndata(NULL,
+ DEV_NAME, PLATFORM_DEVID_NONE,
+ NULL, 0, &atomisp2_leds_pdata,
+ sizeof(atomisp2_leds_pdata));
+ if (IS_ERR(pdev))
+ gpiod_remove_lookup_table(gpio_lookup);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+
+static void __exit atomisp2_led_cleanup(void)
+{
+ platform_device_unregister(pdev);
+ gpiod_remove_lookup_table(gpio_lookup);
+}
+
+module_init(atomisp2_led_init);
+module_exit(atomisp2_led_cleanup);
+
+/*
+ * The ACPI INIT method from Asus WMI's code on the T100TA and T200TA turns the
+ * LED on (without the WMI interface allowing further control over the LED).
+ * Ensure we are loaded after asus-nb-wmi so that we turn the LED off again.
+ */
+MODULE_SOFTDEP("pre: asus_nb_wmi");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Intel atomisp2 camera LED driver");
+MODULE_LICENSE("GPL");
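
Once the leds-gpio device binds, the LED is exposed through the standard
sysfs LED interface. A minimal userspace sketch (the path follows the
"atomisp2::camera" name defined above):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/leds/atomisp2::camera/brightness", "w");

            if (!f)
                    return 1;
            fputs("1", f);  /* writing "0" turns the LED back off */
            return fclose(f) ? 1 : 0;
    }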
diff --git a/drivers/platform/x86/intel_cht_int33fe_common.c b/drivers/platform/x86/intel_cht_int33fe_common.c
index 42dd11623f56..251ed9bac789 100644
--- a/drivers/platform/x86/intel_cht_int33fe_common.c
+++ b/drivers/platform/x86/intel_cht_int33fe_common.c
@@ -29,18 +29,16 @@ static int cht_int33fe_i2c_res_filter(struct acpi_resource *ares, void *data)
static int cht_int33fe_count_i2c_clients(struct device *dev)
{
- struct acpi_device *adev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
LIST_HEAD(resource_list);
int count = 0;
+ int ret;
- adev = ACPI_COMPANION(dev);
- if (!adev)
- return -EINVAL;
-
- acpi_dev_get_resources(adev, &resource_list,
- cht_int33fe_i2c_res_filter, &count);
-
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ cht_int33fe_i2c_res_filter, &count);
acpi_dev_free_resource_list(&resource_list);
+ if (ret < 0)
+ return ret;
return count;
}
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 7c8bdab078cf..338ea5222555 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -415,7 +415,7 @@ static const struct pmc_bit_map tgl_lpm0_map[] = {
{"PCIe_Gen3PLL_OFF_STS", BIT(20)},
{"OPIOPLL_OFF_STS", BIT(21)},
{"OCPLL_OFF_STS", BIT(22)},
- {"AudioPLL_OFF_STS", BIT(23)},
+ {"MainPLL_OFF_STS", BIT(23)},
{"MIPIPLL_OFF_STS", BIT(24)},
{"Fast_XTAL_Osc_OFF_STS", BIT(25)},
{"AC_Ring_Osc_OFF_STS", BIT(26)},
@@ -795,7 +795,7 @@ static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
msleep(10);
val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
- for (index = 0; map[index].name && index < 8; index++) {
+ for (index = 0; index < 8 && map[index].name; index++) {
seq_printf(s, "%-32s\tState: %s\n",
map[index].name,
map[index].bit_mask & val_low ? "Not power gated" :
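
The operand swap matters because && short-circuits left to right: once
index reaches 8, the old order dereferenced map[8].name before checking the
bound, a potential out-of-bounds read for an eight-entry map with no
terminating entry. Sketch of the two orders at index == 8:

    index < 8 && map[index].name   /* bound fails first, no dereference */
    map[index].name && index < 8   /* reads map[8].name first: possible
                                      out-of-bounds access */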
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index c27548fd386a..90bc7969b199 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -26,6 +26,10 @@
#define MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET 0x01
#define MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET 0x02
#define MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET 0x03
+#define MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET 0x04
+#define MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET 0x06
+#define MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET 0x08
+#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET 0x0a
#define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d
#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET 0x1e
#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET 0x1f
@@ -72,6 +76,10 @@
#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
+#define MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET 0xde
+#define MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET 0xdf
+#define MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET 0xe0
+#define MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET 0xe1
#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET 0xe2
#define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3
#define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4
@@ -1304,6 +1312,32 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld2_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
.label = "reset_long_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -1410,6 +1444,32 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld2_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
.label = "reset_long_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -1528,6 +1588,58 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld2_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld3_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld4_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld3_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld4_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
.label = "reset_long_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -1728,6 +1840,8 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(0),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho2",
@@ -1735,6 +1849,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(1),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho3",
@@ -1742,6 +1857,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(2),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho4",
@@ -1749,6 +1865,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(3),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho5",
@@ -1756,6 +1873,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(4),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho6",
@@ -1763,6 +1881,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(5),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho7",
@@ -1770,6 +1889,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(6),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho8",
@@ -1777,6 +1897,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(7),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho9",
@@ -1784,6 +1905,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(0),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho10",
@@ -1791,6 +1913,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(1),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho11",
@@ -1798,6 +1921,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(2),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho12",
@@ -1805,6 +1929,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(3),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "conf",
@@ -2006,6 +2131,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET:
@@ -2051,6 +2180,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
@@ -2085,6 +2218,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET:
@@ -2122,6 +2259,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
index 9b11ef1a401f..6aff6cf41414 100644
--- a/drivers/platform/x86/pcengines-apuv2.c
+++ b/drivers/platform/x86/pcengines-apuv2.c
@@ -78,7 +78,6 @@ static const struct gpio_led apu2_leds[] = {
{ .name = "apu:green:1" },
{ .name = "apu:green:2" },
{ .name = "apu:green:3" },
- { .name = "apu:simswap" },
};
static const struct gpio_led_platform_data apu2_leds_pdata = {
@@ -95,8 +94,6 @@ static struct gpiod_lookup_table gpios_led_table = {
NULL, 1, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3,
NULL, 2, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_SIMSWAP,
- NULL, 3, GPIO_ACTIVE_LOW),
}
};
diff --git a/drivers/platform/x86/system76_acpi.c b/drivers/platform/x86/system76_acpi.c
index 4f6e4c342382..c14fd22ba196 100644
--- a/drivers/platform/x86/system76_acpi.c
+++ b/drivers/platform/x86/system76_acpi.c
@@ -103,12 +103,12 @@ static enum led_brightness ap_led_get(struct led_classdev *led)
}
// Set the airplane mode LED brightness
-static void ap_led_set(struct led_classdev *led, enum led_brightness value)
+static int ap_led_set(struct led_classdev *led, enum led_brightness value)
{
struct system76_data *data;
data = container_of(led, struct system76_data, ap_led);
- system76_set(data, "SAPL", value == LED_OFF ? 0 : 1);
+ return system76_set(data, "SAPL", value == LED_OFF ? 0 : 1);
}
// Get the last set keyboard LED brightness
@@ -121,13 +121,13 @@ static enum led_brightness kb_led_get(struct led_classdev *led)
}
// Set the keyboard LED brightness
-static void kb_led_set(struct led_classdev *led, enum led_brightness value)
+static int kb_led_set(struct led_classdev *led, enum led_brightness value)
{
struct system76_data *data;
data = container_of(led, struct system76_data, kb_led);
data->kb_brightness = value;
- system76_set(data, "SKBL", (int)data->kb_brightness);
+ return system76_set(data, "SKBL", (int)data->kb_brightness);
}
// Get the last set keyboard LED color
@@ -313,7 +313,7 @@ static int system76_add(struct acpi_device *acpi_dev)
data->ap_led.name = "system76_acpi::airplane";
data->ap_led.flags = LED_CORE_SUSPENDRESUME;
data->ap_led.brightness_get = ap_led_get;
- data->ap_led.brightness_set = ap_led_set;
+ data->ap_led.brightness_set_blocking = ap_led_set;
data->ap_led.max_brightness = 1;
data->ap_led.default_trigger = "rfkill-none";
err = devm_led_classdev_register(&acpi_dev->dev, &data->ap_led);
@@ -323,7 +323,7 @@ static int system76_add(struct acpi_device *acpi_dev)
data->kb_led.name = "system76_acpi::kbd_backlight";
data->kb_led.flags = LED_BRIGHT_HW_CHANGED | LED_CORE_SUSPENDRESUME;
data->kb_led.brightness_get = kb_led_get;
- data->kb_led.brightness_set = kb_led_set;
+ data->kb_led.brightness_set_blocking = kb_led_set;
if (acpi_has_method(acpi_device_handle(data->acpi_dev), "SKBC")) {
data->kb_led.max_brightness = 255;
data->kb_toggle_brightness = 72;
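
The switch to brightness_set_blocking reflects the LED core's two setter
contracts (signatures as in include/linux/leds.h): brightness_set() may be
called from atomic context and returns void, while brightness_set_blocking()
is called from a context that may sleep and can return an error. That suits
these setters, since system76_set() evaluates an ACPI method and may sleep:

    void (*brightness_set)(struct led_classdev *led_cdev,
                           enum led_brightness brightness);
    int (*brightness_set_blocking)(struct led_classdev *led_cdev,
                                   enum led_brightness brightness);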
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 0f6fceda5fc0..4864a5c189d4 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -4030,8 +4030,8 @@ static bool hotkey_notify_6xxx(const u32 hkey,
return true;
case TP_HKEY_EV_THM_CSM_COMPLETED:
pr_debug("EC reports: Thermal Control Command set completed (DYTC)\n");
- /* recommended action: do nothing, we don't have
- * Lenovo ATM information */
+ /* Thermal event - pass on to event handler */
+ tpacpi_driver_event(hkey);
return true;
case TP_HKEY_EV_THM_TRANSFM_CHANGED:
pr_debug("EC reports: Thermal Transformation changed (GMTS)\n");
@@ -6963,10 +6963,13 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
pr_warn("Cannot enable backlight brightness support, ACPI is already handling it. Refer to the acpi_backlight kernel parameter.\n");
return 1;
}
- } else if (tp_features.bright_acpimode && brightness_enable > 1) {
- pr_notice("Standard ACPI backlight interface not available, thinkpad_acpi native brightness control enabled\n");
+ } else if (!tp_features.bright_acpimode) {
+ pr_notice("ACPI backlight interface not available\n");
+ return 1;
}
+ pr_notice("ACPI native brightness control enabled\n");
+
/*
* Check for module parameter bogosity, note that we
* init brightness_mode to TPACPI_BRGHT_MODE_MAX in order to be
@@ -7965,7 +7968,7 @@ static struct ibm_struct volume_driver_data = {
* does so, its initial value is meaningless (0x07).
*
* For firmware bugs, refer to:
- * http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
+ * https://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
*
* ----
*
@@ -7990,7 +7993,7 @@ static struct ibm_struct volume_driver_data = {
* mode.
*
* For firmware bugs, refer to:
- * http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
+ * https://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
*
* ----
*
@@ -9315,9 +9318,6 @@ static struct ibm_struct mute_led_driver_data = {
#define GET_STOP "BCSG"
#define SET_STOP "BCSS"
-#define START_ATTR "charge_start_threshold"
-#define STOP_ATTR "charge_stop_threshold"
-
enum {
BAT_ANY = 0,
BAT_PRIMARY = 1,
@@ -9603,38 +9603,52 @@ static ssize_t tpacpi_battery_show(int what,
return sprintf(buf, "%d\n", ret);
}
-static ssize_t charge_start_threshold_show(struct device *device,
+static ssize_t charge_control_start_threshold_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
return tpacpi_battery_show(THRESHOLD_START, device, buf);
}
-static ssize_t charge_stop_threshold_show(struct device *device,
+static ssize_t charge_control_end_threshold_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
return tpacpi_battery_show(THRESHOLD_STOP, device, buf);
}
-static ssize_t charge_start_threshold_store(struct device *dev,
+static ssize_t charge_control_start_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return tpacpi_battery_store(THRESHOLD_START, dev, buf, count);
}
-static ssize_t charge_stop_threshold_store(struct device *dev,
+static ssize_t charge_control_end_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return tpacpi_battery_store(THRESHOLD_STOP, dev, buf, count);
}
-static DEVICE_ATTR_RW(charge_start_threshold);
-static DEVICE_ATTR_RW(charge_stop_threshold);
+static DEVICE_ATTR_RW(charge_control_start_threshold);
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+static struct device_attribute dev_attr_charge_start_threshold = __ATTR(
+ charge_start_threshold,
+ 0644,
+ charge_control_start_threshold_show,
+ charge_control_start_threshold_store
+);
+static struct device_attribute dev_attr_charge_stop_threshold = __ATTR(
+ charge_stop_threshold,
+ 0644,
+ charge_control_end_threshold_show,
+ charge_control_end_threshold_store
+);
static struct attribute *tpacpi_battery_attrs[] = {
+ &dev_attr_charge_control_start_threshold.attr,
+ &dev_attr_charge_control_end_threshold.attr,
&dev_attr_charge_start_threshold.attr,
&dev_attr_charge_stop_threshold.attr,
NULL,
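
With both attribute sets registered, each ThinkPad battery exposes the
standardized names alongside the legacy ones (paths assume the first
battery, BAT0):

    /sys/class/power_supply/BAT0/charge_control_start_threshold
    /sys/class/power_supply/BAT0/charge_control_end_threshold
    /sys/class/power_supply/BAT0/charge_start_threshold    (legacy alias)
    /sys/class/power_supply/BAT0/charge_stop_threshold     (legacy alias)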
@@ -9803,6 +9817,105 @@ static struct ibm_struct lcdshadow_driver_data = {
.write = lcdshadow_write,
};
+/*************************************************************************
+ * DYTC subdriver, for the Lenovo lapmode feature
+ */
+
+#define DYTC_CMD_GET 2 /* To get current IC function and mode */
+#define DYTC_GET_LAPMODE_BIT 17 /* Set when in lapmode */
+
+static bool dytc_lapmode;
+
+static void dytc_lapmode_notify_change(void)
+{
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, "dytc_lapmode");
+}
+
+static int dytc_command(int command, int *output)
+{
+ acpi_handle dytc_handle;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "DYTC", &dytc_handle))) {
+ /* Platform doesn't support DYTC */
+ return -ENODEV;
+ }
+ if (!acpi_evalf(dytc_handle, output, NULL, "dd", command))
+ return -EIO;
+ return 0;
+}
+
+static int dytc_lapmode_get(bool *state)
+{
+ int output, err;
+
+ err = dytc_command(DYTC_CMD_GET, &output);
+ if (err)
+ return err;
+ *state = output & BIT(DYTC_GET_LAPMODE_BIT) ? true : false;
+ return 0;
+}
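+
+/*
+ * Worked example (hypothetical reply): a DYTC_CMD_GET output of 0x00020000
+ * has BIT(17) set, so *state becomes true (lapmode on); an output with
+ * bit 17 clear leaves it false.
+ */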
+
+static void dytc_lapmode_refresh(void)
+{
+ bool new_state;
+ int err;
+
+ err = dytc_lapmode_get(&new_state);
+ if (err || (new_state == dytc_lapmode))
+ return;
+
+ dytc_lapmode = new_state;
+ dytc_lapmode_notify_change();
+}
+
+/* sysfs lapmode entry */
+static ssize_t dytc_lapmode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", dytc_lapmode);
+}
+
+static DEVICE_ATTR_RO(dytc_lapmode);
+
+static struct attribute *dytc_attributes[] = {
+ &dev_attr_dytc_lapmode.attr,
+ NULL,
+};
+
+static const struct attribute_group dytc_attr_group = {
+ .attrs = dytc_attributes,
+};
+
+static int tpacpi_dytc_init(struct ibm_init_struct *iibm)
+{
+ int err;
+
+ err = dytc_lapmode_get(&dytc_lapmode);
+	/*
+	 * If support isn't available (ENODEV), don't return an error,
+	 * just don't create the sysfs group.
+	 */
+ if (err == -ENODEV)
+ return 0;
+ /* For all other errors we can flag the failure */
+ if (err)
+ return err;
+
+ /* Platform supports this feature - create the group */
+ err = sysfs_create_group(&tpacpi_pdev->dev.kobj, &dytc_attr_group);
+ return err;
+}
+
+static void dytc_exit(void)
+{
+ sysfs_remove_group(&tpacpi_pdev->dev.kobj, &dytc_attr_group);
+}
+
+static struct ibm_struct dytc_driver_data = {
+ .name = "dytc",
+ .exit = dytc_exit,
+};
+
/****************************************************************************
****************************************************************************
*
@@ -9850,6 +9963,10 @@ static void tpacpi_driver_event(const unsigned int hkey_event)
mutex_unlock(&kbdlight_mutex);
}
+
+ if (hkey_event == TP_HKEY_EV_THM_CSM_COMPLETED)
+ dytc_lapmode_refresh();
+
}
static void hotkey_driver_event(const unsigned int scancode)
@@ -10102,7 +10219,7 @@ static int __must_check __init get_thinkpad_model_data(
* X32 or newer, all Z series; Some models must have an
* up-to-date BIOS or they will not be detected.
*
- * See http://thinkwiki.org/wiki/List_of_DMI_IDs
+ * See https://thinkwiki.org/wiki/List_of_DMI_IDs
*/
while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev))) {
if (sscanf(dev->name,
@@ -10288,6 +10405,10 @@ static struct ibm_init_struct ibms_init[] __initdata = {
.init = tpacpi_lcdshadow_init,
.data = &lcdshadow_driver_data,
},
+ {
+ .init = tpacpi_dytc_init,
+ .data = &dytc_driver_data,
+ },
};
static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
@@ -10621,8 +10742,8 @@ MODULE_DEVICE_TABLE(acpi, ibm_htk_device_ids);
/*
* DMI matching for module autoloading
*
- * See http://thinkwiki.org/wiki/List_of_DMI_IDs
- * See http://thinkwiki.org/wiki/BIOS_Upgrade_Downloads
+ * See https://thinkwiki.org/wiki/List_of_DMI_IDs
+ * See https://thinkwiki.org/wiki/BIOS_Upgrade_Downloads
*
* Only models listed in thinkwiki will be supported, so add yours
* if it is not there yet.
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index c90f0990968b..597733ed86e9 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -19,8 +19,8 @@
* The idle + run duration is specified via separate helpers and that allows
* idle injection to be started.
*
- * The idle injection kthreads will call play_idle() with the idle duration
- * specified as per the above.
+ * The idle injection kthreads will call play_idle_precise() with the idle
+ * duration and max allowed latency specified as per the above.
*
* After all of them have been woken up, a timer is set to start the next idle
* injection cycle.
@@ -100,7 +100,7 @@ static void idle_inject_wakeup(struct idle_inject_device *ii_dev)
*
* This function is called when the idle injection timer expires. It wakes up
* idle injection tasks associated with the timer and they, in turn, invoke
- * play_idle() to inject a specified amount of CPU idle time.
+ * play_idle_precise() to inject a specified amount of CPU idle time.
*
* Return: HRTIMER_RESTART.
*/
@@ -124,8 +124,8 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
* idle_inject_fn - idle injection work function
* @cpu: the CPU owning the task
*
- * This function calls play_idle() to inject a specified amount of CPU idle
- * time.
+ * This function calls play_idle_precise() to inject a specified amount of CPU
+ * idle time.
*/
static void idle_inject_fn(unsigned int cpu)
{
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 61a63a16b5e7..6f55aaef8afc 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -39,6 +39,8 @@
#define POWER_HIGH_LOCK BIT_ULL(63)
#define POWER_LOW_LOCK BIT(31)
+#define POWER_LIMIT4_MASK 0x1FFF
+
#define TIME_WINDOW1_MASK (0x7FULL<<17)
#define TIME_WINDOW2_MASK (0x7FULL<<49)
@@ -82,6 +84,7 @@ enum unit_type {
static const char pl1_name[] = "long_term";
static const char pl2_name[] = "short_term";
+static const char pl4_name[] = "peak_power";
#define power_zone_to_rapl_domain(_zone) \
container_of(_zone, struct rapl_domain, power_zone)
@@ -93,6 +96,7 @@ struct rapl_defaults {
u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
bool to_raw);
unsigned int dram_domain_energy_unit;
+ unsigned int psys_domain_energy_unit;
};
static struct rapl_defaults *rapl_defaults;
@@ -337,6 +341,9 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid,
case PL2_ENABLE:
rapl_write_data_raw(rd, POWER_LIMIT2, power_limit);
break;
+ case PL4_ENABLE:
+ rapl_write_data_raw(rd, POWER_LIMIT4, power_limit);
+ break;
default:
ret = -EINVAL;
}
@@ -371,6 +378,9 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
case PL2_ENABLE:
prim = POWER_LIMIT2;
break;
+ case PL4_ENABLE:
+ prim = POWER_LIMIT4;
+ break;
default:
put_online_cpus();
return -EINVAL;
@@ -440,6 +450,13 @@ static int get_time_window(struct powercap_zone *power_zone, int cid,
case PL2_ENABLE:
ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val);
break;
+ case PL4_ENABLE:
+ /*
+		 * The time window parameter is not applicable for PL4,
+		 * so assign '0' as the default value.
+ */
+ val = 0;
+ break;
default:
put_online_cpus();
return -EINVAL;
@@ -483,6 +500,9 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
case PL2_ENABLE:
prim = MAX_POWER;
break;
+ case PL4_ENABLE:
+ prim = MAX_POWER;
+ break;
default:
put_online_cpus();
return -EINVAL;
@@ -492,6 +512,10 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
else
*data = val;
+	/* As a rule of thumb, PL4 would be around two times PL2. */
+ if (rd->rpl[id].prim_id == PL4_ENABLE)
+ *data = *data * 2;
+
put_online_cpus();
return ret;
@@ -524,21 +548,42 @@ static void rapl_init_domains(struct rapl_package *rp)
rd->id = i;
rd->rpl[0].prim_id = PL1_ENABLE;
rd->rpl[0].name = pl1_name;
- /* some domain may support two power limits */
- if (rp->priv->limits[i] == 2) {
+
+ /*
+		 * The PL2 power limit is applicable when either two or
+		 * three power limits are supported.
+ */
+ if (rp->priv->limits[i] >= 2) {
rd->rpl[1].prim_id = PL2_ENABLE;
rd->rpl[1].name = pl2_name;
}
+		/* Enable the PL4 domain when three power limits are supported */
+ if (rp->priv->limits[i] == 3) {
+ rd->rpl[2].prim_id = PL4_ENABLE;
+ rd->rpl[2].name = pl4_name;
+ }
+
for (j = 0; j < RAPL_DOMAIN_REG_MAX; j++)
rd->regs[j] = rp->priv->regs[i][j];
- if (i == RAPL_DOMAIN_DRAM) {
+ switch (i) {
+ case RAPL_DOMAIN_DRAM:
rd->domain_energy_unit =
rapl_defaults->dram_domain_energy_unit;
if (rd->domain_energy_unit)
pr_info("DRAM domain energy unit %dpj\n",
rd->domain_energy_unit);
+ break;
+ case RAPL_DOMAIN_PLATFORM:
+ rd->domain_energy_unit =
+ rapl_defaults->psys_domain_energy_unit;
+ if (rd->domain_energy_unit)
+ pr_info("Platform domain energy unit %dpj\n",
+ rd->domain_energy_unit);
+ break;
+ default:
+ break;
}
rd++;
}
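
Summarizing the registration above, per value of rp->priv->limits[i]:

    1  ->  PL1 ("long_term")
    2  ->  PL1 + PL2 ("short_term")
    3  ->  PL1 + PL2 + PL4 ("peak_power")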
@@ -587,6 +632,8 @@ static struct rapl_primitive_info rpi[] = {
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(POWER_LIMIT4, POWER_LIMIT4_MASK, 0,
+ RAPL_DOMAIN_REG_PL4, POWER_UNIT, 0),
PRIMITIVE_INFO_INIT(FW_LOCK, POWER_LOW_LOCK, 31,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
@@ -597,6 +644,8 @@ static struct rapl_primitive_info rpi[] = {
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0,
+ RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
@@ -919,6 +968,14 @@ static const struct rapl_defaults rapl_defaults_hsw_server = {
.dram_domain_energy_unit = 15300,
};
+static const struct rapl_defaults rapl_defaults_spr_server = {
+ .check_unit = rapl_check_unit_core,
+ .set_floor_freq = set_floor_freq_default,
+ .compute_time_window = rapl_compute_time_window_core,
+ .dram_domain_energy_unit = 15300,
+ .psys_domain_energy_unit = 1000000000,
+};
+
static const struct rapl_defaults rapl_defaults_byt = {
.floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_BYT,
.check_unit = rapl_check_unit_atom,
@@ -978,6 +1035,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &rapl_defaults_cht),
@@ -1252,6 +1310,7 @@ void rapl_remove_package(struct rapl_package *rp)
if (find_nr_power_limit(rd) > 1) {
rapl_write_data_raw(rd, PL2_ENABLE, 0);
rapl_write_data_raw(rd, PL2_CLAMP, 0);
+ rapl_write_data_raw(rd, PL4_ENABLE, 0);
}
if (rd->id == RAPL_DOMAIN_PACKAGE) {
rd_package = rd;
@@ -1360,6 +1419,13 @@ static void power_limit_state_save(void)
if (ret)
rd->rpl[i].last_power_limit = 0;
break;
+ case PL4_ENABLE:
+ ret = rapl_read_data_raw(rd,
+ POWER_LIMIT4, true,
+ &rd->rpl[i].last_power_limit);
+ if (ret)
+ rd->rpl[i].last_power_limit = 0;
+ break;
}
}
}
@@ -1390,6 +1456,11 @@ static void power_limit_state_restore(void)
rapl_write_data_raw(rd, POWER_LIMIT2,
rd->rpl[i].last_power_limit);
break;
+ case PL4_ENABLE:
+ if (rd->rpl[i].last_power_limit)
+ rapl_write_data_raw(rd, POWER_LIMIT4,
+ rd->rpl[i].last_power_limit);
+ break;
}
}
}
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index d5487965bdfe..d2a2627507a9 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -28,6 +28,7 @@
/* Local defines */
#define MSR_PLATFORM_POWER_LIMIT 0x0000065C
+#define MSR_VR_CURRENT_CONFIG 0x00000601
/* private data for RAPL MSR Interface */
static struct rapl_if_priv rapl_msr_priv = {
@@ -123,13 +124,27 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
return ra->err;
}
+/* List of verified CPUs. */
+static const struct x86_cpu_id pl4_support_ids[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY },
+ {}
+};
+
static int rapl_msr_probe(struct platform_device *pdev)
{
+ const struct x86_cpu_id *id = x86_match_cpu(pl4_support_ids);
int ret;
rapl_msr_priv.read_raw = rapl_msr_read_raw;
rapl_msr_priv.write_raw = rapl_msr_write_raw;
+ if (id) {
+ rapl_msr_priv.limits[RAPL_DOMAIN_PACKAGE] = 3;
+ rapl_msr_priv.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4] =
+ MSR_VR_CURRENT_CONFIG;
+ pr_info("PL4 support detected.\n");
+ }
+
rapl_msr_priv.control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
if (IS_ERR(rapl_msr_priv.control_type)) {
pr_debug("failed to register powercap control_type.\n");
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index cb8d739067d2..7dbcf6973d33 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -370,15 +370,6 @@ config PWM_PCA9685
To compile this driver as a module, choose M here: the module
will be called pwm-pca9685.
-config PWM_PUV3
- tristate "PKUnity NetBook-0916 PWM support"
- depends on ARCH_PUV3
- help
- Generic PWM framework driver for PKUnity NetBook-0916.
-
- To compile this driver as a module, choose M here: the module
- will be called pwm-puv3.
-
config PWM_PXA
tristate "PXA PWM support"
depends on ARCH_PXA || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index a59c710e98c7..2c2ba0a03557 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -34,7 +34,6 @@ obj-$(CONFIG_PWM_MTK_DISP) += pwm-mtk-disp.o
obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
obj-$(CONFIG_PWM_OMAP_DMTIMER) += pwm-omap-dmtimer.o
obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o
-obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o
obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
obj-$(CONFIG_PWM_RCAR) += pwm-rcar.o
obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o
diff --git a/drivers/pwm/pwm-puv3.c b/drivers/pwm/pwm-puv3.c
deleted file mode 100644
index 9d0bd87a425e..000000000000
--- a/drivers/pwm/pwm-puv3.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/unicore32/kernel/pwm.c
- *
- * Code specific to PKUnity SoC and UniCore ISA
- *
- * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
- * Copyright (C) 2001-2010 Guan Xuetao
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/pwm.h>
-
-#include <asm/div64.h>
-#include <mach/hardware.h>
-
-struct puv3_pwm_chip {
- struct pwm_chip chip;
- void __iomem *base;
- struct clk *clk;
-};
-
-static inline struct puv3_pwm_chip *to_puv3(struct pwm_chip *chip)
-{
- return container_of(chip, struct puv3_pwm_chip, chip);
-}
-
-/*
- * period_ns = 10^9 * (PRESCALE + 1) * (PV + 1) / PWM_CLK_RATE
- * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
- */
-static int puv3_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
-{
- unsigned long period_cycles, prescale, pv, dc;
- struct puv3_pwm_chip *puv3 = to_puv3(chip);
- unsigned long long c;
-
- c = clk_get_rate(puv3->clk);
- c = c * period_ns;
- do_div(c, 1000000000);
- period_cycles = c;
-
- if (period_cycles < 1)
- period_cycles = 1;
-
- prescale = (period_cycles - 1) / 1024;
- pv = period_cycles / (prescale + 1) - 1;
-
- if (prescale > 63)
- return -EINVAL;
-
- if (duty_ns == period_ns)
- dc = OST_PWMDCCR_FDCYCLE;
- else
- dc = (pv + 1) * duty_ns / period_ns;
-
- /*
- * NOTE: the clock to PWM has to be enabled first
- * before writing to the registers
- */
- clk_prepare_enable(puv3->clk);
-
- writel(prescale, puv3->base + OST_PWM_PWCR);
- writel(pv - dc, puv3->base + OST_PWM_DCCR);
- writel(pv, puv3->base + OST_PWM_PCR);
-
- clk_disable_unprepare(puv3->clk);
-
- return 0;
-}
-
-static int puv3_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct puv3_pwm_chip *puv3 = to_puv3(chip);
-
- return clk_prepare_enable(puv3->clk);
-}
-
-static void puv3_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct puv3_pwm_chip *puv3 = to_puv3(chip);
-
- clk_disable_unprepare(puv3->clk);
-}
-
-static const struct pwm_ops puv3_pwm_ops = {
- .config = puv3_pwm_config,
- .enable = puv3_pwm_enable,
- .disable = puv3_pwm_disable,
- .owner = THIS_MODULE,
-};
-
-static int pwm_probe(struct platform_device *pdev)
-{
- struct puv3_pwm_chip *puv3;
- struct resource *r;
- int ret;
-
- puv3 = devm_kzalloc(&pdev->dev, sizeof(*puv3), GFP_KERNEL);
- if (!puv3)
- return -ENOMEM;
-
- puv3->clk = devm_clk_get(&pdev->dev, "OST_CLK");
- if (IS_ERR(puv3->clk))
- return PTR_ERR(puv3->clk);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- puv3->base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(puv3->base))
- return PTR_ERR(puv3->base);
-
- puv3->chip.dev = &pdev->dev;
- puv3->chip.ops = &puv3_pwm_ops;
- puv3->chip.base = -1;
- puv3->chip.npwm = 1;
-
- ret = pwmchip_add(&puv3->chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, puv3);
- return 0;
-}
-
-static int pwm_remove(struct platform_device *pdev)
-{
- struct puv3_pwm_chip *puv3 = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&puv3->chip);
-}
-
-static struct platform_driver puv3_pwm_driver = {
- .driver = {
- .name = "PKUnity-v3-PWM",
- },
- .probe = pwm_probe,
- .remove = pwm_remove,
-};
-module_platform_driver(puv3_pwm_driver);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index edb1c4f8b496..de17ef7e18f0 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -238,6 +238,16 @@ config REGULATOR_CPCAP
Say y here for CPCAP regulator found on some Motorola phones
and tablets such as Droid 4.
+config REGULATOR_CROS_EC
+ tristate "ChromeOS EC regulators"
+ depends on CROS_EC && OF
+ help
+	  This driver supports voltage regulators that are connected to the
+	  ChromeOS EC and controlled through EC host commands.
+
+ This driver can also be built as a module. If so, the module
+ will be called cros-ec-regulator.
+
config REGULATOR_DA903X
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
depends on PMIC_DA903X
@@ -326,6 +336,16 @@ config REGULATOR_FAN53555
input voltage supply of 2.5V to 5.5V. The output voltage is
programmed through an I2C interface.
+config REGULATOR_FAN53880
+ tristate "Fairchild FAN53880 Regulator"
+ depends on I2C && (OF || COMPILE_TEST)
+ select REGMAP_I2C
+ help
+	  This driver supports the Fairchild (ON Semiconductor) FAN53880
+	  regulator. The regulator is a programmable power management IC
+	  (PMIC); it is controlled by I2C and provides one BUCK, one BOOST
+	  and four LDO outputs.
+
config REGULATOR_GPIO
tristate "GPIO regulator support"
depends on GPIOLIB || COMPILE_TEST
@@ -730,6 +750,14 @@ config REGULATOR_PBIAS
This driver provides support for OMAP pbias modelled
regulators.
+config REGULATOR_PCA9450
+ tristate "NXP PCA9450A/PCA9450B/PCA9450C regulator driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say y here to support the NXP PCA9450A/PCA9450B/PCA9450C PMIC
+ regulator driver.
+
config REGULATOR_PCAP
tristate "Motorola PCAP2 regulator driver"
depends on EZX_PCAP
@@ -826,6 +854,16 @@ config REGULATOR_QCOM_SPMI
Qualcomm SPMI PMICs as a module. The module will be named
"qcom_spmi-regulator".
+config REGULATOR_QCOM_USB_VBUS
+ tristate "Qualcomm USB Vbus regulator driver"
+ depends on SPMI || COMPILE_TEST
+ help
+ If you say yes to this option, support will be included for the
+ regulator used to enable the VBUS output.
+
+ Say M here if you want to include support for enabling the VBUS output
+ as a module. The module will be named "qcom_usb_vbus_regulator".
+
config REGULATOR_RC5T583
tristate "RICOH RC5T583 Power regulators"
depends on MFD_RC5T583
@@ -989,6 +1027,13 @@ config REGULATOR_SY8824X
help
This driver supports SY8824C single output regulator.
+config REGULATOR_SY8827N
+ tristate "Silergy SY8827N regulator"
+ depends on I2C && (OF || COMPILE_TEST)
+ select REGMAP_I2C
+ help
+ This driver supports SY8827N single output regulator.
+
config REGULATOR_TPS51632
tristate "TI TPS51632 Power Regulator"
depends on I2C
@@ -1178,5 +1223,15 @@ config REGULATOR_WM8994
This driver provides support for the voltage regulators on the
WM8994 CODEC.
+config REGULATOR_QCOM_LABIBB
+ tristate "QCOM LAB/IBB regulator support"
+ depends on SPMI || COMPILE_TEST
+ help
+	  This driver supports Qualcomm's LAB/IBB regulators present on
+	  Qualcomm PMIC chips such as the PMI8998. QCOM LAB and IBB are
+	  SPMI-based regulators. LAB can be used as a positive boost
+	  regulator and IBB as a negative boost regulator for LCD display
+	  panels.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 0796e4a47afa..d8d3ecf526a8 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
obj-$(CONFIG_REGULATOR_88PG86X) += 88pg86x.o
obj-$(CONFIG_REGULATOR_88PM800) += 88pm800-regulator.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
+obj-$(CONFIG_REGULATOR_CROS_EC) += cros-ec-regulator.o
obj-$(CONFIG_REGULATOR_CPCAP) += cpcap-regulator.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
@@ -41,6 +42,7 @@ obj-$(CONFIG_REGULATOR_DA9211) += da9211-regulator.o
obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o
obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o
+obj-$(CONFIG_REGULATOR_FAN53880) += fan53880.o
obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
obj-$(CONFIG_REGULATOR_HI6421) += hi6421-regulator.o
obj-$(CONFIG_REGULATOR_HI6421V530) += hi6421v530-regulator.o
@@ -88,11 +90,14 @@ obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
+obj-$(CONFIG_REGULATOR_QCOM_LABIBB) += qcom-labibb-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPMH) += qcom-rpmh-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
+obj-$(CONFIG_REGULATOR_QCOM_USB_VBUS) += qcom_usb_vbus-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
+obj-$(CONFIG_REGULATOR_PCA9450) += pca9450-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
obj-$(CONFIG_REGULATOR_PV88060) += pv88060-regulator.o
obj-$(CONFIG_REGULATOR_PV88080) += pv88080-regulator.o
@@ -120,6 +125,7 @@ obj-$(CONFIG_REGULATOR_STPMIC1) += stpmic1_regulator.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
obj-$(CONFIG_REGULATOR_SY8106A) += sy8106a-regulator.o
obj-$(CONFIG_REGULATOR_SY8824X) += sy8824x.o
+obj-$(CONFIG_REGULATOR_SY8827N) += sy8827n.o
obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 716ca5bb178e..47b8b6f7b571 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -59,6 +59,7 @@ struct ab8500_shared_mode {
* @voltage_bank: bank to control regulator voltage
* @voltage_reg: register to control regulator voltage
* @voltage_mask: mask to control regulator voltage
+ * @expand_register:
*/
struct ab8500_regulator_info {
struct device *dev;
@@ -79,12 +80,6 @@ struct ab8500_regulator_info {
u8 voltage_bank;
u8 voltage_reg;
u8 voltage_mask;
- struct {
- u8 voltage_limit;
- u8 voltage_bank;
- u8 voltage_reg;
- u8 voltage_mask;
- } expand_register;
};
/* voltage tables for the vauxn/vintcore supplies */
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index ca92b3de0e9c..f9856d4e295f 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -139,7 +139,7 @@ static struct regulator_ops anatop_rops = {
.map_voltage = regulator_map_voltage_linear,
};
-static struct regulator_ops anatop_core_rops = {
+static const struct regulator_ops anatop_core_rops = {
.enable = anatop_regmap_enable,
.disable = anatop_regmap_disable,
.is_enabled = anatop_regmap_is_enabled,
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 03154f5b939f..75ff7c563c5d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -105,6 +105,7 @@ static int regulator_balance_voltage(struct regulator_dev *rdev,
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name);
+static void destroy_regulator(struct regulator *regulator);
static void _regulator_put(struct regulator *regulator);
const char *rdev_get_name(struct regulator_dev *rdev)
@@ -2034,20 +2035,9 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(regulator_get_optional);
-/* regulator_list_mutex lock held by regulator_put() */
-static void _regulator_put(struct regulator *regulator)
+static void destroy_regulator(struct regulator *regulator)
{
- struct regulator_dev *rdev;
-
- if (IS_ERR_OR_NULL(regulator))
- return;
-
- lockdep_assert_held_once(&regulator_list_mutex);
-
- /* Docs say you must disable before calling regulator_put() */
- WARN_ON(regulator->enable_count);
-
- rdev = regulator->rdev;
+ struct regulator_dev *rdev = regulator->rdev;
debugfs_remove_recursive(regulator->debugfs);
@@ -2068,6 +2058,24 @@ static void _regulator_put(struct regulator *regulator)
kfree_const(regulator->supply_name);
kfree(regulator);
+}
+
+/* regulator_list_mutex lock held by regulator_put() */
+static void _regulator_put(struct regulator *regulator)
+{
+ struct regulator_dev *rdev;
+
+ if (IS_ERR_OR_NULL(regulator))
+ return;
+
+ lockdep_assert_held_once(&regulator_list_mutex);
+
+ /* Docs say you must disable before calling regulator_put() */
+ WARN_ON(regulator->enable_count);
+
+ rdev = regulator->rdev;
+
+ destroy_regulator(regulator);
module_put(rdev->owner);
put_device(&rdev->dev);
@@ -2347,6 +2355,37 @@ static void _regulator_enable_delay(unsigned int delay)
udelay(us);
}
+/**
+ * _regulator_check_status_enabled - check if status reads as "enabled"
+ * @rdev: the regulator device to check
+ *
+ * A helper function to check if the regulator status can be interpreted
+ * as 'regulator is enabled'.
+ *
+ * Return:
+ * * 1 - if status shows regulator is in enabled state
+ * * 0 - if not enabled state
+ * * Error Value - as received from ops->get_status()
+ */
+static inline int _regulator_check_status_enabled(struct regulator_dev *rdev)
+{
+ int ret = rdev->desc->ops->get_status(rdev);
+
+ if (ret < 0) {
+ rdev_info(rdev, "get_status returned error: %d\n", ret);
+ return ret;
+ }
+
+ switch (ret) {
+ case REGULATOR_STATUS_OFF:
+ case REGULATOR_STATUS_ERROR:
+ case REGULATOR_STATUS_UNDEFINED:
+ return 0;
+ default:
+ return 1;
+ }
+}
+
static int _regulator_do_enable(struct regulator_dev *rdev)
{
int ret, delay;
@@ -2407,7 +2446,37 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
* together. */
trace_regulator_enable_delay(rdev_get_name(rdev));
- _regulator_enable_delay(delay);
+	/* If poll_enabled_time is set, poll up to the delay calculated
+	 * above, sleeping poll_enabled_time us between each check of
+	 * whether the regulator actually got enabled.
+ * If the regulator isn't enabled after enable_delay has
+ * expired, return -ETIMEDOUT.
+ */
+ if (rdev->desc->poll_enabled_time) {
+		int time_remaining = delay;
+
+ while (time_remaining > 0) {
+ _regulator_enable_delay(rdev->desc->poll_enabled_time);
+
+ if (rdev->desc->ops->get_status) {
+ ret = _regulator_check_status_enabled(rdev);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ break;
+ } else if (rdev->desc->ops->is_enabled(rdev))
+ break;
+
+ time_remaining -= rdev->desc->poll_enabled_time;
+ }
+
+ if (time_remaining <= 0) {
+ rdev_err(rdev, "Enabled check timed out\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ _regulator_enable_delay(delay);
+ }
trace_regulator_enable_complete(rdev_get_name(rdev));
@@ -5023,7 +5092,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
struct regulator_dev *rdev;
bool dangling_cfg_gpiod = false;
bool dangling_of_gpiod = false;
- bool reg_device_fail = false;
struct device *dev;
int ret, i;
@@ -5152,10 +5220,12 @@ regulator_register(const struct regulator_desc *regulator_desc,
}
/* register with sysfs */
+ device_initialize(&rdev->dev);
rdev->dev.class = &regulator_class;
rdev->dev.parent = dev;
dev_set_name(&rdev->dev, "regulator.%lu",
(unsigned long) atomic_inc_return(&regulator_no));
+ dev_set_drvdata(&rdev->dev, rdev);
/* set regulator constraints */
if (init_data)
@@ -5206,12 +5276,9 @@ regulator_register(const struct regulator_desc *regulator_desc,
!rdev->desc->fixed_uV)
rdev->is_switch = true;
- dev_set_drvdata(&rdev->dev, rdev);
- ret = device_register(&rdev->dev);
- if (ret != 0) {
- reg_device_fail = true;
+ ret = device_add(&rdev->dev);
+ if (ret != 0)
goto unset_supplies;
- }
rdev_init_debugfs(rdev);
@@ -5233,17 +5300,15 @@ unset_supplies:
mutex_unlock(&regulator_list_mutex);
wash:
kfree(rdev->coupling_desc.coupled_rdevs);
- kfree(rdev->constraints);
mutex_lock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
mutex_unlock(&regulator_list_mutex);
+ put_device(&rdev->dev);
+ rdev = NULL;
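+	/* rdev is freed by put_device() via the class release callback;
+	 * clearing it makes the kfree() in the clean path a no-op.
+	 */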
clean:
if (dangling_of_gpiod)
gpiod_put(config->ena_gpiod);
- if (reg_device_fail)
- put_device(&rdev->dev);
- else
- kfree(rdev);
+ kfree(rdev);
kfree(config);
rinse:
if (dangling_cfg_gpiod)
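The polling support added above is opt-in per driver via two regulator_desc
fields. A minimal sketch of a descriptor that asks the core to re-check the
status every 500 us within a 3 ms enable window (all names here are
hypothetical, not part of this patch):

	static const struct regulator_desc example_ldo_desc = {
		.name			= "example-ldo",
		.type			= REGULATOR_VOLTAGE,
		.owner			= THIS_MODULE,
		.ops			= &example_ldo_ops,	/* ideally implements .get_status */
		.enable_time		= 3000,	/* worst-case ramp time, in us */
		.poll_enabled_time	= 500,	/* poll interval while enabling, in us */
	};

When .get_status is not implemented, the loop above falls back to
.is_enabled for each check.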
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
index f80781d58a28..79b3eb3222c6 100644
--- a/drivers/regulator/cpcap-regulator.c
+++ b/drivers/regulator/cpcap-regulator.c
@@ -89,7 +89,7 @@
*/
#define CPCAP_REG_OFF_MODE_SEC BIT(15)
-/**
+/*
* SoC specific configuration for CPCAP regulator. There are at least three
* different SoCs each with their own parameters: omap3, omap4 and tegra2.
*
@@ -169,7 +169,7 @@ enum cpcap_regulator_id {
static int cpcap_regulator_enable(struct regulator_dev *rdev)
{
struct cpcap_regulator *regulator = rdev_get_drvdata(rdev);
- int error, ignore;
+ int error;
error = regulator_enable_regmap(rdev);
if (error)
@@ -180,7 +180,7 @@ static int cpcap_regulator_enable(struct regulator_dev *rdev)
regulator->assign_mask,
regulator->assign_mask);
if (error)
- ignore = regulator_disable_regmap(rdev);
+ regulator_disable_regmap(rdev);
}
return error;
@@ -193,7 +193,7 @@ static int cpcap_regulator_enable(struct regulator_dev *rdev)
static int cpcap_regulator_disable(struct regulator_dev *rdev)
{
struct cpcap_regulator *regulator = rdev_get_drvdata(rdev);
- int error, ignore;
+ int error;
if (rdev->desc->enable_val & CPCAP_REG_OFF_MODE_SEC) {
error = regmap_update_bits(rdev->regmap, regulator->assign_reg,
@@ -204,9 +204,9 @@ static int cpcap_regulator_disable(struct regulator_dev *rdev)
error = regulator_disable_regmap(rdev);
if (error && (rdev->desc->enable_val & CPCAP_REG_OFF_MODE_SEC)) {
- ignore = regmap_update_bits(rdev->regmap, regulator->assign_reg,
- regulator->assign_mask,
- regulator->assign_mask);
+ regmap_update_bits(rdev->regmap, regulator->assign_reg,
+ regulator->assign_mask,
+ regulator->assign_mask);
}
return error;
@@ -256,7 +256,7 @@ static int cpcap_regulator_set_mode(struct regulator_dev *rdev,
CPCAP_BIT_AUDIO_LOW_PWR, value);
}
-static struct regulator_ops cpcap_regulator_ops = {
+static const struct regulator_ops cpcap_regulator_ops = {
.enable = cpcap_regulator_enable,
.disable = cpcap_regulator_disable,
.is_enabled = regulator_is_enabled_regmap,
@@ -325,7 +325,7 @@ static const unsigned int vvib_val_tbl[] = { 1300000, 1800000, 2000000,
static const unsigned int vusb_val_tbl[] = { 0, 3300000, };
static const unsigned int vaudio_val_tbl[] = { 0, 2775000, };
-/**
+/*
 * SoC specific configuration for omap4. The data below comes from the Motorola
* Linux kernel tree. It's basically the values of cpcap_regltr_data,
* cpcap_regulator_mode_values and cpcap_regulator_off_mode_values, see
diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
new file mode 100644
index 000000000000..3117bbd2826b
--- /dev/null
+++ b/drivers/regulator/cros-ec-regulator.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright 2020 Google LLC.
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+struct cros_ec_regulator_data {
+ struct regulator_desc desc;
+ struct regulator_dev *dev;
+ struct cros_ec_device *ec_dev;
+
+ u32 index;
+
+ u16 *voltages_mV;
+ u16 num_voltages;
+};
+
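+/*
+ * Thin wrapper around cros_ec_cmd_xfer_status(): allocate a host command
+ * message big enough for the larger of the two payloads, send @outdata
+ * and copy the reply back into @indata. Returns a negative errno on
+ * failure, otherwise the result of the transfer.
+ */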
+static int cros_ec_cmd(struct cros_ec_device *ec, u32 version, u32 command,
+ void *outdata, u32 outsize, void *indata, u32 insize)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = version;
+ msg->command = command;
+ msg->outsize = outsize;
+ msg->insize = insize;
+
+ if (outdata && outsize > 0)
+ memcpy(msg->data, outdata, outsize);
+
+ ret = cros_ec_cmd_xfer_status(ec, msg);
+ if (ret < 0)
+ goto cleanup;
+
+ if (insize)
+ memcpy(indata, msg->data, insize);
+
+cleanup:
+ kfree(msg);
+ return ret;
+}
+
+static int cros_ec_regulator_enable(struct regulator_dev *dev)
+{
+ struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
+ struct ec_params_regulator_enable cmd = {
+ .index = data->index,
+ .enable = 1,
+ };
+
+ return cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_ENABLE, &cmd,
+ sizeof(cmd), NULL, 0);
+}
+
+static int cros_ec_regulator_disable(struct regulator_dev *dev)
+{
+ struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
+ struct ec_params_regulator_enable cmd = {
+ .index = data->index,
+ .enable = 0,
+ };
+
+ return cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_ENABLE, &cmd,
+ sizeof(cmd), NULL, 0);
+}
+
+static int cros_ec_regulator_is_enabled(struct regulator_dev *dev)
+{
+ struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
+ struct ec_params_regulator_is_enabled cmd = {
+ .index = data->index,
+ };
+ struct ec_response_regulator_is_enabled resp;
+ int ret;
+
+ ret = cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_IS_ENABLED, &cmd,
+ sizeof(cmd), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+ return resp.enabled;
+}
+
+static int cros_ec_regulator_list_voltage(struct regulator_dev *dev,
+ unsigned int selector)
+{
+ struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
+
+ if (selector >= data->num_voltages)
+ return -EINVAL;
+
+ return data->voltages_mV[selector] * 1000;
+}
+
+static int cros_ec_regulator_get_voltage(struct regulator_dev *dev)
+{
+ struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
+ struct ec_params_regulator_get_voltage cmd = {
+ .index = data->index,
+ };
+ struct ec_response_regulator_get_voltage resp;
+ int ret;
+
+ ret = cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_GET_VOLTAGE, &cmd,
+ sizeof(cmd), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+ return resp.voltage_mv * 1000;
+}
+
+static int cros_ec_regulator_set_voltage(struct regulator_dev *dev, int min_uV,
+ int max_uV, unsigned int *selector)
+{
+ struct cros_ec_regulator_data *data = rdev_get_drvdata(dev);
+ int min_mV = DIV_ROUND_UP(min_uV, 1000);
+ int max_mV = max_uV / 1000;
+ struct ec_params_regulator_set_voltage cmd = {
+ .index = data->index,
+ .min_mv = min_mV,
+ .max_mv = max_mV,
+ };
+
+ /*
+ * This can happen when the given range [min_uV, max_uV] doesn't
+ * contain any voltage that can be represented exactly in mV.
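+	 * For example, [1000001, 1000999] uV rounds to min_mv = 1001 but
+	 * max_mv = 1000, so no exact millivolt value fits the request.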
+ */
+ if (min_mV > max_mV)
+ return -EINVAL;
+
+ return cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_SET_VOLTAGE, &cmd,
+ sizeof(cmd), NULL, 0);
+}
+
+static const struct regulator_ops cros_ec_regulator_voltage_ops = {
+ .enable = cros_ec_regulator_enable,
+ .disable = cros_ec_regulator_disable,
+ .is_enabled = cros_ec_regulator_is_enabled,
+ .list_voltage = cros_ec_regulator_list_voltage,
+ .get_voltage = cros_ec_regulator_get_voltage,
+ .set_voltage = cros_ec_regulator_set_voltage,
+};
+
+static int cros_ec_regulator_init_info(struct device *dev,
+ struct cros_ec_regulator_data *data)
+{
+ struct ec_params_regulator_get_info cmd = {
+ .index = data->index,
+ };
+ struct ec_response_regulator_get_info resp;
+ int ret;
+
+ ret = cros_ec_cmd(data->ec_dev, 0, EC_CMD_REGULATOR_GET_INFO, &cmd,
+ sizeof(cmd), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ data->num_voltages =
+ min_t(u16, ARRAY_SIZE(resp.voltages_mv), resp.num_voltages);
+ data->voltages_mV =
+ devm_kmemdup(dev, resp.voltages_mv,
+			     sizeof(u16) * data->num_voltages, GFP_KERNEL);
+	if (!data->voltages_mV)
+		return -ENOMEM;
+
+	data->desc.n_voltages = data->num_voltages;
+
+ /* Make sure the returned name is always a valid string */
+ resp.name[ARRAY_SIZE(resp.name) - 1] = '\0';
+ data->desc.name = devm_kstrdup(dev, resp.name, GFP_KERNEL);
+ if (!data->desc.name)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int cros_ec_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct cros_ec_regulator_data *drvdata;
+ struct regulator_init_data *init_data;
+ struct regulator_config cfg = {};
+ struct regulator_desc *desc;
+ int ret;
+
+ drvdata = devm_kzalloc(
+ &pdev->dev, sizeof(struct cros_ec_regulator_data), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->ec_dev = dev_get_drvdata(dev->parent);
+ desc = &drvdata->desc;
+
+ init_data = of_get_regulator_init_data(dev, np, desc);
+ if (!init_data)
+ return -EINVAL;
+
+ ret = of_property_read_u32(np, "reg", &drvdata->index);
+ if (ret < 0)
+ return ret;
+
+ desc->owner = THIS_MODULE;
+ desc->type = REGULATOR_VOLTAGE;
+ desc->ops = &cros_ec_regulator_voltage_ops;
+
+ ret = cros_ec_regulator_init_info(dev, drvdata);
+ if (ret < 0)
+ return ret;
+
+ cfg.dev = &pdev->dev;
+ cfg.init_data = init_data;
+ cfg.driver_data = drvdata;
+ cfg.of_node = np;
+
+ drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
+	if (IS_ERR(drvdata->dev)) {
+		ret = PTR_ERR(drvdata->dev);
+		dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
+		return ret;
+ }
+
+ platform_set_drvdata(pdev, drvdata);
+
+ return 0;
+}
+
+static const struct of_device_id regulator_cros_ec_of_match[] = {
+ { .compatible = "google,cros-ec-regulator", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, regulator_cros_ec_of_match);
+
+static struct platform_driver cros_ec_regulator_driver = {
+ .probe = cros_ec_regulator_probe,
+ .driver = {
+ .name = "cros-ec-regulator",
+ .of_match_table = regulator_cros_ec_of_match,
+ },
+};
+
+module_platform_driver(cros_ec_regulator_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChromeOS EC controlled regulator");
+MODULE_AUTHOR("Pi-Hsun Shih <pihsun@chromium.org>");
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 2ea4362ffa5c..297b3aa7c753 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -17,6 +17,7 @@
#include <linux/gpio/consumer.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/da9211.h>
+#include <dt-bindings/regulator/dlg,da9211-regulator.h>
#include "da9211-regulator.h"
/* DEVICE IDs */
@@ -24,10 +25,6 @@
#define DA9213_DEVICE_ID 0x23
#define DA9215_DEVICE_ID 0x24
-#define DA9211_BUCK_MODE_SLEEP 1
-#define DA9211_BUCK_MODE_SYNC 2
-#define DA9211_BUCK_MODE_AUTO 3
-
/* DA9211 REGULATOR IDs */
#define DA9211_ID_BUCKA 0
#define DA9211_ID_BUCKB 1
@@ -89,6 +86,20 @@ static const int da9215_current_limits[] = {
5600000, 5800000, 6000000, 6200000, 6400000, 6600000, 6800000, 7000000
};
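+/*
+ * Backs .of_map_mode: translates the DA9211_BUCK_MODE_* values from
+ * dt-bindings/regulator/dlg,da9211-regulator.h into REGULATOR_MODE_*.
+ */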
+static unsigned int da9211_map_buck_mode(unsigned int mode)
+{
+ switch (mode) {
+ case DA9211_BUCK_MODE_SLEEP:
+ return REGULATOR_MODE_STANDBY;
+ case DA9211_BUCK_MODE_SYNC:
+ return REGULATOR_MODE_FAST;
+ case DA9211_BUCK_MODE_AUTO:
+ return REGULATOR_MODE_NORMAL;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
static unsigned int da9211_buck_get_mode(struct regulator_dev *rdev)
{
int id = rdev_get_id(rdev);
@@ -236,6 +247,7 @@ static const struct regulator_ops da9211_buck_ops = {
.vsel_reg = DA9211_REG_VBUCKA_A + DA9211_ID_##_id * 2,\
.vsel_mask = DA9211_VBUCK_MASK,\
.owner = THIS_MODULE,\
+ .of_map_mode = da9211_map_buck_mode,\
}
static struct regulator_desc da9211_regulators[] = {
@@ -245,8 +257,14 @@ static struct regulator_desc da9211_regulators[] = {
#ifdef CONFIG_OF
static struct of_regulator_match da9211_matches[] = {
- [DA9211_ID_BUCKA] = { .name = "BUCKA" },
- [DA9211_ID_BUCKB] = { .name = "BUCKB" },
+ [DA9211_ID_BUCKA] = {
+ .name = "BUCKA",
+ .desc = &da9211_regulators[DA9211_ID_BUCKA],
+ },
+ [DA9211_ID_BUCKB] = {
+ .name = "BUCKB",
+ .desc = &da9211_regulators[DA9211_ID_BUCKB],
+ },
};
static struct da9211_pdata *da9211_parse_regulators_dt(
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
index f604c8db6d0e..c3ad6aa6b5d3 100644
--- a/drivers/regulator/dbx500-prcmu.c
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -110,13 +110,6 @@ static int ux500_regulator_status_show(struct seq_file *s, void *p)
}
DEFINE_SHOW_ATTRIBUTE(ux500_regulator_status);
-int __attribute__((weak)) dbx500_regulator_testcase(
- struct dbx500_regulator_info *regulator_info,
- int num_regulators)
-{
- return 0;
-}
-
int
ux500_regulator_debug_init(struct platform_device *pdev,
struct dbx500_regulator_info *regulator_info,
@@ -152,7 +145,6 @@ ux500_regulator_debug_init(struct platform_device *pdev,
if (!rdebug.state_after_suspend)
goto exit_free;
- dbx500_regulator_testcase(regulator_info, num_regulators);
return 0;
exit_free:
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
index 3ea1c170f840..3091210889e3 100644
--- a/drivers/regulator/devres.c
+++ b/drivers/regulator/devres.c
@@ -41,8 +41,8 @@ static struct regulator *_devm_regulator_get(struct device *dev, const char *id,
/**
* devm_regulator_get - Resource managed regulator_get()
- * @dev: device for regulator "consumer"
- * @id: Supply name or regulator ID.
+ * @dev: device to supply
+ * @id: supply name or regulator ID.
*
* Managed regulator_get(). Regulators returned from this function are
* automatically regulator_put() on driver detach. See regulator_get() for more
@@ -56,8 +56,8 @@ EXPORT_SYMBOL_GPL(devm_regulator_get);
/**
* devm_regulator_get_exclusive - Resource managed regulator_get_exclusive()
- * @dev: device for regulator "consumer"
- * @id: Supply name or regulator ID.
+ * @dev: device to supply
+ * @id: supply name or regulator ID.
*
* Managed regulator_get_exclusive(). Regulators returned from this function
* are automatically regulator_put() on driver detach. See regulator_get() for
@@ -72,8 +72,8 @@ EXPORT_SYMBOL_GPL(devm_regulator_get_exclusive);
/**
* devm_regulator_get_optional - Resource managed regulator_get_optional()
- * @dev: device for regulator "consumer"
- * @id: Supply name or regulator ID.
+ * @dev: device to supply
+ * @id: supply name or regulator ID.
*
* Managed regulator_get_optional(). Regulators returned from this
* function are automatically regulator_put() on driver detach. See
@@ -130,9 +130,9 @@ static void devm_regulator_bulk_release(struct device *dev, void *res)
/**
* devm_regulator_bulk_get - managed get multiple regulator consumers
*
- * @dev: Device to supply
- * @num_consumers: Number of consumers to register
- * @consumers: Configuration of consumers; clients are stored here.
+ * @dev: device to supply
+ * @num_consumers: number of consumers to register
+ * @consumers: configuration of consumers; clients are stored here.
*
* @return 0 on success, an errno on failure.
*
@@ -173,8 +173,9 @@ static void devm_rdev_release(struct device *dev, void *res)
/**
* devm_regulator_register - Resource managed regulator_register()
+ * @dev: device to supply
* @regulator_desc: regulator to register
- * @config: runtime configuration for regulator
+ * @config: runtime configuration for regulator
*
* Called by regulator drivers to register a regulator. Returns a
* valid pointer to struct regulator_dev on success or an ERR_PTR() on
@@ -216,7 +217,8 @@ static int devm_rdev_match(struct device *dev, void *res, void *data)
/**
* devm_regulator_unregister - Resource managed regulator_unregister()
- * @regulator: regulator to free
+ * @dev: device to supply
+ * @rdev: regulator to free
*
* Unregister a regulator registered with devm_regulator_register().
* Normally this function will not need to be called and the resource
@@ -257,10 +259,10 @@ static void devm_regulator_destroy_supply_alias(struct device *dev, void *res)
* devm_regulator_register_supply_alias - Resource managed
* regulator_register_supply_alias()
*
- * @dev: device that will be given as the regulator "consumer"
- * @id: Supply name or regulator ID
+ * @dev: device to supply
+ * @id: supply name or regulator ID
* @alias_dev: device that should be used to lookup the supply
- * @alias_id: Supply name or regulator ID that should be used to lookup the
+ * @alias_id: supply name or regulator ID that should be used to lookup the
* supply
*
* The supply alias will automatically be unregistered when the source
@@ -298,8 +300,8 @@ EXPORT_SYMBOL_GPL(devm_regulator_register_supply_alias);
* devm_regulator_unregister_supply_alias - Resource managed
* regulator_unregister_supply_alias()
*
- * @dev: device that will be given as the regulator "consumer"
- * @id: Supply name or regulator ID
+ * @dev: device to supply
+ * @id: supply name or regulator ID
*
* Unregister an alias registered with
* devm_regulator_register_supply_alias(). Normally this function
@@ -325,12 +327,12 @@ EXPORT_SYMBOL_GPL(devm_regulator_unregister_supply_alias);
* devm_regulator_bulk_register_supply_alias - Managed register
* multiple aliases
*
- * @dev: device that will be given as the regulator "consumer"
- * @id: List of supply names or regulator IDs
+ * @dev: device to supply
+ * @id: list of supply names or regulator IDs
* @alias_dev: device that should be used to lookup the supply
- * @alias_id: List of supply names or regulator IDs that should be used to
- * lookup the supply
- * @num_id: Number of aliases to register
+ * @alias_id: list of supply names or regulator IDs that should be used to
+ * lookup the supply
+ * @num_id: number of aliases to register
*
* @return 0 on success, an errno on failure.
*
@@ -375,9 +377,9 @@ EXPORT_SYMBOL_GPL(devm_regulator_bulk_register_supply_alias);
* devm_regulator_bulk_unregister_supply_alias - Managed unregister
* multiple aliases
*
- * @dev: device that will be given as the regulator "consumer"
- * @id: List of supply names or regulator IDs
- * @num_id: Number of aliases to unregister
+ * @dev: device to supply
+ * @id: list of supply names or regulator IDs
+ * @num_id: number of aliases to unregister
*
* Unregister aliases registered with
* devm_regulator_bulk_register_supply_alias(). Normally this function
@@ -421,7 +423,7 @@ static void devm_regulator_destroy_notifier(struct device *dev, void *res)
* regulator_register_notifier
*
* @regulator: regulator source
- * @nb: notifier block
+ * @nb: notifier block
*
 * The notifier will be registered under the consumer device and will
* automatically be unregistered when the source device is unbound.
@@ -458,7 +460,7 @@ EXPORT_SYMBOL_GPL(devm_regulator_register_notifier);
* regulator_unregister_notifier()
*
* @regulator: regulator source
- * @nb: notifier block
+ * @nb: notifier block
*
* Unregister a notifier registered with devm_regulator_register_notifier().
* Normally this function will not need to be called and the resource
diff --git a/drivers/regulator/fan53880.c b/drivers/regulator/fan53880.c
new file mode 100644
index 000000000000..e83eb4fb1876
--- /dev/null
+++ b/drivers/regulator/fan53880.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+enum fan53880_regulator_ids {
+ FAN53880_LDO1,
+ FAN53880_LDO2,
+ FAN53880_LDO3,
+ FAN53880_LDO4,
+ FAN53880_BUCK,
+ FAN53880_BOOST,
+};
+
+enum fan53880_registers {
+ FAN53880_PRODUCT_ID = 0x00,
+ FAN53880_SILICON_REV,
+ FAN53880_BUCKVOUT,
+ FAN53880_BOOSTVOUT,
+ FAN53880_LDO1VOUT,
+ FAN53880_LDO2VOUT,
+ FAN53880_LDO3VOUT,
+ FAN53880_LDO4VOUT,
+ FAN53880_IOUT,
+ FAN53880_ENABLE,
+ FAN53880_ENABLE_BOOST,
+};
+
+#define FAN53880_ID 0x01
+
+static const struct regulator_ops fan53880_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
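+/*
+ * Each LDO encodes its fixed default voltage at selector 0x0; selectors
+ * 0xf-0x73 then span 0.8 V to 3.3 V in 25 mV steps.
+ */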
+#define FAN53880_LDO(_num, _supply, _default) \
+ [FAN53880_LDO ## _num] = { \
+ .name = "LDO"#_num, \
+ .of_match = of_match_ptr("LDO"#_num), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .linear_ranges = (struct linear_range[]) { \
+ REGULATOR_LINEAR_RANGE(_default, 0x0, 0x0, 0), \
+ REGULATOR_LINEAR_RANGE(800000, 0xf, 0x73, 25000), \
+ }, \
+ .n_linear_ranges = 2, \
+ .vsel_reg = FAN53880_LDO ## _num ## VOUT, \
+ .vsel_mask = 0x7f, \
+ .enable_reg = FAN53880_ENABLE, \
+ .enable_mask = BIT(_num - 1), \
+ .enable_time = 150, \
+ .supply_name = _supply, \
+ .ops = &fan53880_ops, \
+ }
+
+static const struct regulator_desc fan53880_regulators[] = {
+ FAN53880_LDO(1, "VIN12", 2800000),
+ FAN53880_LDO(2, "VIN12", 2800000),
+ FAN53880_LDO(3, "VIN3", 1800000),
+ FAN53880_LDO(4, "VIN4", 1800000),
+ [FAN53880_BUCK] = {
+ .name = "BUCK",
+ .of_match = of_match_ptr("BUCK"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1100000, 0x0, 0x0, 0),
+ REGULATOR_LINEAR_RANGE(600000, 0x1f, 0xf7, 12500),
+ },
+ .n_linear_ranges = 2,
+ .vsel_reg = FAN53880_BUCKVOUT,
+ .vsel_mask = 0x7f,
+ .enable_reg = FAN53880_ENABLE,
+ .enable_mask = 0x10,
+ .enable_time = 480,
+ .supply_name = "PVIN",
+ .ops = &fan53880_ops,
+ },
+ [FAN53880_BOOST] = {
+ .name = "BOOST",
+ .of_match = of_match_ptr("BOOST"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(5000000, 0x0, 0x0, 0),
+ REGULATOR_LINEAR_RANGE(3000000, 0x4, 0x70, 25000),
+ },
+ .n_linear_ranges = 2,
+ .vsel_reg = FAN53880_BOOSTVOUT,
+ .vsel_mask = 0x7f,
+ .enable_reg = FAN53880_ENABLE_BOOST,
+ .enable_mask = 0xff,
+ .enable_time = 580,
+ .supply_name = "PVIN",
+ .ops = &fan53880_ops,
+ },
+};
+
+static const struct regmap_config fan53880_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = FAN53880_ENABLE_BOOST,
+};
+
+static int fan53880_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ int i, ret;
+ unsigned int data;
+
+ regmap = devm_regmap_init_i2c(i2c, &fan53880_regmap);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "Failed to create regmap: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(regmap, FAN53880_PRODUCT_ID, &data);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to read PRODUCT_ID: %d\n", ret);
+ return ret;
+ }
+ if (data != FAN53880_ID) {
+ dev_err(&i2c->dev, "Unsupported device id: 0x%x.\n", data);
+ return -ENODEV;
+ }
+
+ config.dev = &i2c->dev;
+ config.init_data = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(fan53880_regulators); i++) {
+ rdev = devm_regulator_register(&i2c->dev,
+ &fan53880_regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&i2c->dev, "Failed to register %s: %d\n",
+ fan53880_regulators[i].name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id fan53880_dt_ids[] = {
+ { .compatible = "onnn,fan53880", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fan53880_dt_ids);
+#endif
+
+static const struct i2c_device_id fan53880_i2c_id[] = {
+ { "fan53880", },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, fan53880_i2c_id);
+
+static struct i2c_driver fan53880_regulator_driver = {
+ .driver = {
+ .name = "fan53880",
+ .of_match_table = of_match_ptr(fan53880_dt_ids),
+ },
+ .probe = fan53880_i2c_probe,
+ .id_table = fan53880_i2c_id,
+};
+module_i2c_driver(fan53880_regulator_driver);
+
+MODULE_DESCRIPTION("FAN53880 PMIC voltage regulator driver");
+MODULE_AUTHOR("Christoph Fritz <chf.fritz@googlemail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index bc0bbd99e98d..d54830e48b8d 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -210,7 +210,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
/*
* The signal will be inverted by the GPIO core if flagged so in the
- * decriptor.
+ * descriptor.
*/
if (config->enabled_at_boot)
gflags = GPIOD_OUT_HIGH;
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 110ee6fe76c4..5927d4f3eabd 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -148,6 +148,13 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
config->supply_name = config->init_data->constraints.name;
+ if (config->init_data->constraints.boot_on)
+ config->enabled_at_boot = true;
+
+ /*
+ * Do not use: undocumented device tree property.
+ * This is kept around solely for device tree ABI stability.
+ */
if (of_property_read_bool(np, "enable-at-boot"))
config->enabled_at_boot = true;
@@ -311,7 +318,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
/*
* The signal will be inverted by the GPIO core if flagged so in the
- * decriptor.
+ * descriptor.
*/
if (config->enabled_at_boot)
gflags = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE;
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index 66219d8dfc1a..dc631c1a46b4 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -5,7 +5,7 @@
// Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
// http://www.hisilicon.com
// Copyright (c) <2013-2014> Linaro Ltd.
-// http://www.linaro.org
+// https://www.linaro.org
//
// Author: Guodong Xu <guodong.xu@linaro.org>
diff --git a/drivers/regulator/hi6421v530-regulator.c b/drivers/regulator/hi6421v530-regulator.c
index 06ae65199afd..988115f9b594 100644
--- a/drivers/regulator/hi6421v530-regulator.c
+++ b/drivers/regulator/hi6421v530-regulator.c
@@ -5,7 +5,7 @@
// Copyright (c) <2017> HiSilicon Technologies Co., Ltd.
// http://www.hisilicon.com
// Copyright (c) <2017> Linaro Ltd.
-// http://www.linaro.org
+// https://www.linaro.org
//
// Author: Wang Xiaoyin <hw.wangxiaoyin@hisilicon.com>
// Guodong Xu <guodong.xu@linaro.org>
diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c
index fe049b67e7d5..c38387e0fbb2 100644
--- a/drivers/regulator/lp873x-regulator.c
+++ b/drivers/regulator/lp873x-regulator.c
@@ -1,7 +1,7 @@
/*
* Regulator driver for LP873X PMIC
*
- * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index 5d525dacf959..eeab9d3c824b 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -2,7 +2,7 @@
/*
* Regulator driver for LP87565 PMIC
*
- * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/module.h>
@@ -11,8 +11,8 @@
#include <linux/mfd/lp87565.h>
-#define LP87565_REGULATOR(_name, _id, _of, _ops, _n, _vr, _vm, _er, _em, \
- _delay, _lr, _cr) \
+#define LP87565_REGULATOR(_name, _id, _of, _ops, _n, _vr, _vm, \
+ _er, _em, _ev, _delay, _lr, _cr) \
[_id] = { \
.desc = { \
.name = _name, \
@@ -28,6 +28,7 @@
.vsel_mask = _vm, \
.enable_reg = _er, \
.enable_mask = _em, \
+ .enable_val = _ev, \
.ramp_delay = _delay, \
.linear_ranges = _lr, \
.n_linear_ranges = ARRAY_SIZE(_lr), \
@@ -121,38 +122,54 @@ static const struct lp87565_regulator regulators[] = {
LP87565_REGULATOR("BUCK0", LP87565_BUCK_0, "buck0", lp87565_buck_ops,
256, LP87565_REG_BUCK0_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK0_CTRL_1,
+ LP87565_BUCK_CTRL_1_EN |
+ LP87565_BUCK_CTRL_1_EN_PIN_CTRL,
LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK0_CTRL_2),
LP87565_REGULATOR("BUCK1", LP87565_BUCK_1, "buck1", lp87565_buck_ops,
256, LP87565_REG_BUCK1_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK1_CTRL_1,
+ LP87565_BUCK_CTRL_1_EN |
+ LP87565_BUCK_CTRL_1_EN_PIN_CTRL,
LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK1_CTRL_2),
LP87565_REGULATOR("BUCK2", LP87565_BUCK_2, "buck2", lp87565_buck_ops,
256, LP87565_REG_BUCK2_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK2_CTRL_1,
+ LP87565_BUCK_CTRL_1_EN |
+ LP87565_BUCK_CTRL_1_EN_PIN_CTRL,
LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK2_CTRL_2),
LP87565_REGULATOR("BUCK3", LP87565_BUCK_3, "buck3", lp87565_buck_ops,
256, LP87565_REG_BUCK3_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK3_CTRL_1,
+ LP87565_BUCK_CTRL_1_EN |
+ LP87565_BUCK_CTRL_1_EN_PIN_CTRL,
LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK3_CTRL_2),
LP87565_REGULATOR("BUCK10", LP87565_BUCK_10, "buck10", lp87565_buck_ops,
256, LP87565_REG_BUCK0_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK0_CTRL_1,
LP87565_BUCK_CTRL_1_EN |
+ LP87565_BUCK_CTRL_1_EN_PIN_CTRL |
+ LP87565_BUCK_CTRL_1_FPWM_MP_0_2,
+ LP87565_BUCK_CTRL_1_EN |
LP87565_BUCK_CTRL_1_FPWM_MP_0_2, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK0_CTRL_2),
LP87565_REGULATOR("BUCK23", LP87565_BUCK_23, "buck23", lp87565_buck_ops,
256, LP87565_REG_BUCK2_VOUT, LP87565_BUCK_VSET,
LP87565_REG_BUCK2_CTRL_1,
+ LP87565_BUCK_CTRL_1_EN |
+ LP87565_BUCK_CTRL_1_EN_PIN_CTRL,
LP87565_BUCK_CTRL_1_EN, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK2_CTRL_2),
LP87565_REGULATOR("BUCK3210", LP87565_BUCK_3210, "buck3210",
lp87565_buck_ops, 256, LP87565_REG_BUCK0_VOUT,
LP87565_BUCK_VSET, LP87565_REG_BUCK0_CTRL_1,
LP87565_BUCK_CTRL_1_EN |
+ LP87565_BUCK_CTRL_1_EN_PIN_CTRL |
+ LP87565_BUCK_CTRL_1_FPWM_MP_0_2,
+ LP87565_BUCK_CTRL_1_EN |
LP87565_BUCK_CTRL_1_FPWM_MP_0_2, 3230,
buck0_1_2_3_ranges, LP87565_REG_BUCK0_CTRL_2),
};
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index e12e52c69e52..093b3e4a6303 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -221,7 +221,7 @@ static const struct regulator_ops ltc3676_fixed_regulator_ops = {
#define LTC3676_FIXED_REG(_id, _name, _en_reg, _en_bit) \
LTC3676_REG(_id, _name, fixed, LTC3676_ ## _en_reg, _en_bit, 0, 0)
-static struct regulator_desc ltc3676_regulators[LTC3676_NUM_REGULATORS] = {
+static const struct regulator_desc ltc3676_regulators[LTC3676_NUM_REGULATORS] = {
LTC3676_LINEAR_REG(SW1, sw1, BUCK1, DVB1A),
LTC3676_LINEAR_REG(SW2, sw2, BUCK2, DVB2A),
LTC3676_LINEAR_REG(SW3, sw3, BUCK3, DVB3A),
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index 07a150c9bbf2..e34face736f4 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -155,7 +155,7 @@ static const struct regulator_desc max77836_supported_regulators[] = {
[MAX77836_LDO2] = MAX77836_LDO_REG(2),
};
-/**
+/*
* Registers for regulators of max77836 use different I2C slave addresses so
* different regmaps must be used for them.
*
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index 96dc0eea7659..1a6fd68f3fb1 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -109,7 +109,7 @@ struct max8907_regulator {
static const struct regulator_ops max8907_mbatt_ops = {
};
-static struct regulator_ops max8907_ldo_ops = {
+static const struct regulator_ops max8907_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -128,7 +128,7 @@ static const struct regulator_ops max8907_fixed_ops = {
.list_voltage = regulator_list_voltage_linear,
};
-static struct regulator_ops max8907_out5v_ops = {
+static const struct regulator_ops max8907_out5v_ops = {
.list_voltage = regulator_list_voltage_linear,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -145,7 +145,7 @@ static const struct regulator_ops max8907_bbat_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
-static struct regulator_desc max8907_regulators[] = {
+static const struct regulator_desc max8907_regulators[] = {
REG_MBATT(),
REG_LDO(SD1, "in-v1", MAX8907_REG_SDCTL1, 650000, 2225000, 25000),
REG_LDO(SD2, "in-v2", MAX8907_REG_SDCTL2, 637500, 1425000, 12500),
diff --git a/drivers/regulator/max8997-regulator.c b/drivers/regulator/max8997-regulator.c
index 4d2487279a0a..ba47a5e2fbcb 100644
--- a/drivers/regulator/max8997-regulator.c
+++ b/drivers/regulator/max8997-regulator.c
@@ -732,7 +732,7 @@ static int max8997_reg_disable_suspend(struct regulator_dev *rdev)
return max8997_update_reg(i2c, reg, ~pattern, mask);
}
-static struct regulator_ops max8997_ldo_ops = {
+static const struct regulator_ops max8997_ldo_ops = {
.list_voltage = max8997_list_voltage,
.is_enabled = max8997_reg_is_enabled,
.enable = max8997_reg_enable,
@@ -742,7 +742,7 @@ static struct regulator_ops max8997_ldo_ops = {
.set_suspend_disable = max8997_reg_disable_suspend,
};
-static struct regulator_ops max8997_buck_ops = {
+static const struct regulator_ops max8997_buck_ops = {
.list_voltage = max8997_list_voltage,
.is_enabled = max8997_reg_is_enabled,
.enable = max8997_reg_enable,
@@ -753,7 +753,7 @@ static struct regulator_ops max8997_buck_ops = {
.set_suspend_disable = max8997_reg_disable_suspend,
};
-static struct regulator_ops max8997_fixedvolt_ops = {
+static const struct regulator_ops max8997_fixedvolt_ops = {
.list_voltage = max8997_list_voltage,
.is_enabled = max8997_reg_is_enabled,
.enable = max8997_reg_enable,
@@ -761,7 +761,7 @@ static struct regulator_ops max8997_fixedvolt_ops = {
.set_suspend_disable = max8997_reg_disable_suspend,
};
-static struct regulator_ops max8997_safeout_ops = {
+static const struct regulator_ops max8997_safeout_ops = {
.list_voltage = regulator_list_voltage_table,
.is_enabled = max8997_reg_is_enabled,
.enable = max8997_reg_enable,
@@ -771,7 +771,7 @@ static struct regulator_ops max8997_safeout_ops = {
.set_suspend_disable = max8997_reg_disable_suspend,
};
-static struct regulator_ops max8997_fixedstate_ops = {
+static const struct regulator_ops max8997_fixedstate_ops = {
.list_voltage = max8997_list_voltage_charger_cv,
.get_voltage_sel = max8997_get_voltage_sel,
.set_voltage = max8997_set_voltage_charger_cv,
@@ -805,7 +805,7 @@ static int max8997_get_current_limit(struct regulator_dev *rdev)
return max8997_list_voltage(rdev, sel);
}
-static struct regulator_ops max8997_charger_ops = {
+static const struct regulator_ops max8997_charger_ops = {
.is_enabled = max8997_reg_is_enabled,
.enable = max8997_reg_enable,
.disable = max8997_reg_disable,
@@ -813,7 +813,7 @@ static struct regulator_ops max8997_charger_ops = {
.set_current_limit = max8997_set_current_limit,
};
-static struct regulator_ops max8997_charger_fixedstate_ops = {
+static const struct regulator_ops max8997_charger_fixedstate_ops = {
.get_current_limit = max8997_get_current_limit,
.set_current_limit = max8997_set_current_limit,
};
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 340413bba0c5..ac69bdd398cb 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -415,7 +415,7 @@ static int max8998_set_current_limit(struct regulator_dev *rdev,
sel, rdev->desc->csel_mask);
}
-int max8998_get_current_limit(struct regulator_dev *rdev)
+static int max8998_get_current_limit(struct regulator_dev *rdev)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8998->iodev->i2c;
diff --git a/drivers/regulator/mp886x.c b/drivers/regulator/mp886x.c
index 1786f7162019..d3d475f717f4 100644
--- a/drivers/regulator/mp886x.c
+++ b/drivers/regulator/mp886x.c
@@ -206,8 +206,7 @@ static const struct regmap_config mp886x_regmap_config = {
.val_bits = 8,
};
-static int mp886x_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mp886x_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
@@ -280,7 +279,7 @@ static struct i2c_driver mp886x_regulator_driver = {
.name = "mp886x-regulator",
.of_match_table = of_match_ptr(mp886x_dt_ids),
},
- .probe = mp886x_i2c_probe,
+ .probe_new = mp886x_i2c_probe,
.id_table = mp886x_id,
};
module_i2c_driver(mp886x_regulator_driver);
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
index 269c2a6028e8..0a30df5e414f 100644
--- a/drivers/regulator/mt6397-regulator.c
+++ b/drivers/regulator/mt6397-regulator.c
@@ -13,9 +13,7 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/mt6397-regulator.h>
#include <linux/regulator/of_regulator.h>
-
-#define MT6397_BUCK_MODE_AUTO 0
-#define MT6397_BUCK_MODE_FORCE_PWM 1
+#include <dt-bindings/regulator/mediatek,mt6397-regulator.h>
/*
* MT6397 regulators' information
@@ -55,6 +53,7 @@ struct mt6397_regulator_info {
.vsel_mask = vosel_mask, \
.enable_reg = enreg, \
.enable_mask = BIT(0), \
+ .of_map_mode = mt6397_map_mode, \
}, \
.qi = BIT(13), \
.vselon_reg = voselon, \
@@ -146,6 +145,18 @@ static const unsigned int ldo_volt_table7[] = {
1300000, 1500000, 1800000, 2000000, 2500000, 2800000, 3000000, 3300000,
};
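+/* Backs .of_map_mode: translates the DT MT6397_BUCK_MODE_* values into
+ * REGULATOR_MODE_* for the regulator core.
+ */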
+static unsigned int mt6397_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case MT6397_BUCK_MODE_AUTO:
+ return REGULATOR_MODE_NORMAL;
+ case MT6397_BUCK_MODE_FORCE_PWM:
+ return REGULATOR_MODE_FAST;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
static int mt6397_regulator_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 87637eb6bcbc..06c0b15fe4c0 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -532,7 +532,7 @@ static bool of_coupling_find_node(struct device_node *src,
/**
* of_check_coupling_data - Parse rdev's coupling properties and check data
* consistency
- * @rdev - pointer to regulator_dev whose data is checked
+ * @rdev: pointer to regulator_dev whose data is checked
*
* Function checks if all the following conditions are met:
* - rdev's max_spread is greater than 0
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index bfc15dd3f730..4eccf12f39de 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -1,7 +1,7 @@
/*
* pbias-regulator.c
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
* Author: Balaji T K <balajitk@ti.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
new file mode 100644
index 000000000000..eb5822bf53e0
--- /dev/null
+++ b/drivers/regulator/pca9450-regulator.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 NXP.
+ * NXP PCA9450 pmic driver
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/pca9450.h>
+
+struct pc9450_dvs_config {
+ unsigned int run_reg; /* dvs0 */
+ unsigned int run_mask;
+ unsigned int standby_reg; /* dvs1 */
+ unsigned int standby_mask;
+};
+
+struct pca9450_regulator_desc {
+ struct regulator_desc desc;
+ const struct pc9450_dvs_config dvs;
+};
+
+struct pca9450 {
+ struct device *dev;
+ struct regmap *regmap;
+ enum pca9450_chip_type type;
+ unsigned int rcnt;
+ int irq;
+};
+
+static const struct regmap_range pca9450_status_range = {
+ .range_min = PCA9450_REG_INT1,
+ .range_max = PCA9450_REG_PWRON_STAT,
+};
+
+static const struct regmap_access_table pca9450_volatile_regs = {
+ .yes_ranges = &pca9450_status_range,
+ .n_yes_ranges = 1,
+};
+
+static const struct regmap_config pca9450_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &pca9450_volatile_regs,
+ .max_register = PCA9450_MAX_REGISTER - 1,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+/*
+ * BUCK1/2/3
+ * BUCK1RAM[1:0] BUCK1 DVS ramp rate setting
+ * 00: 25mV/1usec
+ * 01: 25mV/2usec
+ * 10: 25mV/4usec
+ * 11: 25mV/8usec
+ */
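+/* The core expresses ramp_delay in uV/us: e.g. 25mV/8us = 3125 uV/us
+ * selects BUCK1_RAMP_3P125MV, while 25mV/1us = 25000 uV/us selects
+ * BUCK1_RAMP_25MV.
+ */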
+static int pca9450_dvs_set_ramp_delay(struct regulator_dev *rdev,
+ int ramp_delay)
+{
+ int id = rdev_get_id(rdev);
+ unsigned int ramp_value;
+
+ switch (ramp_delay) {
+ case 1 ... 3125:
+ ramp_value = BUCK1_RAMP_3P125MV;
+ break;
+ case 3126 ... 6250:
+ ramp_value = BUCK1_RAMP_6P25MV;
+ break;
+ case 6251 ... 12500:
+ ramp_value = BUCK1_RAMP_12P5MV;
+ break;
+ case 12501 ... 25000:
+ ramp_value = BUCK1_RAMP_25MV;
+ break;
+ default:
+ ramp_value = BUCK1_RAMP_25MV;
+ }
+
+ return regmap_update_bits(rdev->regmap, PCA9450_REG_BUCK1CTRL + id * 3,
+ BUCK1_RAMP_MASK, ramp_value << 6);
+}
+
+static struct regulator_ops pca9450_dvs_buck_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = pca9450_dvs_set_ramp_delay,
+};
+
+static struct regulator_ops pca9450_buck_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops pca9450_ldo_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+/*
+ * BUCK1/2/3
+ * 0.60 to 2.1875V (12.5mV step)
+ */
+static const struct linear_range pca9450_dvs_buck_volts[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x00, 0x7F, 12500),
+};
+
+/*
+ * BUCK4/5/6
+ * 0.6V to 3.4V (25mV step)
+ */
+static const struct linear_range pca9450_buck_volts[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x00, 0x70, 25000),
+ REGULATOR_LINEAR_RANGE(3400000, 0x71, 0x7F, 0),
+};
+
+/*
+ * LDO1
+ * 1.6 to 1.9V and 3.0 to 3.3V (100mV step)
+ */
+static const struct linear_range pca9450_ldo1_volts[] = {
+ REGULATOR_LINEAR_RANGE(1600000, 0x00, 0x03, 100000),
+ REGULATOR_LINEAR_RANGE(3000000, 0x04, 0x07, 100000),
+};
+
+/*
+ * LDO2
+ * 0.8 to 1.15V (50mV step)
+ */
+static const struct linear_range pca9450_ldo2_volts[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0x07, 50000),
+};
+
+/*
+ * LDO3/4
+ * 0.8 to 3.3V (100mV step)
+ */
+static const struct linear_range pca9450_ldo34_volts[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0x19, 100000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x1A, 0x1F, 0),
+};
+
+/*
+ * LDO5
+ * 1.8 to 3.3V (100mV step)
+ */
+static const struct linear_range pca9450_ldo5_volts[] = {
+ REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
+};
+
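+/*
+ * Program one DVS register from an optional device tree property: find
+ * the selector whose voltage matches the requested value exactly and
+ * write it. An absent property (-EINVAL) is not an error.
+ */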
+static int buck_set_dvs(const struct regulator_desc *desc,
+ struct device_node *np, struct regmap *regmap,
+ char *prop, unsigned int reg, unsigned int mask)
+{
+ int ret, i;
+ uint32_t uv;
+
+ ret = of_property_read_u32(np, prop, &uv);
+ if (ret == -EINVAL)
+ return 0;
+ else if (ret)
+ return ret;
+
+ for (i = 0; i < desc->n_voltages; i++) {
+ ret = regulator_desc_list_voltage_linear_range(desc, i);
+ if (ret < 0)
+ continue;
+ if (ret == uv) {
+ i <<= ffs(desc->vsel_mask) - 1;
+ ret = regmap_update_bits(regmap, reg, mask, i);
+ break;
+ }
+ }
+
+ return ret;
+}
+
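+/*
+ * of_parse_cb hook: apply the optional nxp,dvs-run-voltage and
+ * nxp,dvs-standby-voltage properties to the DVS0/DVS1 registers while
+ * the regulator is being registered.
+ */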
+static int pca9450_set_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ struct pca9450_regulator_desc *data = container_of(desc,
+ struct pca9450_regulator_desc, desc);
+ const struct pc9450_dvs_config *dvs = &data->dvs;
+ unsigned int reg, mask;
+ char *prop;
+ int i, ret = 0;
+
+ for (i = 0; i < PCA9450_DVS_LEVEL_MAX; i++) {
+ switch (i) {
+ case PCA9450_DVS_LEVEL_RUN:
+ prop = "nxp,dvs-run-voltage";
+ reg = dvs->run_reg;
+ mask = dvs->run_mask;
+ break;
+ case PCA9450_DVS_LEVEL_STANDBY:
+ prop = "nxp,dvs-standby-voltage";
+ reg = dvs->standby_reg;
+ mask = dvs->standby_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = buck_set_dvs(desc, np, cfg->regmap, prop, reg, mask);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static const struct pca9450_regulator_desc pca9450a_regulators[] = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_BUCK1,
+ .ops = &pca9450_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_BUCK1_VOLTAGE_NUM,
+ .linear_ranges = pca9450_dvs_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts),
+ .vsel_reg = PCA9450_REG_BUCK1OUT_DVS0,
+ .vsel_mask = BUCK1OUT_DVS0_MASK,
+ .enable_reg = PCA9450_REG_BUCK1CTRL,
+ .enable_mask = BUCK1_ENMODE_MASK,
+ .owner = THIS_MODULE,
+ .of_parse_cb = pca9450_set_dvs_levels,
+ },
+ .dvs = {
+ .run_reg = PCA9450_REG_BUCK1OUT_DVS0,
+ .run_mask = BUCK1OUT_DVS0_MASK,
+ .standby_reg = PCA9450_REG_BUCK1OUT_DVS1,
+ .standby_mask = BUCK1OUT_DVS1_MASK,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_BUCK2,
+ .ops = &pca9450_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_BUCK2_VOLTAGE_NUM,
+ .linear_ranges = pca9450_dvs_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts),
+ .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0,
+ .vsel_mask = BUCK2OUT_DVS0_MASK,
+ .enable_reg = PCA9450_REG_BUCK2CTRL,
+ .enable_mask = BUCK1_ENMODE_MASK,
+ .owner = THIS_MODULE,
+ .of_parse_cb = pca9450_set_dvs_levels,
+ },
+ .dvs = {
+ .run_reg = PCA9450_REG_BUCK2OUT_DVS0,
+ .run_mask = BUCK2OUT_DVS0_MASK,
+ .standby_reg = PCA9450_REG_BUCK2OUT_DVS1,
+ .standby_mask = BUCK2OUT_DVS1_MASK,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("BUCK3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_BUCK3,
+ .ops = &pca9450_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_BUCK3_VOLTAGE_NUM,
+ .linear_ranges = pca9450_dvs_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts),
+ .vsel_reg = PCA9450_REG_BUCK3OUT_DVS0,
+ .vsel_mask = BUCK3OUT_DVS0_MASK,
+ .enable_reg = PCA9450_REG_BUCK3CTRL,
+ .enable_mask = BUCK3_ENMODE_MASK,
+ .owner = THIS_MODULE,
+ .of_parse_cb = pca9450_set_dvs_levels,
+ },
+ .dvs = {
+ .run_reg = PCA9450_REG_BUCK3OUT_DVS0,
+ .run_mask = BUCK3OUT_DVS0_MASK,
+ .standby_reg = PCA9450_REG_BUCK3OUT_DVS1,
+ .standby_mask = BUCK3OUT_DVS1_MASK,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("BUCK4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_BUCK4,
+ .ops = &pca9450_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_BUCK4_VOLTAGE_NUM,
+ .linear_ranges = pca9450_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts),
+ .vsel_reg = PCA9450_REG_BUCK4OUT,
+ .vsel_mask = BUCK4OUT_MASK,
+ .enable_reg = PCA9450_REG_BUCK4CTRL,
+ .enable_mask = BUCK4_ENMODE_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck5",
+ .of_match = of_match_ptr("BUCK5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_BUCK5,
+ .ops = &pca9450_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_BUCK5_VOLTAGE_NUM,
+ .linear_ranges = pca9450_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts),
+ .vsel_reg = PCA9450_REG_BUCK5OUT,
+ .vsel_mask = BUCK5OUT_MASK,
+ .enable_reg = PCA9450_REG_BUCK5CTRL,
+ .enable_mask = BUCK5_ENMODE_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck6",
+ .of_match = of_match_ptr("BUCK6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_BUCK6,
+ .ops = &pca9450_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_BUCK6_VOLTAGE_NUM,
+ .linear_ranges = pca9450_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts),
+ .vsel_reg = PCA9450_REG_BUCK6OUT,
+ .vsel_mask = BUCK6OUT_MASK,
+ .enable_reg = PCA9450_REG_BUCK6CTRL,
+ .enable_mask = BUCK6_ENMODE_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_LDO1,
+ .ops = &pca9450_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_LDO1_VOLTAGE_NUM,
+ .linear_ranges = pca9450_ldo1_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_ldo1_volts),
+ .vsel_reg = PCA9450_REG_LDO1CTRL,
+ .vsel_mask = LDO1OUT_MASK,
+ .enable_reg = PCA9450_REG_LDO1CTRL,
+ .enable_mask = LDO1_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_LDO2,
+ .ops = &pca9450_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_LDO2_VOLTAGE_NUM,
+ .linear_ranges = pca9450_ldo2_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_ldo2_volts),
+ .vsel_reg = PCA9450_REG_LDO2CTRL,
+ .vsel_mask = LDO2OUT_MASK,
+ .enable_reg = PCA9450_REG_LDO2CTRL,
+ .enable_mask = LDO2_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_LDO3,
+ .ops = &pca9450_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_LDO3_VOLTAGE_NUM,
+ .linear_ranges = pca9450_ldo34_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts),
+ .vsel_reg = PCA9450_REG_LDO3CTRL,
+ .vsel_mask = LDO3OUT_MASK,
+ .enable_reg = PCA9450_REG_LDO3CTRL,
+ .enable_mask = LDO3_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo4",
+ .of_match = of_match_ptr("LDO4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_LDO4,
+ .ops = &pca9450_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_LDO4_VOLTAGE_NUM,
+ .linear_ranges = pca9450_ldo34_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts),
+ .vsel_reg = PCA9450_REG_LDO4CTRL,
+ .vsel_mask = LDO4OUT_MASK,
+ .enable_reg = PCA9450_REG_LDO4CTRL,
+ .enable_mask = LDO4_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("LDO5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_LDO5,
+ .ops = &pca9450_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_LDO5_VOLTAGE_NUM,
+ .linear_ranges = pca9450_ldo5_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
+ .vsel_reg = PCA9450_REG_LDO5CTRL_H,
+ .vsel_mask = LDO5HOUT_MASK,
+ .enable_reg = PCA9450_REG_LDO5CTRL_H,
+ .enable_mask = LDO5H_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+};
+
+/*
+ * Buck3 is not present on the PCA9450B, and on the PCA9450C it is tied
+ * to Buck1 internally for dual-phase operation, so neither variant
+ * exposes a Buck3 regulator.
+ */
+static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_BUCK1,
+ .ops = &pca9450_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_BUCK1_VOLTAGE_NUM,
+ .linear_ranges = pca9450_dvs_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts),
+ .vsel_reg = PCA9450_REG_BUCK1OUT_DVS0,
+ .vsel_mask = BUCK1OUT_DVS0_MASK,
+ .enable_reg = PCA9450_REG_BUCK1CTRL,
+ .enable_mask = BUCK1_ENMODE_MASK,
+ .owner = THIS_MODULE,
+ .of_parse_cb = pca9450_set_dvs_levels,
+ },
+ .dvs = {
+ .run_reg = PCA9450_REG_BUCK1OUT_DVS0,
+ .run_mask = BUCK1OUT_DVS0_MASK,
+ .standby_reg = PCA9450_REG_BUCK1OUT_DVS1,
+ .standby_mask = BUCK1OUT_DVS1_MASK,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_BUCK2,
+ .ops = &pca9450_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_BUCK2_VOLTAGE_NUM,
+ .linear_ranges = pca9450_dvs_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts),
+ .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0,
+ .vsel_mask = BUCK2OUT_DVS0_MASK,
+ .enable_reg = PCA9450_REG_BUCK2CTRL,
+ .enable_mask = BUCK2_ENMODE_MASK,
+ .owner = THIS_MODULE,
+ .of_parse_cb = pca9450_set_dvs_levels,
+ },
+ .dvs = {
+ .run_reg = PCA9450_REG_BUCK2OUT_DVS0,
+ .run_mask = BUCK2OUT_DVS0_MASK,
+ .standby_reg = PCA9450_REG_BUCK2OUT_DVS1,
+ .standby_mask = BUCK2OUT_DVS1_MASK,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("BUCK4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_BUCK4,
+ .ops = &pca9450_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_BUCK4_VOLTAGE_NUM,
+ .linear_ranges = pca9450_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts),
+ .vsel_reg = PCA9450_REG_BUCK4OUT,
+ .vsel_mask = BUCK4OUT_MASK,
+ .enable_reg = PCA9450_REG_BUCK4CTRL,
+ .enable_mask = BUCK4_ENMODE_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck5",
+ .of_match = of_match_ptr("BUCK5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_BUCK5,
+ .ops = &pca9450_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_BUCK5_VOLTAGE_NUM,
+ .linear_ranges = pca9450_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts),
+ .vsel_reg = PCA9450_REG_BUCK5OUT,
+ .vsel_mask = BUCK5OUT_MASK,
+ .enable_reg = PCA9450_REG_BUCK5CTRL,
+ .enable_mask = BUCK5_ENMODE_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck6",
+ .of_match = of_match_ptr("BUCK6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_BUCK6,
+ .ops = &pca9450_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_BUCK6_VOLTAGE_NUM,
+ .linear_ranges = pca9450_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts),
+ .vsel_reg = PCA9450_REG_BUCK6OUT,
+ .vsel_mask = BUCK6OUT_MASK,
+ .enable_reg = PCA9450_REG_BUCK6CTRL,
+ .enable_mask = BUCK6_ENMODE_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_LDO1,
+ .ops = &pca9450_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_LDO1_VOLTAGE_NUM,
+ .linear_ranges = pca9450_ldo1_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_ldo1_volts),
+ .vsel_reg = PCA9450_REG_LDO1CTRL,
+ .vsel_mask = LDO1OUT_MASK,
+ .enable_reg = PCA9450_REG_LDO1CTRL,
+ .enable_mask = LDO1_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_LDO2,
+ .ops = &pca9450_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_LDO2_VOLTAGE_NUM,
+ .linear_ranges = pca9450_ldo2_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_ldo2_volts),
+ .vsel_reg = PCA9450_REG_LDO2CTRL,
+ .vsel_mask = LDO2OUT_MASK,
+ .enable_reg = PCA9450_REG_LDO2CTRL,
+ .enable_mask = LDO2_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_LDO3,
+ .ops = &pca9450_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_LDO3_VOLTAGE_NUM,
+ .linear_ranges = pca9450_ldo34_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts),
+ .vsel_reg = PCA9450_REG_LDO3CTRL,
+ .vsel_mask = LDO3OUT_MASK,
+ .enable_reg = PCA9450_REG_LDO3CTRL,
+ .enable_mask = LDO3_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo4",
+ .of_match = of_match_ptr("LDO4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_LDO4,
+ .ops = &pca9450_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_LDO4_VOLTAGE_NUM,
+ .linear_ranges = pca9450_ldo34_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts),
+ .vsel_reg = PCA9450_REG_LDO4CTRL,
+ .vsel_mask = LDO4OUT_MASK,
+ .enable_reg = PCA9450_REG_LDO4CTRL,
+ .enable_mask = LDO4_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("LDO5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PCA9450_LDO5,
+ .ops = &pca9450_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PCA9450_LDO5_VOLTAGE_NUM,
+ .linear_ranges = pca9450_ldo5_volts,
+ .n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
+ .vsel_reg = PCA9450_REG_LDO5CTRL_H,
+ .vsel_mask = LDO5HOUT_MASK,
+ .enable_reg = PCA9450_REG_LDO5CTRL_H,
+ .enable_mask = LDO5H_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+};
+
+static irqreturn_t pca9450_irq_handler(int irq, void *data)
+{
+ struct pca9450 *pca9450 = data;
+ struct regmap *regmap = pca9450->regmap;
+ unsigned int status;
+ int ret;
+
+ ret = regmap_read(regmap, PCA9450_REG_INT1, &status);
+ if (ret < 0) {
+ dev_err(pca9450->dev,
+ "Failed to read INT1(%d)\n", ret);
+ return IRQ_NONE;
+ }
+
+ if (status & IRQ_PWRON)
+ dev_warn(pca9450->dev, "PWRON interrupt.\n");
+
+ if (status & IRQ_WDOGB)
+ dev_warn(pca9450->dev, "WDOGB interrupt.\n");
+
+ if (status & IRQ_VR_FLT1)
+ dev_warn(pca9450->dev, "VRFLT1 interrupt.\n");
+
+ if (status & IRQ_VR_FLT2)
+ dev_warn(pca9450->dev, "VRFLT2 interrupt.\n");
+
+ if (status & IRQ_LOWVSYS)
+ dev_warn(pca9450->dev, "LOWVSYS interrupt.\n");
+
+ if (status & IRQ_THERM_105)
+ dev_warn(pca9450->dev, "IRQ_THERM_105 interrupt.\n");
+
+ if (status & IRQ_THERM_125)
+ dev_warn(pca9450->dev, "IRQ_THERM_125 interrupt.\n");
+
+ return IRQ_HANDLED;
+}
+
+static int pca9450_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ enum pca9450_chip_type type = (unsigned int)(uintptr_t)
+ of_device_get_match_data(&i2c->dev);
+ const struct pca9450_regulator_desc *regulator_desc;
+ struct regulator_config config = { };
+ struct pca9450 *pca9450;
+ unsigned int device_id, i;
+ int ret;
+
+ if (!i2c->irq) {
+ dev_err(&i2c->dev, "No IRQ configured?\n");
+ return -EINVAL;
+ }
+
+ pca9450 = devm_kzalloc(&i2c->dev, sizeof(struct pca9450), GFP_KERNEL);
+ if (!pca9450)
+ return -ENOMEM;
+
+ switch (type) {
+ case PCA9450_TYPE_PCA9450A:
+ regulator_desc = pca9450a_regulators;
+ pca9450->rcnt = ARRAY_SIZE(pca9450a_regulators);
+ break;
+ case PCA9450_TYPE_PCA9450BC:
+ regulator_desc = pca9450bc_regulators;
+ pca9450->rcnt = ARRAY_SIZE(pca9450bc_regulators);
+ break;
+ default:
+ dev_err(&i2c->dev, "Unknown device type");
+ return -EINVAL;
+ }
+
+ pca9450->irq = i2c->irq;
+ pca9450->type = type;
+ pca9450->dev = &i2c->dev;
+
+ dev_set_drvdata(&i2c->dev, pca9450);
+
+ pca9450->regmap = devm_regmap_init_i2c(i2c,
+ &pca9450_regmap_config);
+ if (IS_ERR(pca9450->regmap)) {
+ dev_err(&i2c->dev, "regmap initialization failed\n");
+ return PTR_ERR(pca9450->regmap);
+ }
+
+ ret = regmap_read(pca9450->regmap, PCA9450_REG_DEV_ID, &device_id);
+ if (ret) {
+ dev_err(&i2c->dev, "Read device id error\n");
+ return ret;
+ }
+
+ /* Check that the board/DT matches the right PMIC variant */
+ if (((device_id >> 4) != 0x1 && type == PCA9450_TYPE_PCA9450A) ||
+ ((device_id >> 4) != 0x3 && type == PCA9450_TYPE_PCA9450BC)) {
+ dev_err(&i2c->dev, "Device id(%x) mismatched\n",
+ device_id >> 4);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < pca9450->rcnt; i++) {
+ const struct regulator_desc *desc;
+ struct regulator_dev *rdev;
+ const struct pca9450_regulator_desc *r;
+
+ r = &regulator_desc[i];
+ desc = &r->desc;
+
+ config.regmap = pca9450->regmap;
+ config.dev = pca9450->dev;
+
+ rdev = devm_regulator_register(pca9450->dev, desc, &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(pca9450->dev,
+ "Failed to register regulator(%s): %d\n",
+ desc->name, ret);
+ return ret;
+ }
+ }
+
+ ret = devm_request_threaded_irq(pca9450->dev, pca9450->irq, NULL,
+ pca9450_irq_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
+ "pca9450-irq", pca9450);
+ if (ret != 0) {
+ dev_err(pca9450->dev, "Failed to request IRQ: %d\n",
+ pca9450->irq);
+ return ret;
+ }
+ /* Unmask all interrupts except PWRON/WDOGB/RSVD */
+ ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_INT1_MSK,
+ IRQ_VR_FLT1 | IRQ_VR_FLT2 | IRQ_LOWVSYS |
+ IRQ_THERM_105 | IRQ_THERM_125,
+ IRQ_PWRON | IRQ_WDOGB | IRQ_RSVD);
+ if (ret) {
+ dev_err(&i2c->dev, "Unmask irq error\n");
+ return ret;
+ }
+
+ dev_info(&i2c->dev, "%s probed.\n",
+ type == PCA9450_TYPE_PCA9450A ? "pca9450a" : "pca9450bc");
+
+ return 0;
+}
+
+static const struct of_device_id pca9450_of_match[] = {
+ {
+ .compatible = "nxp,pca9450a",
+ .data = (void *)PCA9450_TYPE_PCA9450A,
+ },
+ {
+ .compatible = "nxp,pca9450b",
+ .data = (void *)PCA9450_TYPE_PCA9450BC,
+ },
+ {
+ .compatible = "nxp,pca9450c",
+ .data = (void *)PCA9450_TYPE_PCA9450BC,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pca9450_of_match);
+
+static struct i2c_driver pca9450_i2c_driver = {
+ .driver = {
+ .name = "nxp-pca9450",
+ .of_match_table = pca9450_of_match,
+ },
+ .probe = pca9450_i2c_probe,
+};
+
+module_i2c_driver(pca9450_i2c_driver);
+
+MODULE_AUTHOR("Robin Gong <yibin.gong@nxp.com>");
+MODULE_DESCRIPTION("NXP PCA9450 Power Management IC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 4c8e8b472287..7e8ba9246167 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -128,7 +128,7 @@ static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
struct pfuze_chip *pfuze100 = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
bool reg_has_ramp_delay;
- unsigned int ramp_bits;
+ unsigned int ramp_bits = 0;
int ret;
switch (pfuze100->chip_id) {
@@ -149,8 +149,11 @@ static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
}
if (reg_has_ramp_delay) {
- ramp_delay = 12500 / ramp_delay;
- ramp_bits = (ramp_delay >> 1) - (ramp_delay >> 3);
+ if (ramp_delay > 0) {
+ ramp_delay = 12500 / ramp_delay;
+ ramp_bits = (ramp_delay >> 1) - (ramp_delay >> 3);
+ }
+
ret = regmap_update_bits(pfuze100->regmap,
rdev->desc->vsel_reg + 4,
0xc0, ramp_bits << 6);
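
The guard added above matters because the encoding starts with an integer division by the requested ramp delay; a zero request would otherwise fault, and the patch leaves ramp_bits at 0 in that case. The same arithmetic pulled out as a hedged sketch:

static unsigned int pfuze_ramp_bits(int ramp_delay)
{
	/* mirror the patch: a non-positive request encodes as 0 */
	if (ramp_delay <= 0)
		return 0;

	ramp_delay = 12500 / ramp_delay;		/* e.g. 6250 uV/us -> 2 */
	return (ramp_delay >> 1) - (ramp_delay >> 3);	/* 2 -> 1 */
}

The result lands in bits 7:6 of the register, hence the "ramp_bits << 6" above.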
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 638329bd0745..3234b118b53e 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -48,7 +48,7 @@ struct pwm_voltages {
unsigned int dutycycle;
};
-/**
+/*
* Voltage table call-backs
*/
static void pwm_regulator_init_state(struct regulator_dev *rdev)
diff --git a/drivers/regulator/qcom-labibb-regulator.c b/drivers/regulator/qcom-labibb-regulator.c
new file mode 100644
index 000000000000..8c7dd1928380
--- /dev/null
+++ b/drivers/regulator/qcom-labibb-regulator.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2020, The Linux Foundation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+#define REG_PERPH_TYPE 0x04
+
+#define QCOM_LAB_TYPE 0x24
+#define QCOM_IBB_TYPE 0x20
+
+#define PMI8998_LAB_REG_BASE 0xde00
+#define PMI8998_IBB_REG_BASE 0xdc00
+
+#define REG_LABIBB_STATUS1 0x08
+#define REG_LABIBB_ENABLE_CTL 0x46
+#define LABIBB_STATUS1_VREG_OK_BIT BIT(7)
+#define LABIBB_CONTROL_ENABLE BIT(7)
+
+#define LAB_ENABLE_CTL_MASK BIT(7)
+#define IBB_ENABLE_CTL_MASK (BIT(7) | BIT(6))
+
+#define LABIBB_OFF_ON_DELAY 1000
+#define LAB_ENABLE_TIME (LABIBB_OFF_ON_DELAY * 2)
+#define IBB_ENABLE_TIME (LABIBB_OFF_ON_DELAY * 10)
+#define LABIBB_POLL_ENABLED_TIME 1000
+
+struct labibb_regulator {
+ struct regulator_desc desc;
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator_dev *rdev;
+ u16 base;
+ u8 type;
+};
+
+struct labibb_regulator_data {
+ const char *name;
+ u8 type;
+ u16 base;
+ struct regulator_desc *desc;
+};
+
+static struct regulator_ops qcom_labibb_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static struct regulator_desc pmi8998_lab_desc = {
+ .enable_mask = LAB_ENABLE_CTL_MASK,
+ .enable_reg = (PMI8998_LAB_REG_BASE + REG_LABIBB_ENABLE_CTL),
+ .enable_val = LABIBB_CONTROL_ENABLE,
+ .enable_time = LAB_ENABLE_TIME,
+ .poll_enabled_time = LABIBB_POLL_ENABLED_TIME,
+ .off_on_delay = LABIBB_OFF_ON_DELAY,
+ .owner = THIS_MODULE,
+ .type = REGULATOR_VOLTAGE,
+ .ops = &qcom_labibb_ops,
+};
+
+static struct regulator_desc pmi8998_ibb_desc = {
+ .enable_mask = IBB_ENABLE_CTL_MASK,
+ .enable_reg = (PMI8998_IBB_REG_BASE + REG_LABIBB_ENABLE_CTL),
+ .enable_val = LABIBB_CONTROL_ENABLE,
+ .enable_time = IBB_ENABLE_TIME,
+ .poll_enabled_time = LABIBB_POLL_ENABLED_TIME,
+ .off_on_delay = LABIBB_OFF_ON_DELAY,
+ .owner = THIS_MODULE,
+ .type = REGULATOR_VOLTAGE,
+ .ops = &qcom_labibb_ops,
+};
+
+static const struct labibb_regulator_data pmi8998_labibb_data[] = {
+ {"lab", QCOM_LAB_TYPE, PMI8998_LAB_REG_BASE, &pmi8998_lab_desc},
+ {"ibb", QCOM_IBB_TYPE, PMI8998_IBB_REG_BASE, &pmi8998_ibb_desc},
+ { },
+};
+
+static const struct of_device_id qcom_labibb_match[] = {
+ { .compatible = "qcom,pmi8998-lab-ibb", .data = &pmi8998_labibb_data},
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_labibb_match);
+
+static int qcom_labibb_regulator_probe(struct platform_device *pdev)
+{
+ struct labibb_regulator *vreg;
+ struct device *dev = &pdev->dev;
+ struct regulator_config cfg = {};
+
+ const struct of_device_id *match;
+ const struct labibb_regulator_data *reg_data;
+ struct regmap *reg_regmap;
+ unsigned int type;
+ int ret;
+
+ reg_regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!reg_regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(qcom_labibb_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ for (reg_data = match->data; reg_data->name; reg_data++) {
+
+ /*
+ * Validate that the peripheral type register matches the
+ * regulator type claimed in the DT.
+ */
+ ret = regmap_read(reg_regmap, reg_data->base + REG_PERPH_TYPE,
+ &type);
+ if (ret < 0) {
+ dev_err(dev,
+ "Peripheral type read failed ret=%d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ if (WARN_ON((type != QCOM_LAB_TYPE) && (type != QCOM_IBB_TYPE)) ||
+ WARN_ON(type != reg_data->type))
+ return -EINVAL;
+
+ vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg),
+ GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ vreg->regmap = reg_regmap;
+ vreg->dev = dev;
+ vreg->base = reg_data->base;
+ vreg->type = reg_data->type;
+
+ memcpy(&vreg->desc, reg_data->desc, sizeof(vreg->desc));
+ vreg->desc.of_match = reg_data->name;
+ vreg->desc.name = reg_data->name;
+
+ cfg.dev = vreg->dev;
+ cfg.driver_data = vreg;
+ cfg.regmap = vreg->regmap;
+
+ vreg->rdev = devm_regulator_register(vreg->dev, &vreg->desc,
+ &cfg);
+
+ if (IS_ERR(vreg->rdev)) {
+ dev_err(dev, "qcom_labibb: error registering %s : %d\n",
+ reg_data->name, ret);
+ return PTR_ERR(vreg->rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver qcom_labibb_regulator_driver = {
+ .driver = {
+ .name = "qcom-lab-ibb-regulator",
+ .of_match_table = qcom_labibb_match,
+ },
+ .probe = qcom_labibb_regulator_probe,
+};
+module_platform_driver(qcom_labibb_regulator_driver);
+
+MODULE_DESCRIPTION("Qualcomm labibb driver");
+MODULE_AUTHOR("Nisha Kumari <nishakumari@codeaurora.org>");
+MODULE_AUTHOR("Sumit Semwal <sumit.semwal@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 79bdc129cb50..08dcc614efa7 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -22,9 +22,9 @@
/**
* enum rpmh_regulator_type - supported RPMh accelerator types
- * %VRM: RPMh VRM accelerator which supports voting on enable, voltage,
+ * @VRM: RPMh VRM accelerator which supports voting on enable, voltage,
* and mode of LDO, SMPS, and BOB type PMIC regulators.
- * %XOB: RPMh XOB accelerator which supports voting on the enable state
+ * @XOB: RPMh XOB accelerator which supports voting on the enable state
* of PMIC regulators.
*/
enum rpmh_regulator_type {
@@ -399,13 +399,13 @@ static const struct regulator_ops rpmh_regulator_xob_ops = {
/**
* rpmh_regulator_init_vreg() - initialize all attributes of an rpmh-regulator
- * vreg: Pointer to the individual rpmh-regulator resource
- * dev: Pointer to the top level rpmh-regulator PMIC device
- * node: Pointer to the individual rpmh-regulator resource
+ * @vreg: Pointer to the individual rpmh-regulator resource
+ * @dev: Pointer to the top level rpmh-regulator PMIC device
+ * @node: Pointer to the individual rpmh-regulator resource
* device node
- * pmic_id: String used to identify the top level rpmh-regulator
+ * @pmic_id: String used to identify the top level rpmh-regulator
* PMIC device on the board
- * pmic_rpmh_data: Pointer to a null-terminated array of rpmh-regulator
+ * @pmic_rpmh_data: Pointer to a null-terminated array of rpmh-regulator
* resources defined for the top level PMIC device
*
* Return: 0 on success, errno on failure
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 0066f850f15d..7f9d66ac37ff 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -407,7 +407,7 @@ static int rpm_reg_set_load(struct regulator_dev *rdev, int load_uA)
return ret;
}
-static struct regulator_ops uV_ops = {
+static const struct regulator_ops uV_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.set_voltage_sel = rpm_reg_set_uV_sel,
@@ -420,7 +420,7 @@ static struct regulator_ops uV_ops = {
.set_load = rpm_reg_set_load,
};
-static struct regulator_ops mV_ops = {
+static const struct regulator_ops mV_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.set_voltage_sel = rpm_reg_set_mV_sel,
@@ -433,7 +433,7 @@ static struct regulator_ops mV_ops = {
.set_load = rpm_reg_set_load,
};
-static struct regulator_ops switch_ops = {
+static const struct regulator_ops switch_ops = {
.enable = rpm_reg_switch_enable,
.disable = rpm_reg_switch_disable,
.is_enabled = rpm_reg_is_enabled,
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 7f5c318c8259..a87b56bc29fa 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -198,6 +198,15 @@ static const struct regulator_ops rpm_bob_ops = {
.set_voltage = rpm_reg_set_voltage,
};
+static const struct regulator_ops rpm_mp5496_ops = {
+ .enable = rpm_reg_enable,
+ .disable = rpm_reg_disable,
+ .is_enabled = rpm_reg_is_enabled,
+ .list_voltage = regulator_list_voltage_linear_range,
+
+ .set_voltage = rpm_reg_set_voltage,
+};
+
static const struct regulator_desc pma8084_hfsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
@@ -474,15 +483,6 @@ static const struct regulator_desc pmi8994_bby = {
.ops = &rpm_bob_ops,
};
-static const struct regulator_desc pmi8994_boost = {
- .linear_ranges = (struct linear_range[]) {
- REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000),
- },
- .n_linear_ranges = 1,
- .n_voltages = 31,
- .ops = &rpm_smps_ldo_ops,
-};
-
static const struct regulator_desc pm8998_ftsmps = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 258, 4000),
@@ -595,6 +595,24 @@ static const struct regulator_desc pms405_pldo600 = {
.ops = &rpm_smps_ldo_ops,
};
+static const struct regulator_desc mp5496_smpa2 = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(725000, 0, 27, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 28,
+ .ops = &rpm_mp5496_ops,
+};
+
+static const struct regulator_desc mp5496_ldoa2 = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1800000, 0, 60, 25000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 61,
+ .ops = &rpm_mp5496_ops,
+};
+
struct rpm_regulator_data {
const char *name;
u32 type;
@@ -603,6 +621,12 @@ struct rpm_regulator_data {
const char *supply;
};
+static const struct rpm_regulator_data rpm_mp5496_regulators[] = {
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &mp5496_smpa2, "s2" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &mp5496_ldoa2, "l2" },
+ {}
+};
+
static const struct rpm_regulator_data rpm_pm8841_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPB, 1, &pm8x41_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPB, 2, &pm8841_ftsmps, "vdd_s2" },
@@ -901,6 +925,7 @@ static const struct rpm_regulator_data rpm_pms405_regulators[] = {
};
static const struct of_device_id rpm_of_match[] = {
+ { .compatible = "qcom,rpm-mp5496-regulators", .data = &rpm_mp5496_regulators },
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 95737e4dd6bb..5ee7c5305d95 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -380,7 +380,7 @@ struct spmi_regulator_mapping {
enum spmi_regulator_logical_type logical_type;
u32 revision_min;
u32 revision_max;
- struct regulator_ops *ops;
+ const struct regulator_ops *ops;
struct spmi_voltage_set_points *set_points;
int hpm_min_load;
};
@@ -1261,7 +1261,7 @@ spmi_regulator_saw_set_voltage(struct regulator_dev *rdev, unsigned selector)
static struct regulator_ops spmi_saw_ops = {};
-static struct regulator_ops spmi_smps_ops = {
+static const struct regulator_ops spmi_smps_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -1276,7 +1276,7 @@ static struct regulator_ops spmi_smps_ops = {
.set_pull_down = spmi_regulator_common_set_pull_down,
};
-static struct regulator_ops spmi_ldo_ops = {
+static const struct regulator_ops spmi_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -1293,7 +1293,7 @@ static struct regulator_ops spmi_ldo_ops = {
.set_soft_start = spmi_regulator_common_set_soft_start,
};
-static struct regulator_ops spmi_ln_ldo_ops = {
+static const struct regulator_ops spmi_ln_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -1305,7 +1305,7 @@ static struct regulator_ops spmi_ln_ldo_ops = {
.get_bypass = spmi_regulator_common_get_bypass,
};
-static struct regulator_ops spmi_vs_ops = {
+static const struct regulator_ops spmi_vs_ops = {
.enable = spmi_regulator_vs_enable,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -1316,7 +1316,7 @@ static struct regulator_ops spmi_vs_ops = {
.get_mode = spmi_regulator_common_get_mode,
};
-static struct regulator_ops spmi_boost_ops = {
+static const struct regulator_ops spmi_boost_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -1327,7 +1327,7 @@ static struct regulator_ops spmi_boost_ops = {
.set_input_current_limit = spmi_regulator_set_ilim,
};
-static struct regulator_ops spmi_ftsmps_ops = {
+static const struct regulator_ops spmi_ftsmps_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -1342,7 +1342,7 @@ static struct regulator_ops spmi_ftsmps_ops = {
.set_pull_down = spmi_regulator_common_set_pull_down,
};
-static struct regulator_ops spmi_ult_lo_smps_ops = {
+static const struct regulator_ops spmi_ult_lo_smps_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -1356,7 +1356,7 @@ static struct regulator_ops spmi_ult_lo_smps_ops = {
.set_pull_down = spmi_regulator_common_set_pull_down,
};
-static struct regulator_ops spmi_ult_ho_smps_ops = {
+static const struct regulator_ops spmi_ult_ho_smps_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -1371,7 +1371,7 @@ static struct regulator_ops spmi_ult_ho_smps_ops = {
.set_pull_down = spmi_regulator_common_set_pull_down,
};
-static struct regulator_ops spmi_ult_ldo_ops = {
+static const struct regulator_ops spmi_ult_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -1388,7 +1388,7 @@ static struct regulator_ops spmi_ult_ldo_ops = {
.set_soft_start = spmi_regulator_common_set_soft_start,
};
-static struct regulator_ops spmi_ftsmps426_ops = {
+static const struct regulator_ops spmi_ftsmps426_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -1403,7 +1403,7 @@ static struct regulator_ops spmi_ftsmps426_ops = {
.set_pull_down = spmi_regulator_common_set_pull_down,
};
-static struct regulator_ops spmi_hfs430_ops = {
+static const struct regulator_ops spmi_hfs430_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/qcom_usb_vbus-regulator.c b/drivers/regulator/qcom_usb_vbus-regulator.c
new file mode 100644
index 000000000000..8ba947f3585f
--- /dev/null
+++ b/drivers/regulator/qcom_usb_vbus-regulator.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Qualcomm PMIC VBUS output regulator driver
+//
+// Copyright (c) 2020, The Linux Foundation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regmap.h>
+
+#define CMD_OTG 0x40
+#define OTG_EN BIT(0)
+#define OTG_CFG 0x53
+#define OTG_EN_SRC_CFG BIT(1)
+
+static const struct regulator_ops qcom_usb_vbus_reg_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static struct regulator_desc qcom_usb_vbus_rdesc = {
+ .name = "usb_vbus",
+ .ops = &qcom_usb_vbus_reg_ops,
+ .owner = THIS_MODULE,
+ .type = REGULATOR_VOLTAGE,
+};
+
+static int qcom_usb_vbus_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ struct regulator_config config = { };
+ struct regulator_init_data *init_data;
+ int ret;
+ u32 base;
+
+ ret = of_property_read_u32(dev->of_node, "reg", &base);
+ if (ret < 0) {
+ dev_err(dev, "no base address found\n");
+ return ret;
+ }
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap) {
+ dev_err(dev, "Failed to get regmap\n");
+ return -ENOENT;
+ }
+
+ init_data = of_get_regulator_init_data(dev, dev->of_node,
+ &qcom_usb_vbus_rdesc);
+ if (!init_data)
+ return -ENOMEM;
+
+ qcom_usb_vbus_rdesc.enable_reg = base + CMD_OTG;
+ qcom_usb_vbus_rdesc.enable_mask = OTG_EN;
+ config.dev = dev;
+ config.init_data = init_data;
+ config.regmap = regmap;
+
+ rdev = devm_regulator_register(dev, &qcom_usb_vbus_rdesc, &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(dev, "not able to register vbus reg %d\n", ret);
+ return ret;
+ }
+
+ /* Disable HW logic for VBUS enable */
+ regmap_update_bits(regmap, base + OTG_CFG, OTG_EN_SRC_CFG, 0);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_usb_vbus_regulator_match[] = {
+ { .compatible = "qcom,pm8150b-vbus-reg" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_usb_vbus_regulator_match);
+
+static struct platform_driver qcom_usb_vbus_regulator_driver = {
+ .driver = {
+ .name = "qcom-usb-vbus-regulator",
+ .of_match_table = qcom_usb_vbus_regulator_match,
+ },
+ .probe = qcom_usb_vbus_regulator_probe,
+};
+module_platform_driver(qcom_usb_vbus_regulator_driver);
+
+MODULE_DESCRIPTION("Qualcomm USB vbus regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
index adc9973d1b2f..73e0ab2baeaa 100644
--- a/drivers/regulator/stpmic1_regulator.c
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -15,7 +15,7 @@
#include <dt-bindings/mfd/st,stpmic1.h>
/**
- * stpmic1 regulator description: this structure is used as driver data
+ * struct stpmic1 regulator description: this structure is used as driver data
* @desc: regulator framework description
* @mask_reset_reg: mask reset register address
* @mask_reset_mask: mask rank and mask reset register mask
diff --git a/drivers/regulator/sy8827n.c b/drivers/regulator/sy8827n.c
new file mode 100644
index 000000000000..b207217f74d8
--- /dev/null
+++ b/drivers/regulator/sy8827n.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// SY8827N regulator driver
+//
+// Copyright (C) 2020 Synaptics Incorporated
+//
+// Author: Jisheng Zhang <jszhang@kernel.org>
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+#define SY8827N_VSEL0 0
+#define SY8827N_BUCK_EN (1 << 7)
+#define SY8827N_MODE (1 << 6)
+#define SY8827N_VSEL1 1
+#define SY8827N_CTRL 2
+
+#define SY8827N_NVOLTAGES 64
+#define SY8827N_VSELMIN 600000
+#define SY8827N_VSELSTEP 12500
+
+struct sy8827n_device_info {
+ struct device *dev;
+ struct regulator_desc desc;
+ struct regulator_init_data *regulator;
+ struct gpio_desc *en_gpio;
+ unsigned int vsel_reg;
+};
+
+static int sy8827n_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct sy8827n_device_info *di = rdev_get_drvdata(rdev);
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ regmap_update_bits(rdev->regmap, di->vsel_reg,
+ SY8827N_MODE, SY8827N_MODE);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ regmap_update_bits(rdev->regmap, di->vsel_reg,
+ SY8827N_MODE, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static unsigned int sy8827n_get_mode(struct regulator_dev *rdev)
+{
+ struct sy8827n_device_info *di = rdev_get_drvdata(rdev);
+ u32 val;
+ int ret = 0;
+
+ ret = regmap_read(rdev->regmap, di->vsel_reg, &val);
+ if (ret < 0)
+ return ret;
+ if (val & SY8827N_MODE)
+ return REGULATOR_MODE_FAST;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static const struct regulator_ops sy8827n_regulator_ops = {
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .map_voltage = regulator_map_voltage_linear,
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_mode = sy8827n_set_mode,
+ .get_mode = sy8827n_get_mode,
+};
+
+static int sy8827n_regulator_register(struct sy8827n_device_info *di,
+ struct regulator_config *config)
+{
+ struct regulator_desc *rdesc = &di->desc;
+ struct regulator_dev *rdev;
+
+ rdesc->name = "sy8827n-reg";
+ rdesc->supply_name = "vin";
+ rdesc->ops = &sy8827n_regulator_ops;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->n_voltages = SY8827N_NVOLTAGES;
+ rdesc->enable_reg = di->vsel_reg;
+ rdesc->enable_mask = SY8827N_BUCK_EN;
+ rdesc->min_uV = SY8827N_VSELMIN;
+ rdesc->uV_step = SY8827N_VSELSTEP;
+ rdesc->vsel_reg = di->vsel_reg;
+ rdesc->vsel_mask = rdesc->n_voltages - 1;
+ rdesc->owner = THIS_MODULE;
+
+ rdev = devm_regulator_register(di->dev, &di->desc, config);
+ return PTR_ERR_OR_ZERO(rdev);
+}
+
+static const struct regmap_config sy8827n_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int sy8827n_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ struct sy8827n_device_info *di;
+ struct regulator_config config = { };
+ struct regmap *regmap;
+ int ret;
+
+ di = devm_kzalloc(dev, sizeof(struct sy8827n_device_info), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ di->regulator = of_get_regulator_init_data(dev, np, &di->desc);
+ if (!di->regulator) {
+ dev_err(dev, "Platform data not found!\n");
+ return -EINVAL;
+ }
+
+ di->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(di->en_gpio))
+ return PTR_ERR(di->en_gpio);
+
+ if (of_property_read_bool(np, "silergy,vsel-state-high"))
+ di->vsel_reg = SY8827N_VSEL1;
+ else
+ di->vsel_reg = SY8827N_VSEL0;
+
+ di->dev = dev;
+
+ regmap = devm_regmap_init_i2c(client, &sy8827n_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to allocate regmap!\n");
+ return PTR_ERR(regmap);
+ }
+ i2c_set_clientdata(client, di);
+
+ config.dev = di->dev;
+ config.init_data = di->regulator;
+ config.regmap = regmap;
+ config.driver_data = di;
+ config.of_node = np;
+
+ ret = sy8827n_regulator_register(di, &config);
+ if (ret < 0)
+ dev_err(dev, "Failed to register regulator!\n");
+ return ret;
+}
+
+static const struct of_device_id sy8827n_dt_ids[] = {
+ {
+ .compatible = "silergy,sy8827n",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sy8827n_dt_ids);
+
+static const struct i2c_device_id sy8827n_id[] = {
+ { "sy8827n", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sy8827n_id);
+
+static struct i2c_driver sy8827n_regulator_driver = {
+ .driver = {
+ .name = "sy8827n-regulator",
+ .of_match_table = of_match_ptr(sy8827n_dt_ids),
+ },
+ .probe_new = sy8827n_i2c_probe,
+ .id_table = sy8827n_id,
+};
+module_i2c_driver(sy8827n_regulator_driver);
+
+MODULE_AUTHOR("Jisheng Zhang <jszhang@kernel.org>");
+MODULE_DESCRIPTION("SY8827N regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 5ca6d2130593..795d459ff3cf 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -3,7 +3,7 @@
*
* Supports TPS65023 Regulator
*
- * Copyright (C) 2009 Texas Instrument Incorporated - http://www.ti.com/
+ * Copyright (C) 2009 Texas Instrument Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index d2a8f69b2665..eafbc2bb4b57 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -3,7 +3,7 @@
*
* Regulator driver for TPS65073 PMIC
*
- * Copyright (C) 2009 Texas Instrument Incorporated - http://www.ti.com/
+ * Copyright (C) 2009 Texas Instrument Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
index 9910e949373c..23528475a962 100644
--- a/drivers/regulator/tps65086-regulator.c
+++ b/drivers/regulator/tps65086-regulator.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
*
* Author: Andrew F. Davis <afd@ti.com>
*
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index d27dbbafcf72..e88ed96f4744 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -3,7 +3,7 @@
*
* Regulator driver for TPS65217 PMIC
*
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -124,7 +124,7 @@ static int tps65217_pmic_set_suspend_enable(struct regulator_dev *dev)
struct tps65217 *tps = rdev_get_drvdata(dev);
unsigned int rid = rdev_get_id(dev);
- if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
+ if (rid > TPS65217_LDO_4)
return -EINVAL;
return tps65217_clear_bits(tps, dev->desc->bypass_reg,
@@ -137,7 +137,7 @@ static int tps65217_pmic_set_suspend_disable(struct regulator_dev *dev)
struct tps65217 *tps = rdev_get_drvdata(dev);
unsigned int rid = rdev_get_id(dev);
- if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
+ if (rid > TPS65217_LDO_4)
return -EINVAL;
if (!tps->strobes[rid])
@@ -254,6 +254,9 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
/* Store default strobe info */
ret = tps65217_reg_read(tps, regulators[i].bypass_reg, &val);
+ if (ret)
+ return ret;
+
tps->strobes[i] = val & regulators[i].bypass_mask;
}
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 05d13f807918..fa263545a70e 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -3,7 +3,7 @@
*
* Regulator driver for TPS65218 PMIC
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2 as
@@ -128,7 +128,7 @@ static int tps65218_pmic_set_suspend_enable(struct regulator_dev *dev)
struct tps65218 *tps = rdev_get_drvdata(dev);
unsigned int rid = rdev_get_id(dev);
- if (rid < TPS65218_DCDC_1 || rid > TPS65218_LDO_1)
+ if (rid > TPS65218_LDO_1)
return -EINVAL;
return tps65218_clear_bits(tps, dev->desc->bypass_reg,
@@ -141,7 +141,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
struct tps65218 *tps = rdev_get_drvdata(dev);
unsigned int rid = rdev_get_id(dev);
- if (rid < TPS65218_DCDC_1 || rid > TPS65218_LDO_1)
+ if (rid > TPS65218_LDO_1)
return -EINVAL;
/*
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 15c79931ea89..63d6bbd4969b 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -1,7 +1,7 @@
/*
* Regulator driver for TI TPS65912x PMICs
*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index ae5f0e7fce8b..2e7bfdf7c87b 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1216,11 +1216,11 @@ EXPORT_SYMBOL_GPL(wm8350_register_regulator);
/**
* wm8350_register_led - Register a WM8350 LED output
*
- * @param wm8350 The WM8350 device to configure.
- * @param lednum LED device index to create.
- * @param dcdc The DCDC to use for the LED.
- * @param isink The ISINK to use for the LED.
- * @param pdata Configuration for the LED.
+ * @wm8350: The WM8350 device to configure.
+ * @lednum: LED device index to create.
+ * @dcdc: The DCDC to use for the LED.
+ * @isink: The ISINK to use for the LED.
+ * @pdata: Configuration for the LED.
*
* The WM8350 supports the use of an ISINK together with a DCDC to
* provide a power-efficient LED driver. This function registers the
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 4cb1fbb59722..e9fd13707721 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -234,9 +234,9 @@ static struct platform_driver wm8400_regulator_driver = {
* the regulator API. It is intended to be called from the
* platform_init() callback of the WM8400 MFD driver.
*
- * @param dev The WM8400 device to operate on.
- * @param reg The regulator to control.
- * @param initdata Regulator initdata for the regulator.
+ * @dev: The WM8400 device to operate on.
+ * @reg: The regulator to control.
+ * @initdata: Regulator initdata for the regulator.
*/
int wm8400_register_regulator(struct device *dev, int reg,
struct regulator_init_data *initdata)
diff --git a/drivers/reset/reset-intel-gw.c b/drivers/reset/reset-intel-gw.c
index 854238444616..effc177db80a 100644
--- a/drivers/reset/reset-intel-gw.c
+++ b/drivers/reset/reset-intel-gw.c
@@ -15,9 +15,9 @@
#define RCU_RST_STAT 0x0024
#define RCU_RST_REQ 0x0048
-#define REG_OFFSET GENMASK(31, 16)
-#define BIT_OFFSET GENMASK(15, 8)
-#define STAT_BIT_OFFSET GENMASK(7, 0)
+#define REG_OFFSET_MASK GENMASK(31, 16)
+#define BIT_OFFSET_MASK GENMASK(15, 8)
+#define STAT_BIT_OFFSET_MASK GENMASK(7, 0)
#define to_reset_data(x) container_of(x, struct intel_reset_data, rcdev)
@@ -51,11 +51,11 @@ static u32 id_to_reg_and_bit_offsets(struct intel_reset_data *data,
unsigned long id, u32 *rst_req,
u32 *req_bit, u32 *stat_bit)
{
- *rst_req = FIELD_GET(REG_OFFSET, id);
- *req_bit = FIELD_GET(BIT_OFFSET, id);
+ *rst_req = FIELD_GET(REG_OFFSET_MASK, id);
+ *req_bit = FIELD_GET(BIT_OFFSET_MASK, id);
if (data->soc_data->legacy)
- *stat_bit = FIELD_GET(STAT_BIT_OFFSET, id);
+ *stat_bit = FIELD_GET(STAT_BIT_OFFSET_MASK, id);
else
*stat_bit = *req_bit;
@@ -141,14 +141,14 @@ static int intel_reset_xlate(struct reset_controller_dev *rcdev,
if (spec->args[1] > 31)
return -EINVAL;
- id = FIELD_PREP(REG_OFFSET, spec->args[0]);
- id |= FIELD_PREP(BIT_OFFSET, spec->args[1]);
+ id = FIELD_PREP(REG_OFFSET_MASK, spec->args[0]);
+ id |= FIELD_PREP(BIT_OFFSET_MASK, spec->args[1]);
if (data->soc_data->legacy) {
if (spec->args[2] > 31)
return -EINVAL;
- id |= FIELD_PREP(STAT_BIT_OFFSET, spec->args[2]);
+ id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, spec->args[2]);
}
return id;
@@ -210,11 +210,11 @@ static int intel_reset_probe(struct platform_device *pdev)
if (ret)
return ret;
- data->reboot_id = FIELD_PREP(REG_OFFSET, rb_id[0]);
- data->reboot_id |= FIELD_PREP(BIT_OFFSET, rb_id[1]);
+ data->reboot_id = FIELD_PREP(REG_OFFSET_MASK, rb_id[0]);
+ data->reboot_id |= FIELD_PREP(BIT_OFFSET_MASK, rb_id[1]);
if (data->soc_data->legacy)
- data->reboot_id |= FIELD_PREP(STAT_BIT_OFFSET, rb_id[2]);
+ data->reboot_id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, rb_id[2]);
data->restart_nb.notifier_call = intel_reset_restart_handler;
data->restart_nb.priority = 128;
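
The *_MASK renames above keep the same packing scheme: one 32-bit id carries the register offset (bits 31:16), the request bit (15:8) and, on legacy SoCs, a separate status bit (7:0). A round-trip sketch using the real <linux/bitfield.h> helpers:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define REG_OFFSET_MASK		GENMASK(31, 16)
#define BIT_OFFSET_MASK		GENMASK(15, 8)
#define STAT_BIT_OFFSET_MASK	GENMASK(7, 0)

static unsigned long pack_reset_id(u32 reg, u32 bit, u32 stat)
{
	return FIELD_PREP(REG_OFFSET_MASK, reg) |
	       FIELD_PREP(BIT_OFFSET_MASK, bit) |
	       FIELD_PREP(STAT_BIT_OFFSET_MASK, stat);
}

/* FIELD_GET(BIT_OFFSET_MASK, pack_reset_id(0x48, 12, 12)) == 12 */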
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index 067e7e7b34f1..e066614818a3 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -11,6 +11,7 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -18,10 +19,9 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/reset/reset-simple.h>
#include <linux/spinlock.h>
-#include "reset-simple.h"
-
static inline struct reset_simple_data *
to_reset_simple_data(struct reset_controller_dev *rcdev)
{
@@ -64,6 +64,24 @@ static int reset_simple_deassert(struct reset_controller_dev *rcdev,
return reset_simple_update(rcdev, id, false);
}
+static int reset_simple_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct reset_simple_data *data = to_reset_simple_data(rcdev);
+ int ret;
+
+ if (!data->reset_us)
+ return -ENOTSUPP;
+
+ ret = reset_simple_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ usleep_range(data->reset_us, data->reset_us * 2);
+
+ return reset_simple_deassert(rcdev, id);
+}
+
static int reset_simple_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
@@ -81,6 +99,7 @@ static int reset_simple_status(struct reset_controller_dev *rcdev,
const struct reset_control_ops reset_simple_ops = {
.assert = reset_simple_assert,
.deassert = reset_simple_deassert,
+ .reset = reset_simple_reset,
.status = reset_simple_status,
};
EXPORT_SYMBOL_GPL(reset_simple_ops);
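
With the .reset hook added above, consumers get a one-shot pulse (assert, sleep reset_us..2*reset_us, deassert), while providers that never set reset_us still return -ENOTSUPP. From the consumer side this is just the ordinary reset API; a hedged sketch (device and function names made up):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_hw_init(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/*
	 * Dispatches to reset_simple_reset() when the provider uses
	 * reset_simple_ops with a non-zero reset_us.
	 */
	return reset_control_reset(rst);
}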
diff --git a/drivers/reset/reset-simple.h b/drivers/reset/reset-simple.h
deleted file mode 100644
index 08ccb25a55e6..000000000000
--- a/drivers/reset/reset-simple.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Simple Reset Controller ops
- *
- * Based on Allwinner SoCs Reset Controller driver
- *
- * Copyright 2013 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- */
-
-#ifndef __RESET_SIMPLE_H__
-#define __RESET_SIMPLE_H__
-
-#include <linux/io.h>
-#include <linux/reset-controller.h>
-#include <linux/spinlock.h>
-
-/**
- * struct reset_simple_data - driver data for simple reset controllers
- * @lock: spinlock to protect registers during read-modify-write cycles
- * @membase: memory mapped I/O register range
- * @rcdev: reset controller device base structure
- * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits
- * are set to assert the reset. Note that this says nothing about
- * the voltage level of the actual reset line.
- * @status_active_low: if true, bits read back as cleared while the reset is
- * asserted. Otherwise, bits read back as set while the
- * reset is asserted.
- */
-struct reset_simple_data {
- spinlock_t lock;
- void __iomem *membase;
- struct reset_controller_dev rcdev;
- bool active_low;
- bool status_active_low;
-};
-
-extern const struct reset_control_ops reset_simple_ops;
-
-#endif /* __RESET_SIMPLE_H__ */
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 96953992c2bb..bdd984296196 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -11,13 +11,12 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/reset/reset-simple.h>
#include <linux/reset/socfpga.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include "reset-simple.h"
-
#define SOCFPGA_NR_BANKS 8
static int a10_reset_init(struct device_node *np)
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index e7f169e57bcf..e752594b6971 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -14,13 +14,12 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/reset/reset-simple.h>
#include <linux/reset/sunxi.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include "reset-simple.h"
-
static int sunxi_reset_init(struct device_node *np)
{
struct reset_simple_data *data;
diff --git a/drivers/reset/reset-ti-sci.c b/drivers/reset/reset-ti-sci.c
index bf68729ab729..b799aefad547 100644
--- a/drivers/reset/reset-ti-sci.c
+++ b/drivers/reset/reset-ti-sci.c
@@ -1,7 +1,7 @@
/*
* Texas Instrument's System Control Interface (TI-SCI) reset driver
*
- * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c
index a2635c21db7f..ef97c4dbbb4e 100644
--- a/drivers/reset/reset-ti-syscon.c
+++ b/drivers/reset/reset-ti-syscon.c
@@ -1,7 +1,7 @@
/*
* TI SYSCON regmap reset driver
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
* Suman Anna <afd@ti.com>
*
diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c
index 2b188b3bb69a..027990b79f61 100644
--- a/drivers/reset/reset-uniphier-glue.c
+++ b/drivers/reset/reset-uniphier-glue.c
@@ -9,8 +9,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
-
-#include "reset-simple.h"
+#include <linux/reset/reset-simple.h>
#define MAX_CLKS 2
#define MAX_RSTS 2
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index b54d87d45c89..f3b8e6dcd879 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1729,15 +1729,6 @@ config RTC_DRV_TEGRA
This drive can also be built as a module. If so, the module
will be called rtc-tegra.
-config RTC_DRV_PUV3
- tristate "PKUnity v3 RTC support"
- depends on ARCH_PUV3
- help
- This enables support for the RTC in the PKUnity-v3 SoCs.
-
- This drive can also be built as a module. If so, the module
- will be called rtc-puv3.
-
config RTC_DRV_LOONGSON1
tristate "loongson1 RTC support"
depends on MACH_LOONGSON32
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 0721752c6ed4..880e08a409c3 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -128,7 +128,6 @@ obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
-obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
obj-$(CONFIG_RTC_DRV_R7301) += rtc-r7301.o
obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
deleted file mode 100644
index 954b88d2485f..000000000000
--- a/drivers/rtc/rtc-puv3.c
+++ /dev/null
@@ -1,286 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * RTC driver code specific to PKUnity SoC and UniCore ISA
- *
- * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
- * Copyright (C) 2001-2010 Guan Xuetao
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/rtc.h>
-#include <linux/bcd.h>
-#include <linux/clk.h>
-#include <linux/log2.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-static struct resource *puv3_rtc_mem;
-
-static int puv3_rtc_alarmno = IRQ_RTCAlarm;
-static int puv3_rtc_tickno = IRQ_RTC;
-
-static DEFINE_SPINLOCK(puv3_rtc_pie_lock);
-
-/* IRQ Handlers */
-static irqreturn_t puv3_rtc_alarmirq(int irq, void *id)
-{
- struct rtc_device *rdev = id;
-
- writel(readl(RTC_RTSR) | RTC_RTSR_AL, RTC_RTSR);
- rtc_update_irq(rdev, 1, RTC_AF | RTC_IRQF);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t puv3_rtc_tickirq(int irq, void *id)
-{
- struct rtc_device *rdev = id;
-
- writel(readl(RTC_RTSR) | RTC_RTSR_HZ, RTC_RTSR);
- rtc_update_irq(rdev, 1, RTC_PF | RTC_IRQF);
- return IRQ_HANDLED;
-}
-
-/* Update control registers */
-static void puv3_rtc_setaie(struct device *dev, int to)
-{
- unsigned int tmp;
-
- dev_dbg(dev, "%s: aie=%d\n", __func__, to);
-
- tmp = readl(RTC_RTSR) & ~RTC_RTSR_ALE;
-
- if (to)
- tmp |= RTC_RTSR_ALE;
-
- writel(tmp, RTC_RTSR);
-}
-
-static int puv3_rtc_setpie(struct device *dev, int enabled)
-{
- unsigned int tmp;
-
- dev_dbg(dev, "%s: pie=%d\n", __func__, enabled);
-
- spin_lock_irq(&puv3_rtc_pie_lock);
- tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE;
-
- if (enabled)
- tmp |= RTC_RTSR_HZE;
-
- writel(tmp, RTC_RTSR);
- spin_unlock_irq(&puv3_rtc_pie_lock);
-
- return 0;
-}
-
-/* Time read/write */
-static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
-{
- rtc_time64_to_tm(readl(RTC_RCNR), rtc_tm);
-
- dev_dbg(dev, "read time %ptRr\n", rtc_tm);
-
- return 0;
-}
-
-static int puv3_rtc_settime(struct device *dev, struct rtc_time *tm)
-{
- dev_dbg(dev, "set time %ptRr\n", tm);
-
- writel(rtc_tm_to_time64(tm), RTC_RCNR);
-
- return 0;
-}
-
-static int puv3_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- struct rtc_time *alm_tm = &alrm->time;
-
- rtc_time64_to_tm(readl(RTC_RTAR), alm_tm);
-
- alrm->enabled = readl(RTC_RTSR) & RTC_RTSR_ALE;
-
- dev_dbg(dev, "read alarm: %d, %ptRr\n", alrm->enabled, alm_tm);
-
- return 0;
-}
-
-static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- struct rtc_time *tm = &alrm->time;
-
- dev_dbg(dev, "set alarm: %d, %ptRr\n", alrm->enabled, tm);
-
- writel(rtc_tm_to_time64(tm), RTC_RTAR);
-
- puv3_rtc_setaie(dev, alrm->enabled);
-
- if (alrm->enabled)
- enable_irq_wake(puv3_rtc_alarmno);
- else
- disable_irq_wake(puv3_rtc_alarmno);
-
- return 0;
-}
-
-static int puv3_rtc_proc(struct device *dev, struct seq_file *seq)
-{
- seq_printf(seq, "periodic_IRQ\t: %s\n",
- (readl(RTC_RTSR) & RTC_RTSR_HZE) ? "yes" : "no");
- return 0;
-}
-
-static const struct rtc_class_ops puv3_rtcops = {
- .read_time = puv3_rtc_gettime,
- .set_time = puv3_rtc_settime,
- .read_alarm = puv3_rtc_getalarm,
- .set_alarm = puv3_rtc_setalarm,
- .proc = puv3_rtc_proc,
-};
-
-static void puv3_rtc_enable(struct device *dev, int en)
-{
- if (!en) {
- writel(readl(RTC_RTSR) & ~RTC_RTSR_HZE, RTC_RTSR);
- } else {
- /* re-enable the device, and check it is ok */
- if ((readl(RTC_RTSR) & RTC_RTSR_HZE) == 0) {
- dev_info(dev, "rtc disabled, re-enabling\n");
- writel(readl(RTC_RTSR) | RTC_RTSR_HZE, RTC_RTSR);
- }
- }
-}
-
-static int puv3_rtc_remove(struct platform_device *dev)
-{
- puv3_rtc_setpie(&dev->dev, 0);
- puv3_rtc_setaie(&dev->dev, 0);
-
- release_resource(puv3_rtc_mem);
- kfree(puv3_rtc_mem);
-
- return 0;
-}
-
-static int puv3_rtc_probe(struct platform_device *pdev)
-{
- struct rtc_device *rtc;
- struct resource *res;
- int ret;
-
- dev_dbg(&pdev->dev, "%s: probe=%p\n", __func__, pdev);
-
- /* find the IRQs */
- puv3_rtc_tickno = platform_get_irq(pdev, 1);
- if (puv3_rtc_tickno < 0)
- return -ENOENT;
-
- puv3_rtc_alarmno = platform_get_irq(pdev, 0);
- if (puv3_rtc_alarmno < 0)
- return -ENOENT;
-
- dev_dbg(&pdev->dev, "PKUnity_rtc: tick irq %d, alarm irq %d\n",
- puv3_rtc_tickno, puv3_rtc_alarmno);
-
- rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
-
- ret = devm_request_irq(&pdev->dev, puv3_rtc_alarmno, puv3_rtc_alarmirq,
- 0, "pkunity-rtc alarm", rtc);
- if (ret) {
- dev_err(&pdev->dev, "IRQ%d error %d\n", puv3_rtc_alarmno, ret);
- return ret;
- }
-
- ret = devm_request_irq(&pdev->dev, puv3_rtc_tickno, puv3_rtc_tickirq,
- 0, "pkunity-rtc tick", rtc);
- if (ret) {
- dev_err(&pdev->dev, "IRQ%d error %d\n", puv3_rtc_tickno, ret);
- return ret;
- }
-
- /* get the memory region */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get memory region resource\n");
- return -ENOENT;
- }
-
- puv3_rtc_mem = request_mem_region(res->start, resource_size(res),
- pdev->name);
-
- if (puv3_rtc_mem == NULL) {
- dev_err(&pdev->dev, "failed to reserve memory region\n");
- ret = -ENOENT;
- goto err_nores;
- }
-
- puv3_rtc_enable(&pdev->dev, 1);
-
- /* register RTC and exit */
- rtc->ops = &puv3_rtcops;
- rtc->range_max = U32_MAX;
- ret = rtc_register_device(rtc);
- if (ret)
- goto err_nortc;
-
- /* platform setup code should have handled this; sigh */
- if (!device_can_wakeup(&pdev->dev))
- device_init_wakeup(&pdev->dev, 1);
-
- platform_set_drvdata(pdev, rtc);
- return 0;
-
- err_nortc:
- puv3_rtc_enable(&pdev->dev, 0);
- release_resource(puv3_rtc_mem);
-
- err_nores:
- return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ticnt_save;
-
-static int puv3_rtc_suspend(struct device *dev)
-{
- /* save RTAR for anyone using periodic interrupts */
- ticnt_save = readl(RTC_RTAR);
- puv3_rtc_enable(dev, 0);
- return 0;
-}
-
-static int puv3_rtc_resume(struct device *dev)
-{
- puv3_rtc_enable(dev, 1);
- writel(ticnt_save, RTC_RTAR);
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(puv3_rtc_pm_ops, puv3_rtc_suspend, puv3_rtc_resume);
-
-static struct platform_driver puv3_rtc_driver = {
- .probe = puv3_rtc_probe,
- .remove = puv3_rtc_remove,
- .driver = {
- .name = "PKUnity-v3-RTC",
- .pm = &puv3_rtc_pm_ops,
- }
-};
-
-module_platform_driver(puv3_rtc_driver);
-
-MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip");
-MODULE_AUTHOR("Hu Dongliang");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index cf87eb27879f..eb17fea8075c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2802,7 +2802,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
blk_update_request(req, BLK_STS_OK,
blk_rq_bytes(req) - proc_bytes);
blk_mq_requeue_request(req, true);
- } else {
+ } else if (likely(!blk_should_fake_timeout(req->q))) {
blk_mq_complete_request(req);
}
}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index facb588d09e4..1b9e1442e6a5 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -319,7 +319,7 @@ dasd_diag_check_device(struct dasd_device *device)
struct dasd_diag_characteristics *rdc_data;
struct vtoc_cms_label *label;
struct dasd_block *block;
- struct dasd_diag_bio bio;
+ struct dasd_diag_bio *bio;
unsigned int sb, bsize;
blocknum_t end_block;
int rc;
@@ -395,29 +395,36 @@ dasd_diag_check_device(struct dasd_device *device)
rc = -ENOMEM;
goto out;
}
+ bio = kzalloc(sizeof(*bio), GFP_KERNEL);
+ if (bio == NULL) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "No memory to allocate initialization bio");
+ rc = -ENOMEM;
+ goto out_label;
+ }
rc = 0;
end_block = 0;
/* try all sizes - needed for ECKD devices */
for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
mdsk_init_io(device, bsize, 0, &end_block);
- memset(&bio, 0, sizeof (struct dasd_diag_bio));
- bio.type = MDSK_READ_REQ;
- bio.block_number = private->pt_block + 1;
- bio.buffer = label;
+ memset(bio, 0, sizeof(*bio));
+ bio->type = MDSK_READ_REQ;
+ bio->block_number = private->pt_block + 1;
+ bio->buffer = label;
memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
private->iob.dev_nr = rdc_data->dev_nr;
private->iob.key = 0;
private->iob.flags = 0; /* do synchronous io */
private->iob.block_count = 1;
private->iob.interrupt_params = 0;
- private->iob.bio_list = &bio;
+ private->iob.bio_list = bio;
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
rc = dia250(&private->iob, RW_BIO);
if (rc == 3) {
pr_warn("%s: A 64-bit DIAG call failed\n",
dev_name(&device->cdev->dev));
rc = -EOPNOTSUPP;
- goto out_label;
+ goto out_bio;
}
mdsk_term_io(device);
if (rc == 0)
@@ -427,7 +434,7 @@ dasd_diag_check_device(struct dasd_device *device)
pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
dev_name(&device->cdev->dev), rc);
rc = -EIO;
- goto out_label;
+ goto out_bio;
}
/* check for label block */
if (memcmp(label->label_id, DASD_DIAG_CMS1,
@@ -457,6 +464,8 @@ dasd_diag_check_device(struct dasd_device *device)
(rc == 4) ? ", read-only device" : "");
rc = 0;
}
+out_bio:
+ kfree(bio);
out_label:
free_page((long) label);
out:
@@ -506,7 +515,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
struct req_iterator iter;
struct bio_vec bv;
char *dst;
- unsigned int count, datasize;
+ unsigned int count;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char rw_cmd;
@@ -534,10 +543,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
if (count != last_rec - first_rec + 1)
return ERR_PTR(-EINVAL);
/* Build the request */
- datasize = sizeof(struct dasd_diag_req) +
- count*sizeof(struct dasd_diag_bio);
- cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev,
- blk_mq_rq_to_pdu(req));
+ cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, struct_size(dreq, bio, count),
+ memdev, blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 384edffe5cb4..299e77ec2c41 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -31,8 +31,7 @@
static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
-static blk_qc_t dcssblk_make_request(struct request_queue *q,
- struct bio *bio);
+static blk_qc_t dcssblk_submit_bio(struct bio *bio);
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
@@ -41,6 +40,7 @@ static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
static int dcssblk_major;
static const struct block_device_operations dcssblk_devops = {
.owner = THIS_MODULE,
+ .submit_bio = dcssblk_submit_bio,
.open = dcssblk_open,
.release = dcssblk_release,
};
@@ -651,8 +651,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
}
dev_info->gd->major = dcssblk_major;
dev_info->gd->fops = &dcssblk_devops;
- dev_info->dcssblk_queue =
- blk_alloc_queue(dcssblk_make_request, NUMA_NO_NODE);
+ dev_info->dcssblk_queue = blk_alloc_queue(NUMA_NO_NODE);
dev_info->gd->queue = dev_info->dcssblk_queue;
dev_info->gd->private_data = dev_info;
blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
@@ -833,7 +832,6 @@ dcssblk_open(struct block_device *bdev, fmode_t mode)
goto out;
}
atomic_inc(&dev_info->use_count);
- bdev->bd_block_size = 4096;
rc = 0;
out:
return rc;
@@ -868,7 +866,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
}
static blk_qc_t
-dcssblk_make_request(struct request_queue *q, struct bio *bio)
+dcssblk_submit_bio(struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
struct bio_vec bvec;
@@ -878,7 +876,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
unsigned long source_addr;
unsigned long bytes_done;
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
bytes_done = 0;
dev_info = bio->bi_disk->private_data;
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index e01889394c84..a4f6f2e62b1d 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -256,7 +256,8 @@ static void scm_request_finish(struct scm_request *scmrq)
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
error = blk_mq_rq_to_pdu(scmrq->request[i]);
*error = scmrq->error;
- blk_mq_complete_request(scmrq->request[i]);
+ if (likely(!blk_should_fake_timeout(scmrq->request[i]->q)))
+ blk_mq_complete_request(scmrq->request[i]);
}
atomic_dec(&bdev->queued_reqs);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 45a04daec89e..c2536f7767b3 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -182,7 +182,7 @@ static unsigned long xpram_highest_page_index(void)
/*
* Block device make request function.
*/
-static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t xpram_submit_bio(struct bio *bio)
{
xpram_device_t *xdev = bio->bi_disk->private_data;
struct bio_vec bvec;
@@ -191,7 +191,7 @@ static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
unsigned long page_addr;
unsigned long bytes;
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
if ((bio->bi_iter.bi_sector & 7) != 0 ||
(bio->bi_iter.bi_size & 4095) != 0)
@@ -250,6 +250,7 @@ static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static const struct block_device_operations xpram_devops =
{
.owner = THIS_MODULE,
+ .submit_bio = xpram_submit_bio,
.getgeo = xpram_getgeo,
};
@@ -343,8 +344,7 @@ static int __init xpram_setup_blkdev(void)
xpram_disks[i] = alloc_disk(1);
if (!xpram_disks[i])
goto out;
- xpram_queues[i] = blk_alloc_queue(xpram_make_request,
- NUMA_NO_NODE);
+ xpram_queues[i] = blk_alloc_queue(NUMA_NO_NODE);
if (!xpram_queues[i]) {
put_disk(xpram_disks[i]);
goto out;
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 98d7fc152e32..aec996de44d9 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -556,8 +556,9 @@ tty3270_scroll_backward(struct kbd_data *kbd)
* Pass input line to tty.
*/
static void
-tty3270_read_tasklet(struct raw3270_request *rrq)
+tty3270_read_tasklet(unsigned long data)
{
+ struct raw3270_request *rrq = (struct raw3270_request *)data;
static char kreset_data = TW_KR;
struct tty3270 *tp = container_of(rrq->view, struct tty3270, view);
char *input;
@@ -652,8 +653,9 @@ tty3270_issue_read(struct tty3270 *tp, int lock)
* Hang up the tty
*/
static void
-tty3270_hangup_tasklet(struct tty3270 *tp)
+tty3270_hangup_tasklet(unsigned long data)
{
+ struct tty3270 *tp = (struct tty3270 *)data;
tty_port_tty_hangup(&tp->port, true);
raw3270_put_view(&tp->view);
}
@@ -752,11 +754,9 @@ tty3270_alloc_view(void)
tty_port_init(&tp->port);
timer_setup(&tp->timer, tty3270_update, 0);
- tasklet_init(&tp->readlet,
- (void (*)(unsigned long)) tty3270_read_tasklet,
+ tasklet_init(&tp->readlet, tty3270_read_tasklet,
(unsigned long) tp->read);
- tasklet_init(&tp->hanglet,
- (void (*)(unsigned long)) tty3270_hangup_tasklet,
+ tasklet_init(&tp->hanglet, tty3270_hangup_tasklet,
(unsigned long) tp);
INIT_WORK(&tp->resize_work, tty3270_resize_work);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 08f812475f5e..d29f1b71618e 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-1.0+
/*
* zcore module to export memory content and register sets for creating system
- * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
- * dump format as s390 standalone dumps.
+ * dumps on SCSI disks (zfcpdump).
*
* For more information please refer to Documentation/s390/zfcpdump.rst
*
@@ -16,7 +15,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
-#include <linux/memblock.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
@@ -33,8 +31,6 @@
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
-#define CHUNK_INFO_SIZE 34 /* 2 16-byte char, each followed by blank */
-
enum arch_id {
ARCH_S390 = 0,
ARCH_S390X = 1,
@@ -48,7 +44,6 @@ struct ipib_info {
static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
-static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *zcore_ipl_block;
@@ -139,46 +134,6 @@ static void release_hsa(void)
hsa_available = 0;
}
-static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
- size_t count, loff_t *ppos)
-{
- return simple_read_from_buffer(buf, count, ppos, filp->private_data,
- memblock.memory.cnt * CHUNK_INFO_SIZE);
-}
-
-static int zcore_memmap_open(struct inode *inode, struct file *filp)
-{
- struct memblock_region *reg;
- char *buf;
- int i = 0;
-
- buf = kcalloc(memblock.memory.cnt, CHUNK_INFO_SIZE, GFP_KERNEL);
- if (!buf) {
- return -ENOMEM;
- }
- for_each_memblock(memory, reg) {
- sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ",
- (unsigned long long) reg->base,
- (unsigned long long) reg->size);
- }
- filp->private_data = buf;
- return nonseekable_open(inode, filp);
-}
-
-static int zcore_memmap_release(struct inode *inode, struct file *filp)
-{
- kfree(filp->private_data);
- return 0;
-}
-
-static const struct file_operations zcore_memmap_fops = {
- .owner = THIS_MODULE,
- .read = zcore_memmap_read,
- .open = zcore_memmap_open,
- .release = zcore_memmap_release,
- .llseek = no_llseek,
-};
-
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -335,17 +290,11 @@ static int __init zcore_init(void)
rc = -ENOMEM;
goto fail;
}
- zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
- NULL, &zcore_memmap_fops);
- if (!zcore_memmap_file) {
- rc = -ENOMEM;
- goto fail_dir;
- }
zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
NULL, &zcore_reipl_fops);
if (!zcore_reipl_file) {
rc = -ENOMEM;
- goto fail_memmap_file;
+ goto fail_dir;
}
zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
NULL, &zcore_hsa_fops);
@@ -357,8 +306,6 @@ static int __init zcore_init(void)
fail_reipl_file:
debugfs_remove(zcore_reipl_file);
-fail_memmap_file:
- debugfs_remove(zcore_memmap_file);
fail_dir:
debugfs_remove(zcore_dir);
fail:
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index bb1c8402c67d..cd2df4ff8e0e 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -15,7 +15,6 @@
#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */
#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
-#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
enum qdio_irq_states {
QDIO_IRQ_STATE_INACTIVE,
@@ -166,11 +165,7 @@ struct qdio_dev_perf_stat {
} ____cacheline_aligned;
struct qdio_queue_perf_stat {
- /*
- * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
- * Since max. 127 SBALs are scanned reuse entry for 128 as queue full
- * aka 127 SBALs found.
- */
+ /* Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128. */
unsigned int nr_sbals[8];
unsigned int nr_sbal_error;
unsigned int nr_sbal_nop;
@@ -185,8 +180,6 @@ struct qdio_input_q {
/* Batch of SBALs that we processed while polling the queue: */
unsigned int batch_start;
unsigned int batch_count;
- /* last time of noticing incoming data */
- u64 timestamp;
};
struct qdio_output_q {
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index da95c923d81a..863d17c802ca 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -165,7 +165,7 @@ static int qstat_show(struct seq_file *m, void *v)
}
seq_printf(m, "\n1 2.. 4.. 8.. "
- "16.. 32.. 64.. 127\n");
+ "16.. 32.. 64.. 128\n");
for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 0c919a11a46e..4fab8bba2cdd 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -413,15 +413,8 @@ static inline void qdio_stop_polling(struct qdio_q *q)
static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
- int pos;
-
q->q_stats.nr_sbal_total += count;
- if (count == QDIO_MAX_BUFFERS_MASK) {
- q->q_stats.nr_sbals[7]++;
- return;
- }
- pos = ilog2(count);
- q->q_stats.nr_sbals[pos]++;
+ q->q_stats.nr_sbals[ilog2(count)]++;
}
static void process_buffer_error(struct qdio_q *q, unsigned int start,
@@ -464,11 +457,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
q->timestamp = get_tod_clock_fast();
- /*
- * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
- * would return 0.
- */
- count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+ count = atomic_read(&q->nr_buf_used);
if (!count)
return 0;
@@ -521,14 +510,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
{
- int count;
-
- count = get_inbound_buffer_frontier(q, start);
-
- if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
- q->u.in.timestamp = get_tod_clock();
-
- return count;
+ return get_inbound_buffer_frontier(q, start);
}
static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
@@ -546,22 +528,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
/* more work coming */
return 0;
- if (is_thinint_irq(q->irq_ptr))
- return 1;
-
- /* don't poll under z/VM */
- if (MACHINE_IS_VM)
- return 1;
-
- /*
- * At this point we know, that inbound first_to_check
- * has (probably) not moved (see qdio_inbound_processing).
- */
- if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start);
- return 1;
- } else
- return 0;
+ return 1;
}
static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index e71ca4a719a5..24a1940b829e 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -73,8 +73,7 @@ EXPORT_SYMBOL(ap_perms);
DEFINE_MUTEX(ap_perms_mutex);
EXPORT_SYMBOL(ap_perms_mutex);
-static struct ap_config_info *ap_configuration;
-static bool initialised;
+static struct ap_config_info *ap_qci_info;
/*
* AP bus related debug feature things.
@@ -93,7 +92,7 @@ static DECLARE_WORK(ap_scan_work, ap_scan_bus);
* Tasklet & timer for AP request polling and interrupts
*/
static void ap_tasklet_fn(unsigned long);
-static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
+static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread;
static DEFINE_MUTEX(ap_poll_thread_mutex);
@@ -105,8 +104,10 @@ static struct hrtimer ap_poll_timer;
*/
static unsigned long long poll_timeout = 250000;
-/* Maximum domain id */
-static int ap_max_domain_id;
+/* Maximum domain id, if not given via qci */
+static int ap_max_domain_id = 15;
+/* Maximum adapter id, if not given via qci */
+static int ap_max_adapter_id = 63;
static struct bus_type ap_bus_type;
@@ -154,12 +155,12 @@ static int ap_interrupts_available(void)
}
/**
- * ap_configuration_available(): Test if AP configuration
- * information is available.
+ * ap_qci_available(): Test if AP configuration
+ * information can be queried via QCI subfunction.
*
- * Returns 1 if AP configuration information is available.
+ * Returns 1 if subfunction PQAP(QCI) is available.
*/
-static int ap_configuration_available(void)
+static int ap_qci_available(void)
{
return test_facility(12);
}
@@ -182,22 +183,22 @@ static int ap_apft_available(void)
*/
static inline int ap_qact_available(void)
{
- if (ap_configuration)
- return ap_configuration->qact;
+ if (ap_qci_info)
+ return ap_qci_info->qact;
return 0;
}
/*
- * ap_query_configuration(): Fetch cryptographic config info
+ * ap_fetch_qci_info(): Fetch cryptographic config info
*
* Returns the ap configuration info fetched via PQAP(QCI).
* On success 0 is returned, on failure a negative errno
* is returned, e.g. if the PQAP(QCI) instruction is not
* available, the return value will be -EOPNOTSUPP.
*/
-static inline int ap_query_configuration(struct ap_config_info *info)
+static inline int ap_fetch_qci_info(struct ap_config_info *info)
{
- if (!ap_configuration_available())
+ if (!ap_qci_available())
return -EOPNOTSUPP;
if (!info)
return -EINVAL;
@@ -205,21 +206,40 @@ static inline int ap_query_configuration(struct ap_config_info *info)
}
/**
- * ap_init_configuration(): Allocate and query configuration array.
+ * ap_init_qci_info(): Allocate and query qci config info.
+ * Also updates the static variables ap_max_domain_id
+ * and ap_max_adapter_id if this info is available.
+
*/
-static void ap_init_configuration(void)
+static void __init ap_init_qci_info(void)
{
- if (!ap_configuration_available())
+ if (!ap_qci_available()) {
+ AP_DBF(DBF_INFO, "%s QCI not supported\n", __func__);
return;
+ }
- ap_configuration = kzalloc(sizeof(*ap_configuration), GFP_KERNEL);
- if (!ap_configuration)
+ ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL);
+ if (!ap_qci_info)
return;
- if (ap_query_configuration(ap_configuration) != 0) {
- kfree(ap_configuration);
- ap_configuration = NULL;
+ if (ap_fetch_qci_info(ap_qci_info) != 0) {
+ kfree(ap_qci_info);
+ ap_qci_info = NULL;
return;
}
+ AP_DBF(DBF_INFO, "%s successful fetched initial qci info\n", __func__);
+
+ if (ap_qci_info->apxa) {
+ if (ap_qci_info->Na) {
+ ap_max_adapter_id = ap_qci_info->Na;
+ AP_DBF(DBF_INFO, "%s new ap_max_adapter_id is %d\n",
+ __func__, ap_max_adapter_id);
+ }
+ if (ap_qci_info->Nd) {
+ ap_max_domain_id = ap_qci_info->Nd;
+ AP_DBF(DBF_INFO, "%s new ap_max_domain_id is %d\n",
+ __func__, ap_max_domain_id);
+ }
+ }
}
/*
@@ -233,7 +253,6 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
/*
* ap_test_config_card_id(): Test, whether an AP card ID is configured.
- * @id AP card ID
*
* Returns 0 if the card is not configured
* 1 if the card is configured or
@@ -241,16 +260,16 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
*/
static inline int ap_test_config_card_id(unsigned int id)
{
- if (!ap_configuration) /* QCI not supported */
- /* only ids 0...3F may be probed */
- return id < 0x40 ? 1 : 0;
- return ap_test_config(ap_configuration->apm, id);
+ if (id > ap_max_adapter_id)
+ return 0;
+ if (ap_qci_info)
+ return ap_test_config(ap_qci_info->apm, id);
+ return 1;
}
/*
* ap_test_config_usage_domain(): Test, whether an AP usage domain
* is configured.
- * @domain AP usage domain ID
*
* Returns 0 if the usage domain is not configured
* 1 if the usage domain is configured or
@@ -258,9 +277,11 @@ static inline int ap_test_config_card_id(unsigned int id)
*/
int ap_test_config_usage_domain(unsigned int domain)
{
- if (!ap_configuration) /* QCI not supported */
- return domain < 16;
- return ap_test_config(ap_configuration->aqm, domain);
+ if (domain > ap_max_domain_id)
+ return 0;
+ if (ap_qci_info)
+ return ap_test_config(ap_qci_info->aqm, domain);
+ return 1;
}
EXPORT_SYMBOL(ap_test_config_usage_domain);
@@ -274,43 +295,44 @@ EXPORT_SYMBOL(ap_test_config_usage_domain);
*/
int ap_test_config_ctrl_domain(unsigned int domain)
{
- if (!ap_configuration) /* QCI not supported */
+ if (!ap_qci_info || domain > ap_max_domain_id)
return 0;
- return ap_test_config(ap_configuration->adm, domain);
+ return ap_test_config(ap_qci_info->adm, domain);
}
EXPORT_SYMBOL(ap_test_config_ctrl_domain);
-/**
- * ap_query_queue(): Check if an AP queue is available.
- * @qid: The AP queue number
- * @queue_depth: Pointer to queue depth value
- * @device_type: Pointer to device type value
- * @facilities: Pointer to facility indicator
+/*
+ * ap_queue_info(): Check and get AP queue info.
+ * Returns true if TAPQ succeeded and the info is filled, or
+ * false otherwise.
*/
-static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
- unsigned int *facilities)
+static bool ap_queue_info(ap_qid_t qid, int *q_type,
+ unsigned int *q_fac, int *q_depth)
{
struct ap_queue_status status;
- unsigned long info;
- int nd;
+ unsigned long info = 0;
- if (!ap_test_config_card_id(AP_QID_CARD(qid)))
- return -ENODEV;
+	/* make sure we don't run into a specification exception */
+ if (AP_QID_CARD(qid) > ap_max_adapter_id ||
+ AP_QID_QUEUE(qid) > ap_max_domain_id)
+ return false;
+ /* call TAPQ on this APQN */
status = ap_test_queue(qid, ap_apft_available(), &info);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- *queue_depth = (int)(info & 0xff);
- *device_type = (int)((info >> 24) & 0xff);
- *facilities = (unsigned int)(info >> 32);
- /* Update maximum domain id */
- nd = (info >> 16) & 0xff;
- /* if N bit is available, z13 and newer */
- if ((info & (1UL << 57)) && nd > 0)
- ap_max_domain_id = nd;
- else /* older machine types */
- ap_max_domain_id = 15;
- switch (*device_type) {
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ /*
+	 * According to the architecture, the info should be
+	 * filled in all these cases. An all-zero info is not
+	 * possible, as at least one of the mode bits is set.
+ */
+ if (WARN_ON_ONCE(!info))
+ return false;
+ *q_type = (int)((info >> 24) & 0xff);
+ *q_fac = (unsigned int)(info >> 32);
+ *q_depth = (int)(info & 0xff);
+ switch (*q_type) {
/* For CEX2 and CEX3 the available functions
* are not reflected by the facilities bits.
* Instead it is coded into the type. So here
@@ -318,37 +340,31 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
*/
case AP_DEVICE_TYPE_CEX2A:
case AP_DEVICE_TYPE_CEX3A:
- *facilities |= 0x08000000;
+ *q_fac |= 0x08000000;
break;
case AP_DEVICE_TYPE_CEX2C:
case AP_DEVICE_TYPE_CEX3C:
- *facilities |= 0x10000000;
+ *q_fac |= 0x10000000;
break;
default:
break;
}
- return 0;
- case AP_RESPONSE_Q_NOT_AVAIL:
- case AP_RESPONSE_DECONFIGURED:
- case AP_RESPONSE_CHECKSTOPPED:
- case AP_RESPONSE_INVALID_ADDRESS:
- return -ENODEV;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- case AP_RESPONSE_OTHERWISE_CHANGED:
- case AP_RESPONSE_BUSY:
- return -EBUSY;
+ return true;
default:
- BUG();
+ /*
+		 * A response code which indicates that no info is available.
+ */
+ return false;
}
}
-void ap_wait(enum ap_wait wait)
+void ap_wait(enum ap_sm_wait wait)
{
ktime_t hr_time;
switch (wait) {
- case AP_WAIT_AGAIN:
- case AP_WAIT_INTERRUPT:
+ case AP_SM_WAIT_AGAIN:
+ case AP_SM_WAIT_INTERRUPT:
if (ap_using_interrupts())
break;
if (ap_poll_kthread) {
@@ -356,7 +372,7 @@ void ap_wait(enum ap_wait wait)
break;
}
fallthrough;
- case AP_WAIT_TIMEOUT:
+ case AP_SM_WAIT_TIMEOUT:
spin_lock_bh(&ap_poll_timer_lock);
if (!hrtimer_is_queued(&ap_poll_timer)) {
hr_time = poll_timeout;
@@ -365,7 +381,7 @@ void ap_wait(enum ap_wait wait)
}
spin_unlock_bh(&ap_poll_timer_lock);
break;
- case AP_WAIT_NONE:
+ case AP_SM_WAIT_NONE:
default:
break;
}
@@ -382,7 +398,7 @@ void ap_request_timeout(struct timer_list *t)
struct ap_queue *aq = from_timer(aq, t, timeout);
spin_lock_bh(&aq->lock);
- ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT));
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
spin_unlock_bh(&aq->lock);
}
@@ -418,7 +434,7 @@ static void ap_tasklet_fn(unsigned long dummy)
{
int bkt;
struct ap_queue *aq;
- enum ap_wait wait = AP_WAIT_NONE;
+ enum ap_sm_wait wait = AP_SM_WAIT_NONE;
/* Reset the indicator if interrupts are used. Thus new interrupts can
	 * be received. Doing it at the beginning of the tasklet is therefore
@@ -430,7 +446,7 @@ static void ap_tasklet_fn(unsigned long dummy)
spin_lock_bh(&ap_queues_lock);
hash_for_each(ap_queues, bkt, aq, hnode) {
spin_lock_bh(&aq->lock);
- wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
+ wait = min(wait, ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
spin_unlock_bh(&ap_queues_lock);
@@ -751,9 +767,6 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
{
struct device_driver *drv = &ap_drv->driver;
- if (!initialised)
- return -ENODEV;
-
drv->bus = &ap_bus_type;
drv->probe = ap_device_probe;
drv->remove = ap_device_remove;
@@ -929,11 +942,12 @@ static ssize_t ap_domain_store(struct bus_type *bus,
domain < 0 || domain > ap_max_domain_id ||
!test_bit_inv(domain, ap_perms.aqm))
return -EINVAL;
+
spin_lock_bh(&ap_domain_lock);
ap_domain_index = domain;
spin_unlock_bh(&ap_domain_lock);
- AP_DBF(DBF_DEBUG, "stored new default domain=%d\n", domain);
+ AP_DBF(DBF_INFO, "stored new default domain=%d\n", domain);
return count;
}
@@ -942,45 +956,45 @@ static BUS_ATTR_RW(ap_domain);
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
- if (!ap_configuration) /* QCI not supported */
+ if (!ap_qci_info) /* QCI not supported */
return scnprintf(buf, PAGE_SIZE, "not supported\n");
return scnprintf(buf, PAGE_SIZE,
"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_configuration->adm[0], ap_configuration->adm[1],
- ap_configuration->adm[2], ap_configuration->adm[3],
- ap_configuration->adm[4], ap_configuration->adm[5],
- ap_configuration->adm[6], ap_configuration->adm[7]);
+ ap_qci_info->adm[0], ap_qci_info->adm[1],
+ ap_qci_info->adm[2], ap_qci_info->adm[3],
+ ap_qci_info->adm[4], ap_qci_info->adm[5],
+ ap_qci_info->adm[6], ap_qci_info->adm[7]);
}
static BUS_ATTR_RO(ap_control_domain_mask);
static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
{
- if (!ap_configuration) /* QCI not supported */
+ if (!ap_qci_info) /* QCI not supported */
return scnprintf(buf, PAGE_SIZE, "not supported\n");
return scnprintf(buf, PAGE_SIZE,
"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_configuration->aqm[0], ap_configuration->aqm[1],
- ap_configuration->aqm[2], ap_configuration->aqm[3],
- ap_configuration->aqm[4], ap_configuration->aqm[5],
- ap_configuration->aqm[6], ap_configuration->aqm[7]);
+ ap_qci_info->aqm[0], ap_qci_info->aqm[1],
+ ap_qci_info->aqm[2], ap_qci_info->aqm[3],
+ ap_qci_info->aqm[4], ap_qci_info->aqm[5],
+ ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
}
static BUS_ATTR_RO(ap_usage_domain_mask);
static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
{
- if (!ap_configuration) /* QCI not supported */
+ if (!ap_qci_info) /* QCI not supported */
return scnprintf(buf, PAGE_SIZE, "not supported\n");
return scnprintf(buf, PAGE_SIZE,
"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_configuration->apm[0], ap_configuration->apm[1],
- ap_configuration->apm[2], ap_configuration->apm[3],
- ap_configuration->apm[4], ap_configuration->apm[5],
- ap_configuration->apm[6], ap_configuration->apm[7]);
+ ap_qci_info->apm[0], ap_qci_info->apm[1],
+ ap_qci_info->apm[2], ap_qci_info->apm[3],
+ ap_qci_info->apm[4], ap_qci_info->apm[5],
+ ap_qci_info->apm[6], ap_qci_info->apm[7]);
}
static BUS_ATTR_RO(ap_adapter_mask);
@@ -1066,17 +1080,18 @@ static BUS_ATTR_RW(poll_timeout);
static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
{
- int max_domain_id;
-
- if (ap_configuration)
- max_domain_id = ap_max_domain_id ? : -1;
- else
- max_domain_id = 15;
- return scnprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_domain_id);
}
static BUS_ATTR_RO(ap_max_domain_id);
+static ssize_t ap_max_adapter_id_show(struct bus_type *bus, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_adapter_id);
+}
+
+static BUS_ATTR_RO(ap_max_adapter_id);
+
static ssize_t apmask_show(struct bus_type *bus, char *buf)
{
int rc;
@@ -1149,6 +1164,7 @@ static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_interrupts,
&bus_attr_poll_timeout,
&bus_attr_ap_max_domain_id,
+ &bus_attr_ap_max_adapter_id,
&bus_attr_apmask,
&bus_attr_aqmask,
NULL,
@@ -1160,47 +1176,42 @@ static struct bus_attribute *const ap_bus_attrs[] = {
*/
static void ap_select_domain(void)
{
- int count, max_count, best_domain;
struct ap_queue_status status;
- int i, j;
+ int card, dom;
/*
- * We want to use a single domain. Either the one specified with
- * the "domain=" parameter or the domain with the maximum number
- * of devices.
+ * Choose the default domain. Either the one specified with
+ * the "domain=" parameter or the first domain with at least
+ * one valid APQN.
*/
spin_lock_bh(&ap_domain_lock);
if (ap_domain_index >= 0) {
/* Domain has already been selected. */
- spin_unlock_bh(&ap_domain_lock);
- return;
+ goto out;
}
- best_domain = -1;
- max_count = 0;
- for (i = 0; i < AP_DOMAINS; i++) {
- if (!ap_test_config_usage_domain(i) ||
- !test_bit_inv(i, ap_perms.aqm))
+ for (dom = 0; dom <= ap_max_domain_id; dom++) {
+ if (!ap_test_config_usage_domain(dom) ||
+ !test_bit_inv(dom, ap_perms.aqm))
continue;
- count = 0;
- for (j = 0; j < AP_DEVICES; j++) {
- if (!ap_test_config_card_id(j))
+ for (card = 0; card <= ap_max_adapter_id; card++) {
+ if (!ap_test_config_card_id(card) ||
+ !test_bit_inv(card, ap_perms.apm))
continue;
- status = ap_test_queue(AP_MKQID(j, i),
+ status = ap_test_queue(AP_MKQID(card, dom),
ap_apft_available(),
NULL);
- if (status.response_code != AP_RESPONSE_NORMAL)
- continue;
- count++;
- }
- if (count > max_count) {
- max_count = count;
- best_domain = i;
+ if (status.response_code == AP_RESPONSE_NORMAL)
+ break;
}
+ if (card <= ap_max_adapter_id)
+ break;
}
- if (best_domain >= 0) {
- ap_domain_index = best_domain;
- AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index);
+ if (dom <= ap_max_domain_id) {
+ ap_domain_index = dom;
+ AP_DBF(DBF_DEBUG, "%s new default domain is %d\n",
+ __func__, ap_domain_index);
}
+out:
spin_unlock_bh(&ap_domain_lock);
}
@@ -1279,12 +1290,13 @@ static int __match_queue_device_with_queue_id(struct device *dev, const void *da
*/
static void _ap_scan_bus_adapter(int id)
{
+ bool broken;
ap_qid_t qid;
unsigned int func;
struct ap_card *ac;
struct device *dev;
struct ap_queue *aq;
- int rc, dom, depth, type, comp_type, borked;
+ int rc, dom, depth, type, comp_type;
/* check if there is a card device registered with this id */
dev = bus_find_device(&ap_bus_type, NULL,
@@ -1312,23 +1324,23 @@ static void _ap_scan_bus_adapter(int id)
/* find the first valid queue */
for (dom = 0; dom < AP_DOMAINS; dom++) {
qid = AP_MKQID(id, dom);
- if (ap_query_queue(qid, &depth, &type, &func) == 0)
+ if (ap_queue_info(qid, &type, &func, &depth))
break;
}
- borked = 0;
+ broken = false;
if (dom >= AP_DOMAINS) {
/* no accessible queue on this card */
- borked = 1;
+ broken = true;
} else if (ac->raw_hwtype != type) {
/* card type has changed */
AP_DBF(DBF_INFO, "card=%02x type changed.\n", id);
- borked = 1;
+ broken = true;
} else if (ac->functions != func) {
/* card functions have changed */
AP_DBF(DBF_INFO, "card=%02x functions changed.\n", id);
- borked = 1;
+ broken = true;
}
- if (borked) {
+ if (broken) {
/* unregister card device and associated queues */
bus_for_each_dev(&ap_bus_type, NULL,
(void *)(long) id,
@@ -1364,16 +1376,14 @@ static void _ap_scan_bus_adapter(int id)
continue;
}
/* try to fetch infos about this queue */
- rc = ap_query_queue(qid, &depth, &type, &func);
+ broken = !ap_queue_info(qid, &type, &func, &depth);
if (dev) {
- if (rc == -ENODEV)
- borked = 1;
- else {
+ if (!broken) {
spin_lock_bh(&aq->lock);
- borked = aq->state == AP_STATE_BORKED;
+ broken = aq->sm_state == AP_SM_STATE_BORKED;
spin_unlock_bh(&aq->lock);
}
- if (borked) {
+ if (broken) {
/* Remove broken device */
AP_DBF(DBF_DEBUG,
"removing broken queue=%02x.%04x\n",
@@ -1383,7 +1393,7 @@ static void _ap_scan_bus_adapter(int id)
put_device(dev);
continue;
}
- if (rc)
+ if (broken)
continue;
/* a new queue device is needed, check out comp type */
comp_type = ap_get_compatible_type(qid, type, func);
@@ -1435,11 +1445,11 @@ static void ap_scan_bus(struct work_struct *unused)
{
int id;
- AP_DBF(DBF_DEBUG, "%s running\n", __func__);
-
- ap_query_configuration(ap_configuration);
+ ap_fetch_qci_info(ap_qci_info);
ap_select_domain();
+ AP_DBF(DBF_DEBUG, "%s running\n", __func__);
+
/* loop over all possible adapters */
for (id = 0; id < AP_DEVICES; id++)
_ap_scan_bus_adapter(id);
@@ -1505,7 +1515,6 @@ static void __init ap_perms_init(void)
*/
static int __init ap_module_init(void)
{
- int max_domain_id;
int rc, i;
rc = ap_debug_init();
@@ -1524,14 +1533,10 @@ static int __init ap_module_init(void)
ap_perms_init();
/* Get AP configuration data if available */
- ap_init_configuration();
-
- if (ap_configuration)
- max_domain_id =
- ap_max_domain_id ? ap_max_domain_id : AP_DOMAINS - 1;
- else
- max_domain_id = 15;
- if (ap_domain_index < -1 || ap_domain_index > max_domain_id ||
+ ap_init_qci_info();
+
+ /* check default domain setting */
+ if (ap_domain_index < -1 || ap_domain_index > ap_max_domain_id ||
(ap_domain_index >= 0 &&
!test_bit_inv(ap_domain_index, ap_perms.aqm))) {
pr_warn("%d is not a valid cryptographic domain\n",
@@ -1539,6 +1544,7 @@ static int __init ap_module_init(void)
ap_domain_index = -1;
}
+ /* enable interrupts if available */
if (ap_interrupts_available()) {
rc = register_adapter_interrupt(&ap_airq);
ap_airq_flag = (rc == 0);
@@ -1581,7 +1587,6 @@ static int __init ap_module_init(void)
}
queue_work(system_long_wq, &ap_scan_work);
- initialised = true;
return 0;
@@ -1595,7 +1600,7 @@ out_bus:
out:
if (ap_using_interrupts())
unregister_adapter_interrupt(&ap_airq);
- kfree(ap_configuration);
+ kfree(ap_qci_info);
return rc;
}
device_initcall(ap_module_init);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 053cc34d2ca2..1a1d5e3c8d45 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -83,39 +83,39 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_INTR_ENABLED 1 /* AP interrupt enabled */
/*
- * AP device states
+ * AP queue state machine states
*/
-enum ap_state {
- AP_STATE_RESET_START,
- AP_STATE_RESET_WAIT,
- AP_STATE_SETIRQ_WAIT,
- AP_STATE_IDLE,
- AP_STATE_WORKING,
- AP_STATE_QUEUE_FULL,
- AP_STATE_REMOVE, /* about to be removed from driver */
- AP_STATE_UNBOUND, /* momentary not bound to a driver */
- AP_STATE_BORKED, /* broken */
- NR_AP_STATES
+enum ap_sm_state {
+ AP_SM_STATE_RESET_START,
+ AP_SM_STATE_RESET_WAIT,
+ AP_SM_STATE_SETIRQ_WAIT,
+ AP_SM_STATE_IDLE,
+ AP_SM_STATE_WORKING,
+ AP_SM_STATE_QUEUE_FULL,
+ AP_SM_STATE_REMOVE, /* about to be removed from driver */
+	AP_SM_STATE_UNBOUND,	/* momentarily not bound to a driver */
+ AP_SM_STATE_BORKED, /* broken */
+ NR_AP_SM_STATES
};
/*
- * AP device events
+ * AP queue state machine events
*/
-enum ap_event {
- AP_EVENT_POLL,
- AP_EVENT_TIMEOUT,
- NR_AP_EVENTS
+enum ap_sm_event {
+ AP_SM_EVENT_POLL,
+ AP_SM_EVENT_TIMEOUT,
+ NR_AP_SM_EVENTS
};
/*
- * AP wait behaviour
+ * AP queue state wait behaviour
*/
-enum ap_wait {
- AP_WAIT_AGAIN, /* retry immediately */
- AP_WAIT_TIMEOUT, /* wait for timeout */
- AP_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
- AP_WAIT_NONE, /* no wait */
- NR_AP_WAIT
+enum ap_sm_wait {
+ AP_SM_WAIT_AGAIN, /* retry immediately */
+ AP_SM_WAIT_TIMEOUT, /* wait for timeout */
+ AP_SM_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
+ AP_SM_WAIT_NONE, /* no wait */
+ NR_AP_SM_WAIT
};
struct ap_device;
@@ -172,7 +172,7 @@ struct ap_queue {
ap_qid_t qid; /* AP queue id. */
int interrupt; /* indicate if interrupts are enabled */
int queue_count; /* # messages currently on AP queue. */
- enum ap_state state; /* State of the AP device. */
+ enum ap_sm_state sm_state; /* ap queue state machine state */
int pendingq_count; /* # requests on pendingq list. */
int requestq_count; /* # requests on requestq list. */
u64 total_request_count; /* # requests ever for this AP device.*/
@@ -185,22 +185,23 @@ struct ap_queue {
#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
-typedef enum ap_wait (ap_func_t)(struct ap_queue *queue);
+typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue);
struct ap_message {
struct list_head list; /* Request queueing. */
unsigned long long psmid; /* Message id. */
- void *message; /* Pointer to message buffer. */
- size_t length; /* Message length. */
+ void *msg; /* Pointer to message buffer. */
+ unsigned int len; /* Message length. */
+ u32 flags; /* Flags, see AP_MSG_FLAG_xxx */
int rc; /* Return code for this message */
-
void *private; /* ap driver private pointer. */
- unsigned int special:1; /* Used for special commands. */
/* receive is called from tasklet context */
void (*receive)(struct ap_queue *, struct ap_message *,
struct ap_message *);
};
+#define AP_MSG_FLAG_SPECIAL (1 << 16) /* flag msg as 'special' with NQAP */
+
/**
* ap_init_message() - Initialize ap_message.
* Initialize a message before using. Otherwise this might result in
@@ -218,7 +219,7 @@ static inline void ap_init_message(struct ap_message *ap_msg)
*/
static inline void ap_release_message(struct ap_message *ap_msg)
{
- kzfree(ap_msg->message);
+ kzfree(ap_msg->msg);
kzfree(ap_msg->private);
}
@@ -230,15 +231,15 @@ static inline void ap_release_message(struct ap_message *ap_msg)
int ap_send(ap_qid_t, unsigned long long, void *, size_t);
int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
-enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event);
-enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event);
+enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
+enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_flush_queue(struct ap_queue *aq);
void *ap_airq_ptr(void);
-void ap_wait(enum ap_wait wait);
+void ap_wait(enum ap_sm_wait wait);
void ap_request_timeout(struct timer_list *t);
void ap_bus_force_rescan(void);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 73b077dca3e6..688ebebbf98c 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -69,9 +69,9 @@ static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
*/
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
- unsigned int special)
+ int special)
{
- if (special == 1)
+ if (special)
qid |= 0x400000UL;
return ap_nqap(qid, psmid, msg, length);
}
@@ -119,9 +119,9 @@ EXPORT_SYMBOL(ap_recv);
/* State machine definitions and helpers */
-static enum ap_wait ap_sm_nop(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
- return AP_WAIT_NONE;
+ return AP_SM_WAIT_NONE;
}
/**
@@ -129,7 +129,7 @@ static enum ap_wait ap_sm_nop(struct ap_queue *aq)
* not change the state of the device.
* @aq: pointer to the AP queue
*
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
*/
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
@@ -137,7 +137,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
struct ap_message *ap_msg;
status = ap_dqap(aq->qid, &aq->reply->psmid,
- aq->reply->message, aq->reply->length);
+ aq->reply->msg, aq->reply->len);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
aq->queue_count--;
@@ -172,31 +172,31 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
* ap_sm_read(): Receive pending reply messages from an AP queue.
* @aq: pointer to the AP queue
*
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
*/
-static enum ap_wait ap_sm_read(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
struct ap_queue_status status;
if (!aq->reply)
- return AP_WAIT_NONE;
+ return AP_SM_WAIT_NONE;
status = ap_sm_recv(aq);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0) {
- aq->state = AP_STATE_WORKING;
- return AP_WAIT_AGAIN;
+ aq->sm_state = AP_SM_STATE_WORKING;
+ return AP_SM_WAIT_AGAIN;
}
- aq->state = AP_STATE_IDLE;
- return AP_WAIT_NONE;
+ aq->sm_state = AP_SM_STATE_IDLE;
+ return AP_SM_WAIT_NONE;
case AP_RESPONSE_NO_PENDING_REPLY:
if (aq->queue_count > 0)
- return AP_WAIT_INTERRUPT;
- aq->state = AP_STATE_IDLE;
- return AP_WAIT_NONE;
+ return AP_SM_WAIT_INTERRUPT;
+ aq->sm_state = AP_SM_STATE_IDLE;
+ return AP_SM_WAIT_NONE;
default:
- aq->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
+ aq->sm_state = AP_SM_STATE_BORKED;
+ return AP_SM_WAIT_NONE;
}
}
@@ -204,19 +204,20 @@ static enum ap_wait ap_sm_read(struct ap_queue *aq)
* ap_sm_write(): Send messages from the request queue to an AP queue.
* @aq: pointer to the AP queue
*
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
*/
-static enum ap_wait ap_sm_write(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
if (aq->requestq_count <= 0)
- return AP_WAIT_NONE;
+ return AP_SM_WAIT_NONE;
/* Start the next request on the queue. */
ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
status = __ap_send(aq->qid, ap_msg->psmid,
- ap_msg->message, ap_msg->length, ap_msg->special);
+ ap_msg->msg, ap_msg->len,
+ ap_msg->flags & AP_MSG_FLAG_SPECIAL);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
aq->queue_count++;
@@ -226,26 +227,26 @@ static enum ap_wait ap_sm_write(struct ap_queue *aq)
aq->requestq_count--;
aq->pendingq_count++;
if (aq->queue_count < aq->card->queue_depth) {
- aq->state = AP_STATE_WORKING;
- return AP_WAIT_AGAIN;
+ aq->sm_state = AP_SM_STATE_WORKING;
+ return AP_SM_WAIT_AGAIN;
}
fallthrough;
case AP_RESPONSE_Q_FULL:
- aq->state = AP_STATE_QUEUE_FULL;
- return AP_WAIT_INTERRUPT;
+ aq->sm_state = AP_SM_STATE_QUEUE_FULL;
+ return AP_SM_WAIT_INTERRUPT;
case AP_RESPONSE_RESET_IN_PROGRESS:
- aq->state = AP_STATE_RESET_WAIT;
- return AP_WAIT_TIMEOUT;
+ aq->sm_state = AP_SM_STATE_RESET_WAIT;
+ return AP_SM_WAIT_TIMEOUT;
case AP_RESPONSE_MESSAGE_TOO_BIG:
case AP_RESPONSE_REQ_FAC_NOT_INST:
list_del_init(&ap_msg->list);
aq->requestq_count--;
ap_msg->rc = -EINVAL;
ap_msg->receive(aq, ap_msg, NULL);
- return AP_WAIT_AGAIN;
+ return AP_SM_WAIT_AGAIN;
default:
- aq->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
+ aq->sm_state = AP_SM_STATE_BORKED;
+ return AP_SM_WAIT_NONE;
}
}
@@ -253,9 +254,9 @@ static enum ap_wait ap_sm_write(struct ap_queue *aq)
* ap_sm_read_write(): Send and receive messages to/from an AP queue.
* @aq: pointer to the AP queue
*
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
*/
-static enum ap_wait ap_sm_read_write(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
return min(ap_sm_read(aq), ap_sm_write(aq));
}
@@ -266,7 +267,7 @@ static enum ap_wait ap_sm_read_write(struct ap_queue *aq)
*
* Submit the Reset command to an AP queue.
*/
-static enum ap_wait ap_sm_reset(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
struct ap_queue_status status;
@@ -274,17 +275,17 @@ static enum ap_wait ap_sm_reset(struct ap_queue *aq)
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
- aq->state = AP_STATE_RESET_WAIT;
+ aq->sm_state = AP_SM_STATE_RESET_WAIT;
aq->interrupt = AP_INTR_DISABLED;
- return AP_WAIT_TIMEOUT;
+ return AP_SM_WAIT_TIMEOUT;
case AP_RESPONSE_BUSY:
- return AP_WAIT_TIMEOUT;
+ return AP_SM_WAIT_TIMEOUT;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
default:
- aq->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
+ aq->sm_state = AP_SM_STATE_BORKED;
+ return AP_SM_WAIT_NONE;
}
}
@@ -294,7 +295,7 @@ static enum ap_wait ap_sm_reset(struct ap_queue *aq)
*
 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEOUT or 0.
*/
-static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
void *lsi_ptr;
@@ -310,20 +311,20 @@ static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
case AP_RESPONSE_NORMAL:
lsi_ptr = ap_airq_ptr();
if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
- aq->state = AP_STATE_SETIRQ_WAIT;
+ aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
else
- aq->state = (aq->queue_count > 0) ?
- AP_STATE_WORKING : AP_STATE_IDLE;
- return AP_WAIT_AGAIN;
+ aq->sm_state = (aq->queue_count > 0) ?
+ AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
+ return AP_SM_WAIT_AGAIN;
case AP_RESPONSE_BUSY:
case AP_RESPONSE_RESET_IN_PROGRESS:
- return AP_WAIT_TIMEOUT;
+ return AP_SM_WAIT_TIMEOUT;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
default:
- aq->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
+ aq->sm_state = AP_SM_STATE_BORKED;
+ return AP_SM_WAIT_NONE;
}
}
@@ -333,7 +334,7 @@ static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
*
 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEOUT or 0.
*/
-static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
@@ -347,75 +348,75 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
if (status.irq_enabled == 1) {
/* Irqs are now enabled */
aq->interrupt = AP_INTR_ENABLED;
- aq->state = (aq->queue_count > 0) ?
- AP_STATE_WORKING : AP_STATE_IDLE;
+ aq->sm_state = (aq->queue_count > 0) ?
+ AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
}
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0)
- return AP_WAIT_AGAIN;
+ return AP_SM_WAIT_AGAIN;
fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
- return AP_WAIT_TIMEOUT;
+ return AP_SM_WAIT_TIMEOUT;
default:
- aq->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
+ aq->sm_state = AP_SM_STATE_BORKED;
+ return AP_SM_WAIT_NONE;
}
}
/*
* AP state machine jump table
*/
-static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
- [AP_STATE_RESET_START] = {
- [AP_EVENT_POLL] = ap_sm_reset,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
+static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
+ [AP_SM_STATE_RESET_START] = {
+ [AP_SM_EVENT_POLL] = ap_sm_reset,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
- [AP_STATE_RESET_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_reset_wait,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ [AP_SM_STATE_RESET_WAIT] = {
+ [AP_SM_EVENT_POLL] = ap_sm_reset_wait,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
- [AP_STATE_SETIRQ_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_setirq_wait,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ [AP_SM_STATE_SETIRQ_WAIT] = {
+ [AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
- [AP_STATE_IDLE] = {
- [AP_EVENT_POLL] = ap_sm_write,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ [AP_SM_STATE_IDLE] = {
+ [AP_SM_EVENT_POLL] = ap_sm_write,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
- [AP_STATE_WORKING] = {
- [AP_EVENT_POLL] = ap_sm_read_write,
- [AP_EVENT_TIMEOUT] = ap_sm_reset,
+ [AP_SM_STATE_WORKING] = {
+ [AP_SM_EVENT_POLL] = ap_sm_read_write,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
},
- [AP_STATE_QUEUE_FULL] = {
- [AP_EVENT_POLL] = ap_sm_read,
- [AP_EVENT_TIMEOUT] = ap_sm_reset,
+ [AP_SM_STATE_QUEUE_FULL] = {
+ [AP_SM_EVENT_POLL] = ap_sm_read,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
},
- [AP_STATE_REMOVE] = {
- [AP_EVENT_POLL] = ap_sm_nop,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ [AP_SM_STATE_REMOVE] = {
+ [AP_SM_EVENT_POLL] = ap_sm_nop,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
- [AP_STATE_UNBOUND] = {
- [AP_EVENT_POLL] = ap_sm_nop,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ [AP_SM_STATE_UNBOUND] = {
+ [AP_SM_EVENT_POLL] = ap_sm_nop,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
- [AP_STATE_BORKED] = {
- [AP_EVENT_POLL] = ap_sm_nop,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ [AP_SM_STATE_BORKED] = {
+ [AP_SM_EVENT_POLL] = ap_sm_nop,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
};
-enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event)
+enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
- return ap_jumptable[aq->state][event](aq);
+ return ap_jumptable[aq->sm_state][event](aq);
}
-enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
+enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
- enum ap_wait wait;
+ enum ap_sm_wait wait;
- while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN)
+ while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
;
return wait;
}
@@ -486,13 +487,13 @@ static ssize_t reset_show(struct device *dev,
int rc = 0;
spin_lock_bh(&aq->lock);
- switch (aq->state) {
- case AP_STATE_RESET_START:
- case AP_STATE_RESET_WAIT:
+ switch (aq->sm_state) {
+ case AP_SM_STATE_RESET_START:
+ case AP_SM_STATE_RESET_WAIT:
rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
break;
- case AP_STATE_WORKING:
- case AP_STATE_QUEUE_FULL:
+ case AP_SM_STATE_WORKING:
+ case AP_SM_STATE_QUEUE_FULL:
rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
break;
default:
@@ -510,8 +511,8 @@ static ssize_t reset_store(struct device *dev,
spin_lock_bh(&aq->lock);
__ap_flush_queue(aq);
- aq->state = AP_STATE_RESET_START;
- ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+ aq->sm_state = AP_SM_STATE_RESET_START;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
@@ -529,7 +530,7 @@ static ssize_t interrupt_show(struct device *dev,
int rc = 0;
spin_lock_bh(&aq->lock);
- if (aq->state == AP_STATE_SETIRQ_WAIT)
+ if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
else if (aq->interrupt == AP_INTR_ENABLED)
rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
@@ -586,7 +587,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
aq->qid = qid;
- aq->state = AP_STATE_UNBOUND;
+ aq->sm_state = AP_SM_STATE_UNBOUND;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->pendingq);
@@ -601,7 +602,7 @@ void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
aq->reply = reply;
spin_lock_bh(&aq->lock);
- ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);
@@ -625,7 +626,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
aq->total_request_count++;
atomic64_inc(&aq->card->total_request_count);
/* Send/receive as many request from the queue as possible. */
- ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
+ ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_message);
@@ -698,7 +699,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq)
/* flush queue */
__ap_flush_queue(aq);
	/* set REMOVE state to prevent new messages from being queued */
- aq->state = AP_STATE_REMOVE;
+ aq->sm_state = AP_SM_STATE_REMOVE;
spin_unlock_bh(&aq->lock);
del_timer_sync(&aq->timeout);
}
@@ -707,22 +708,22 @@ void ap_queue_remove(struct ap_queue *aq)
{
/*
* all messages have been flushed and the state is
- * AP_STATE_REMOVE. Now reset with zero which also
+ * AP_SM_STATE_REMOVE. Now reset with zero which also
* clears the irq registration and move the state
- * to AP_STATE_UNBOUND to signal that this queue
+ * to AP_SM_STATE_UNBOUND to signal that this queue
* is not used by any driver currently.
*/
spin_lock_bh(&aq->lock);
ap_zapq(aq->qid);
- aq->state = AP_STATE_UNBOUND;
+ aq->sm_state = AP_SM_STATE_UNBOUND;
spin_unlock_bh(&aq->lock);
}
void ap_queue_init_state(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
- aq->state = AP_STATE_RESET_START;
- ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+ aq->sm_state = AP_SM_STATE_RESET_START;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);
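
The AP_STATE_*/AP_EVENT_* to AP_SM_STATE_*/AP_SM_EVENT_* rename makes the state-machine namespace explicit without changing behaviour: every transition still happens under the queue lock and is kicked by a poll event. A condensed restatement of the pattern as it appears in the hunks above (no new helpers are introduced):

	spin_lock_bh(&aq->lock);
	aq->sm_state = AP_SM_STATE_RESET_START;		/* pick the new state ... */
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));	/* ... and drive the SM */
	spin_unlock_bh(&aq->lock);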
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 74e63ec49068..d5880f52dc2b 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -1603,8 +1603,8 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
if (rc == 0)
break;
}
- if (rc)
- return rc;
+ if (rc)
+ return rc;
if (is_xts) {
keysize = CCACIPHERTOKENSIZE;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 56a405dce8bc..4dbbfd88262c 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -634,7 +634,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
- unsigned int weight, pref_weight;
+ unsigned int weight = 0, pref_weight = 0;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
struct module *mod;
@@ -718,7 +718,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
- unsigned int weight, pref_weight;
+ unsigned int weight = 0, pref_weight = 0;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
struct module *mod;
@@ -803,7 +803,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
struct ap_message ap_msg;
- unsigned int weight, pref_weight;
+ unsigned int weight = 0, pref_weight = 0;
unsigned int func_code;
unsigned short *domain, tdom;
int qid = 0, rc = -ENODEV;
@@ -822,7 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
* domain but a control only domain, use the default domain as target.
*/
tdom = *domain;
- if (tdom >= 0 && tdom < AP_DOMAINS &&
+ if (tdom < AP_DOMAINS &&
!ap_test_config_usage_domain(tdom) &&
ap_test_config_ctrl_domain(tdom) &&
ap_domain_index >= 0)
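
Since tdom is an unsigned short, the dropped "tdom >= 0" clause could never be false; the comparison was dead code that compilers flag with -Wtype-limits. A minimal illustration:

	unsigned short tdom = 42;

	if (tdom >= 0 && tdom < AP_DOMAINS)	/* ">= 0" is always true */
		;
	if (tdom < AP_DOMAINS)			/* equivalent, warning-free */
		;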
@@ -931,7 +931,7 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
struct zcrypt_queue *zq, *pref_zq;
struct ep11_target_dev *targets;
unsigned short target_num;
- unsigned int weight, pref_weight;
+ unsigned int weight = 0, pref_weight = 0;
unsigned int func_code;
struct ap_message ap_msg;
int qid = 0, rc = -ENODEV;
@@ -1040,7 +1040,7 @@ static long zcrypt_rng(char *buffer)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
- unsigned int weight, pref_weight;
+ unsigned int weight = 0, pref_weight = 0;
unsigned int func_code;
struct ap_message ap_msg;
unsigned int domain;
@@ -1298,99 +1298,119 @@ static int zcrypt_requestq_count(void)
return requestq_count;
}
-static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
- struct ap_perms *perms =
- (struct ap_perms *) filp->private_data;
+ struct ica_rsa_modexpo mex;
+ struct ica_rsa_modexpo __user *umex = (void __user *) arg;
- rc = zcrypt_check_ioctl(perms, cmd);
- if (rc)
- return rc;
-
- switch (cmd) {
- case ICARSAMODEXPO: {
- struct ica_rsa_modexpo __user *umex = (void __user *) arg;
- struct ica_rsa_modexpo mex;
-
- if (copy_from_user(&mex, umex, sizeof(mex)))
- return -EFAULT;
+ if (copy_from_user(&mex, umex, sizeof(mex)))
+ return -EFAULT;
+ do {
+ rc = zcrypt_rsa_modexpo(perms, &mex);
+ } while (rc == -EAGAIN);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = zcrypt_rsa_modexpo(perms, &mex);
} while (rc == -EAGAIN);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- do {
- rc = zcrypt_rsa_modexpo(perms, &mex);
- } while (rc == -EAGAIN);
- if (rc) {
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
- return rc;
- }
- return put_user(mex.outputdatalength, &umex->outputdatalength);
+ if (rc) {
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
+ return rc;
}
- case ICARSACRT: {
- struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
- struct ica_rsa_modexpo_crt crt;
+ return put_user(mex.outputdatalength, &umex->outputdatalength);
+}
- if (copy_from_user(&crt, ucrt, sizeof(crt)))
- return -EFAULT;
+static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+ int rc;
+ struct ica_rsa_modexpo_crt crt;
+ struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
+
+ if (copy_from_user(&crt, ucrt, sizeof(crt)))
+ return -EFAULT;
+ do {
+ rc = zcrypt_rsa_crt(perms, &crt);
+ } while (rc == -EAGAIN);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = zcrypt_rsa_crt(perms, &crt);
} while (rc == -EAGAIN);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- do {
- rc = zcrypt_rsa_crt(perms, &crt);
- } while (rc == -EAGAIN);
- if (rc) {
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
- return rc;
- }
- return put_user(crt.outputdatalength, &ucrt->outputdatalength);
+ if (rc) {
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
+ return rc;
}
- case ZSECSENDCPRB: {
- struct ica_xcRB __user *uxcRB = (void __user *) arg;
- struct ica_xcRB xcRB;
+ return put_user(crt.outputdatalength, &ucrt->outputdatalength);
+}
- if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
- return -EFAULT;
+static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+ int rc;
+ struct ica_xcRB xcRB;
+ struct ica_xcRB __user *uxcRB = (void __user *) arg;
+
+ if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
+ return -EFAULT;
+ do {
+ rc = _zcrypt_send_cprb(perms, &xcRB);
+ } while (rc == -EAGAIN);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = _zcrypt_send_cprb(perms, &xcRB);
} while (rc == -EAGAIN);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- do {
- rc = _zcrypt_send_cprb(perms, &xcRB);
- } while (rc == -EAGAIN);
- if (rc)
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
- rc, xcRB.status);
- if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
- return -EFAULT;
- return rc;
- }
- case ZSENDEP11CPRB: {
- struct ep11_urb __user *uxcrb = (void __user *)arg;
- struct ep11_urb xcrb;
+ if (rc)
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
+ rc, xcRB.status);
+ if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
+ return -EFAULT;
+ return rc;
+}
- if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
- return -EFAULT;
+static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+ int rc;
+ struct ep11_urb xcrb;
+ struct ep11_urb __user *uxcrb = (void __user *)arg;
+
+ if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
+ return -EFAULT;
+ do {
+ rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
+ } while (rc == -EAGAIN);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
} while (rc == -EAGAIN);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- do {
- rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
- } while (rc == -EAGAIN);
- if (rc)
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
- if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
- return -EFAULT;
+ if (rc)
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
+ if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
+ return -EFAULT;
+ return rc;
+}
+
+static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+ struct ap_perms *perms =
+ (struct ap_perms *) filp->private_data;
+
+ rc = zcrypt_check_ioctl(perms, cmd);
+ if (rc)
return rc;
- }
+
+ switch (cmd) {
+ case ICARSAMODEXPO:
+ return icarsamodexpo_ioctl(perms, arg);
+ case ICARSACRT:
+ return icarsacrt_ioctl(perms, arg);
+ case ZSECSENDCPRB:
+ return zsecsendcprb_ioctl(perms, arg);
+ case ZSENDEP11CPRB:
+ return zsendep11cprb_ioctl(perms, arg);
case ZCRYPT_DEVICE_STATUS: {
struct zcrypt_device_status_ext *device_status;
size_t total_size = MAX_ZDEV_ENTRIES_EXT
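
Each extracted helper repeats one shape: spin while the queue reports -EAGAIN, and on -ENODEV request a bus rescan and run the whole loop once more. A hedged sketch of that shape (the send_fn indirection is illustrative only; the kernel keeps the loops open-coded per ioctl because the request types differ):

	static int send_with_rescan(struct ap_perms *perms, void *req,
				    long (*send_fn)(struct ap_perms *, void *))
	{
		int rc;

		do {
			rc = send_fn(perms, req);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if (rc == -ENODEV && zcrypt_process_rescan())
			do {
				rc = send_fn(perms, req);
			} while (rc == -EAGAIN);
		return rc;
	}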
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 1b835398feec..3f5b61351cde 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -205,9 +205,9 @@ static int alloc_and_prep_cprbmem(size_t paramblen,
preqcblk->rpl_msgbl = cprbplusparamblen;
if (paramblen) {
preqcblk->req_parmb =
- ((u8 *) preqcblk) + sizeof(struct CPRBX);
+ ((u8 __user *) preqcblk) + sizeof(struct CPRBX);
preqcblk->rpl_parmb =
- ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ ((u8 __user *) prepcblk) + sizeof(struct CPRBX);
}
*pcprbmem = cprbmem;
@@ -274,7 +274,7 @@ int cca_genseckey(u16 cardnr, u16 domain,
{
int i, rc, keysize;
int seckeysize;
- u8 *mem;
+ u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct kgreqparm {
@@ -320,7 +320,7 @@ int cca_genseckey(u16 cardnr, u16 domain,
preqcblk->domain = domain;
/* fill request cprb param block with KG request */
- preqparm = (struct kgreqparm *) preqcblk->req_parmb;
+ preqparm = (struct kgreqparm __force *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "KG", 2);
preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
preqparm->lv1.len = sizeof(struct lv1);
@@ -377,8 +377,9 @@ int cca_genseckey(u16 cardnr, u16 domain,
}
/* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct kgrepparm *) ptr;
/* check length of the returned secure key token */
seckeysize = prepparm->lv3.keyblock.toklen
@@ -415,7 +416,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
const u8 *clrkey, u8 seckey[SECKEYBLOBSIZE])
{
int rc, keysize, seckeysize;
- u8 *mem;
+ u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct cmreqparm {
@@ -460,7 +461,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
preqcblk->domain = domain;
/* fill request cprb param block with CM request */
- preqparm = (struct cmreqparm *) preqcblk->req_parmb;
+ preqparm = (struct cmreqparm __force *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "CM", 2);
memcpy(preqparm->rule_array, "AES ", 8);
preqparm->rule_array_len =
@@ -514,8 +515,9 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
}
/* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct cmrepparm *) ptr;
/* check length of the returned secure key token */
seckeysize = prepparm->lv3.keyblock.toklen
@@ -554,7 +556,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
int rc;
- u8 *mem;
+ u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct uskreqparm {
@@ -605,7 +607,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
preqcblk->domain = domain;
/* fill request cprb param block with USK request */
- preqparm = (struct uskreqparm *) preqcblk->req_parmb;
+ preqparm = (struct uskreqparm __force *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "US", 2);
preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
preqparm->lv1.len = sizeof(struct lv1);
@@ -646,8 +648,9 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
}
/* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct uskrepparm *) ptr;
/* check the returned keyblock */
if (prepparm->lv3.ckb.version != 0x01 &&
@@ -714,7 +717,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
u8 *keybuf, size_t *keybufsize)
{
int rc;
- u8 *mem;
+ u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct gkreqparm {
@@ -796,7 +799,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
preqcblk->req_parml = sizeof(struct gkreqparm);
/* prepare request param block with GK request */
- preqparm = (struct gkreqparm *) preqcblk->req_parmb;
+ preqparm = (struct gkreqparm __force *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "GK", 2);
preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8;
memcpy(preqparm->rule_array, "AES OP ", 2*8);
@@ -867,8 +870,9 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
}
/* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct gkrepparm *) prepcblk->rpl_parmb;
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct gkrepparm *) ptr;
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
@@ -917,7 +921,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
int *key_token_size)
{
int rc, n;
- u8 *mem;
+ u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct rule_array_block {
@@ -974,7 +978,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
preqcblk->req_parml = 0;
/* prepare request param block with IP request */
- preq_ra_block = (struct rule_array_block *) preqcblk->req_parmb;
+ preq_ra_block = (struct rule_array_block __force *) preqcblk->req_parmb;
memcpy(preq_ra_block->subfunc_code, "IP", 2);
preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8;
memcpy(preq_ra_block->rule_array, rule_array_1, 8);
@@ -987,7 +991,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
}
/* prepare vud block */
- preq_vud_block = (struct vud_block *)
+ preq_vud_block = (struct vud_block __force *)
(preqcblk->req_parmb + preqcblk->req_parml);
n = complete ? 0 : (clr_key_bit_size + 7) / 8;
preq_vud_block->len = sizeof(struct vud_block) + n;
@@ -1001,7 +1005,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
preqcblk->req_parml += preq_vud_block->len;
/* prepare key block */
- preq_key_block = (struct key_block *)
+ preq_key_block = (struct key_block __force *)
(preqcblk->req_parmb + preqcblk->req_parml);
n = *key_token_size;
preq_key_block->len = sizeof(struct key_block) + n;
@@ -1034,8 +1038,9 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
}
/* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct iprepparm *) ptr;
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
@@ -1151,7 +1156,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
int rc;
- u8 *mem;
+ u8 *mem, *ptr;
struct CPRBX *preqcblk, *prepcblk;
struct ica_xcRB xcrb;
struct aureqparm {
@@ -1208,7 +1213,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
preqcblk->domain = domain;
/* fill request cprb param block with AU request */
- preqparm = (struct aureqparm *) preqcblk->req_parmb;
+ preqparm = (struct aureqparm __force *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "AU", 2);
preqparm->rule_array_len =
sizeof(preqparm->rule_array_len)
@@ -1257,8 +1262,9 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
}
/* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct aurepparm *) prepcblk->rpl_parmb;
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct aurepparm *) ptr;
/* check the returned keyblock */
if (prepparm->vud.ckb.version != 0x01 &&
@@ -1347,7 +1353,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
preqcblk->domain = domain;
/* fill request cprb param block with FQ request */
- preqparm = (struct fqreqparm *) preqcblk->req_parmb;
+ preqparm = (struct fqreqparm __force *) preqcblk->req_parmb;
memcpy(preqparm->subfunc_code, "FQ", 2);
memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
preqparm->rule_array_len =
@@ -1378,8 +1384,9 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
}
/* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct fqrepparm *) ptr;
ptr = prepparm->lvdata;
/* check and possibly copy reply rule array */
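
The req_parmb/rpl_parmb members are declared __user because a CPRB normally arrives from user space, but here the buffers live in kernel memory; the (u8 __user *) casts on stores and the __force casts on loads are sparse annotations only and generate no code. A minimal sketch of the idiom (the struct and function are hypothetical; __user/__force come from <linux/compiler_types.h>):

	struct cprb_like {
		u8 __user *parmb;	/* normally points into user space */
	};

	static void fill_parmb(struct cprb_like *c, u8 *kbuf)
	{
		void *p;

		c->parmb = (u8 __user *) kbuf;	/* annotate the store ... */
		p = (void __force *) c->parmb;	/* ... and force the load */
		memset(p, 0, 16);		/* use as plain kernel memory */
	}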
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 266440168bb7..f00127a78bab 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -25,6 +25,7 @@
#include "zcrypt_msgtype6.h"
#include "zcrypt_cex2c.h"
#include "zcrypt_cca_key.h"
+#include "zcrypt_ccamisc.h"
#define CEX2C_MIN_MOD_SIZE 16 /* 128 bits */
#define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */
@@ -58,6 +59,91 @@ static struct ap_device_id zcrypt_cex2c_queue_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_queue_ids);
+/*
+ * CCA card additional device attributes
+ */
+static ssize_t cca_serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cca_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ if (ap_domain_index >= 0)
+ cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+}
+
+static struct device_attribute dev_attr_cca_serialnr =
+ __ATTR(serialnr, 0444, cca_serialnr_show, NULL);
+
+static struct attribute *cca_card_attrs[] = {
+ &dev_attr_cca_serialnr.attr,
+ NULL,
+};
+
+static const struct attribute_group cca_card_attr_grp = {
+ .attrs = cca_card_attrs,
+};
+
+/*
+ * CCA queue additional device attributes
+ */
+static ssize_t cca_mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int n = 0;
+ struct cca_info ci;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ static const char * const cao_state[] = { "invalid", "valid" };
+ static const char * const new_state[] = { "empty", "partial", "full" };
+
+ memset(&ci, 0, sizeof(ci));
+
+ cca_get_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &ci, zq->online);
+
+ if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3')
+ n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
+ new_state[ci.new_mk_state - '1'], ci.new_mkvp);
+ else
+ n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+
+ if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "AES CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+
+ if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "AES OLD: %s 0x%016llx\n",
+ cao_state[ci.old_mk_state - '1'], ci.old_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_cca_mkvps =
+ __ATTR(mkvps, 0444, cca_mkvps_show, NULL);
+
+static struct attribute *cca_queue_attrs[] = {
+ &dev_attr_cca_mkvps.attr,
+ NULL,
+};
+
+static const struct attribute_group cca_queue_attr_grp = {
+ .attrs = cca_queue_attrs,
+};
+
/**
 * Large random number detection function. It sends a message to a CEX2C/CEX3C
* card to find out if large random numbers are supported.
@@ -87,24 +173,23 @@ static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
int rc, i;
ap_init_message(&ap_msg);
- ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
- if (!ap_msg.message)
+ ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg.msg)
return -ENOMEM;
rng_type6CPRB_msgX(&ap_msg, 4, &domain);
- msg = ap_msg.message;
+ msg = ap_msg.msg;
msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
- rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message,
- ap_msg.length);
+ rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.msg, ap_msg.len);
if (rc)
goto out_free;
/* Wait for the test message to complete. */
for (i = 0; i < 2 * HZ; i++) {
msleep(1000 / HZ);
- rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096);
+ rc = ap_recv(aq->qid, &psmid, ap_msg.msg, 4096);
if (rc == 0 && psmid == 0x0102030405060708ULL)
break;
}
@@ -115,13 +200,13 @@ static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
goto out_free;
}
- reply = ap_msg.message;
+ reply = ap_msg.msg;
if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
rc = 1;
else
rc = 0;
out_free:
- free_page((unsigned long) ap_msg.message);
+ free_page((unsigned long) ap_msg.msg);
return rc;
}
@@ -179,6 +264,17 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
if (rc) {
ac->private = NULL;
zcrypt_card_free(zc);
+ return rc;
+ }
+
+ if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &cca_card_attr_grp);
+ if (rc) {
+ zcrypt_card_unregister(zc);
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ }
}
return rc;
@@ -190,8 +286,11 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
*/
static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
{
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+ if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
if (zc)
zcrypt_card_unregister(zc);
}
@@ -240,7 +339,19 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
if (rc) {
aq->private = NULL;
zcrypt_queue_free(zq);
+ return rc;
+ }
+
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &cca_queue_attr_grp);
+ if (rc) {
+ zcrypt_queue_unregister(zq);
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ }
}
+
return rc;
}
@@ -253,6 +364,8 @@ static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq = aq->private;
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
if (zq)
zcrypt_queue_unregister(zq);
}
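
The new mkvps attribute prints the AES master key registers in three fixed lines, decoding the state characters '1'..'3' through the new_state/cao_state tables above. Illustrative output for a queue with an empty new MK register and a valid current MK (values invented for the example):

	AES NEW: empty 0x0000000000000000
	AES CUR: valid 0x1234567890abcdef
	AES OLD: - -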
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index cdaa8348ad04..dc20d983e468 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -250,7 +250,7 @@ static ssize_t ep11_card_op_modes_show(struct device *dev,
ep11_get_card_info(ac->id, &ci, zc->online);
for (i = 0; ep11_op_modes[i].mode_txt; i++) {
- if (ci.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
+ if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
if (n > 0)
buf[n++] = ' ';
n += scnprintf(buf + n, PAGE_SIZE - n,
@@ -345,7 +345,7 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev,
&di);
for (i = 0; ep11_op_modes[i].mode_txt; i++) {
- if (di.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
+ if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
if (n > 0)
buf[n++] = ' ';
n += scnprintf(buf + n, PAGE_SIZE - n,
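
op_mode is a 64-bit field, so building the mask from a plain int constant is undefined once mode_bit reaches 32; widening the constant keeps the shift in 64 bits:

	/* 1 << 40 would be evaluated in 32-bit int: undefined behaviour */
	u64 mask = 1ULL << 40;	/* 0x0000010000000000, as intended */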
@@ -529,22 +529,27 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
if (rc) {
ac->private = NULL;
zcrypt_card_free(zc);
- goto out;
+ return rc;
}
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&cca_card_attr_grp);
- if (rc)
+ if (rc) {
zcrypt_card_unregister(zc);
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ }
} else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&ep11_card_attr_grp);
- if (rc)
+ if (rc) {
zcrypt_card_unregister(zc);
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ }
}
-out:
return rc;
}
@@ -617,22 +622,27 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
if (rc) {
aq->private = NULL;
zcrypt_queue_free(zq);
- goto out;
+ return rc;
}
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&cca_queue_attr_grp);
- if (rc)
+ if (rc) {
zcrypt_queue_unregister(zq);
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ }
} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&ep11_queue_attr_grp);
- if (rc)
+ if (rc) {
zcrypt_queue_unregister(zq);
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ }
}
-out:
return rc;
}
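
Both probe paths now unwind in strict reverse order of construction when sysfs_create_group() fails: unregister the zcrypt object, clear the ->private back-pointer, then free it, mirroring the existing failure path after registration. Condensed from the card hunk above:

	rc = sysfs_create_group(&ap_dev->device.kobj, &cca_card_attr_grp);
	if (rc) {
		zcrypt_card_unregister(zc);	/* undo registration */
		ac->private = NULL;		/* drop the back-pointer */
		zcrypt_card_free(zc);		/* then release the card */
	}
	return rc;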
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 4f4dd9d727c9..54a04f8c38ef 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -80,7 +80,7 @@ struct error_hdr {
static inline int convert_error(struct zcrypt_queue *zq,
struct ap_message *reply)
{
- struct error_hdr *ehdr = reply->message;
+ struct error_hdr *ehdr = reply->msg;
int card = AP_QID_CARD(zq->queue->qid);
int queue = AP_QID_QUEUE(zq->queue->qid);
@@ -127,7 +127,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
- } __packed * head = reply->message;
+ } __packed * head = reply->msg;
unsigned int apfs = *((u32 *)head->fmt2.apfs);
ZCRYPT_DBF(DBF_ERR,
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index fc4295b3d801..7aedc338b445 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -207,10 +207,10 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
mod_len = mex->inputdatalength;
if (mod_len <= 128) {
- struct type50_meb1_msg *meb1 = ap_msg->message;
+ struct type50_meb1_msg *meb1 = ap_msg->msg;
memset(meb1, 0, sizeof(*meb1));
- ap_msg->length = sizeof(*meb1);
+ ap_msg->len = sizeof(*meb1);
meb1->header.msg_type_code = TYPE50_TYPE_CODE;
meb1->header.msg_len = sizeof(*meb1);
meb1->keyblock_type = TYPE50_MEB1_FMT;
@@ -218,10 +218,10 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
inp = meb1->message + sizeof(meb1->message) - mod_len;
} else if (mod_len <= 256) {
- struct type50_meb2_msg *meb2 = ap_msg->message;
+ struct type50_meb2_msg *meb2 = ap_msg->msg;
memset(meb2, 0, sizeof(*meb2));
- ap_msg->length = sizeof(*meb2);
+ ap_msg->len = sizeof(*meb2);
meb2->header.msg_type_code = TYPE50_TYPE_CODE;
meb2->header.msg_len = sizeof(*meb2);
meb2->keyblock_type = TYPE50_MEB2_FMT;
@@ -229,10 +229,10 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
inp = meb2->message + sizeof(meb2->message) - mod_len;
} else if (mod_len <= 512) {
- struct type50_meb3_msg *meb3 = ap_msg->message;
+ struct type50_meb3_msg *meb3 = ap_msg->msg;
memset(meb3, 0, sizeof(*meb3));
- ap_msg->length = sizeof(*meb3);
+ ap_msg->len = sizeof(*meb3);
meb3->header.msg_type_code = TYPE50_TYPE_CODE;
meb3->header.msg_len = sizeof(*meb3);
meb3->keyblock_type = TYPE50_MEB3_FMT;
@@ -275,10 +275,10 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
* 512 byte modulus (4k keys).
*/
if (mod_len <= 128) { /* up to 1024 bit key size */
- struct type50_crb1_msg *crb1 = ap_msg->message;
+ struct type50_crb1_msg *crb1 = ap_msg->msg;
memset(crb1, 0, sizeof(*crb1));
- ap_msg->length = sizeof(*crb1);
+ ap_msg->len = sizeof(*crb1);
crb1->header.msg_type_code = TYPE50_TYPE_CODE;
crb1->header.msg_len = sizeof(*crb1);
crb1->keyblock_type = TYPE50_CRB1_FMT;
@@ -289,10 +289,10 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
u = crb1->u + sizeof(crb1->u) - short_len;
inp = crb1->message + sizeof(crb1->message) - mod_len;
} else if (mod_len <= 256) { /* up to 2048 bit key size */
- struct type50_crb2_msg *crb2 = ap_msg->message;
+ struct type50_crb2_msg *crb2 = ap_msg->msg;
memset(crb2, 0, sizeof(*crb2));
- ap_msg->length = sizeof(*crb2);
+ ap_msg->len = sizeof(*crb2);
crb2->header.msg_type_code = TYPE50_TYPE_CODE;
crb2->header.msg_len = sizeof(*crb2);
crb2->keyblock_type = TYPE50_CRB2_FMT;
@@ -304,10 +304,10 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
inp = crb2->message + sizeof(crb2->message) - mod_len;
} else if ((mod_len <= 512) && /* up to 4096 bit key size */
(zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
- struct type50_crb3_msg *crb3 = ap_msg->message;
+ struct type50_crb3_msg *crb3 = ap_msg->msg;
memset(crb3, 0, sizeof(*crb3));
- ap_msg->length = sizeof(*crb3);
+ ap_msg->len = sizeof(*crb3);
crb3->header.msg_type_code = TYPE50_TYPE_CODE;
crb3->header.msg_len = sizeof(*crb3);
crb3->keyblock_type = TYPE50_CRB3_FMT;
@@ -350,7 +350,7 @@ static int convert_type80(struct zcrypt_queue *zq,
char __user *outputdata,
unsigned int outputdatalength)
{
- struct type80_hdr *t80h = reply->message;
+ struct type80_hdr *t80h = reply->msg;
unsigned char *data;
if (t80h->len < sizeof(*t80h) + outputdatalength) {
@@ -370,7 +370,7 @@ static int convert_type80(struct zcrypt_queue *zq,
BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
else
BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
- data = reply->message + t80h->len - outputdatalength;
+ data = reply->msg + t80h->len - outputdatalength;
if (copy_to_user(outputdata, data, outputdatalength))
return -EFAULT;
return 0;
@@ -382,7 +382,7 @@ static int convert_response(struct zcrypt_queue *zq,
unsigned int outputdatalength)
{
/* Response type byte is the second byte in the response. */
- unsigned char rtype = ((unsigned char *) reply->message)[1];
+ unsigned char rtype = ((unsigned char *) reply->msg)[1];
switch (rtype) {
case TYPE82_RSP_CODE:
@@ -422,22 +422,20 @@ static void zcrypt_cex2a_receive(struct ap_queue *aq,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
struct type80_hdr *t80h;
- int length;
+ int len;
/* Copy the reply message to the request message buffer. */
if (!reply)
goto out; /* ap_msg->rc indicates the error */
- t80h = reply->message;
+ t80h = reply->msg;
if (t80h->type == TYPE80_RSP_CODE) {
if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
- length = min_t(int,
- CEX2A_MAX_RESPONSE_SIZE, t80h->len);
+ len = min_t(int, CEX2A_MAX_RESPONSE_SIZE, t80h->len);
else
- length = min_t(int,
- CEX3A_MAX_RESPONSE_SIZE, t80h->len);
- memcpy(msg->message, reply->message, length);
+ len = min_t(int, CEX3A_MAX_RESPONSE_SIZE, t80h->len);
+ memcpy(msg->msg, reply->msg, len);
} else
- memcpy(msg->message, reply->message, sizeof(error_reply));
+ memcpy(msg->msg, reply->msg, sizeof(error_reply));
out:
complete((struct completion *) msg->private);
}
@@ -460,12 +458,10 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
ap_init_message(&ap_msg);
if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
- ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
- GFP_KERNEL);
+ ap_msg.msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
else
- ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
- GFP_KERNEL);
- if (!ap_msg.message)
+ ap_msg.msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg.msg)
return -ENOMEM;
ap_msg.receive = zcrypt_cex2a_receive;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -486,7 +482,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
/* Signal pending. */
ap_cancel_message(zq->queue, &ap_msg);
out_free:
- kfree(ap_msg.message);
+ kfree(ap_msg.msg);
return rc;
}
@@ -506,12 +502,10 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
ap_init_message(&ap_msg);
if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
- ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
- GFP_KERNEL);
+ ap_msg.msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
else
- ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
- GFP_KERNEL);
- if (!ap_msg.message)
+ ap_msg.msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg.msg)
return -ENOMEM;
ap_msg.receive = zcrypt_cex2a_receive;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -532,7 +526,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
/* Signal pending. */
ap_cancel_message(zq->queue, &ap_msg);
out_free:
- kfree(ap_msg.message);
+ kfree(ap_msg.msg);
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index fd1cbb2d6b3f..d77991c74c25 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -266,7 +266,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
struct function_and_rules_block fr;
unsigned short length;
char text[0];
- } __packed * msg = ap_msg->message;
+ } __packed * msg = ap_msg->msg;
int size;
/*
@@ -301,7 +301,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
- ap_msg->length = size;
+ ap_msg->len = size;
return 0;
}
@@ -336,7 +336,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
struct function_and_rules_block fr;
unsigned short length;
char text[0];
- } __packed * msg = ap_msg->message;
+ } __packed * msg = ap_msg->msg;
int size;
/*
@@ -370,7 +370,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
msg->fr = static_pkd_fnr;
- ap_msg->length = size;
+ ap_msg->len = size;
return 0;
}
@@ -400,11 +400,11 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
- } __packed * msg = ap_msg->message;
+ } __packed * msg = ap_msg->msg;
int rcblen = CEIL4(xcRB->request_control_blk_length);
int replylen, req_sumlen, resp_sumlen;
- char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
+ char *req_data = ap_msg->msg + sizeof(struct type6_hdr) + rcblen;
char *function_code;
if (CEIL4(xcRB->request_control_blk_length) <
@@ -412,10 +412,10 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
return -EINVAL; /* overflow after alignment */
/* length checks */
- ap_msg->length = sizeof(struct type6_hdr) +
+ ap_msg->len = sizeof(struct type6_hdr) +
CEIL4(xcRB->request_control_blk_length) +
xcRB->request_data_length;
- if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
+ if (ap_msg->len > MSGTYPE06_MAX_MSG_SIZE)
return -EINVAL;
/*
@@ -480,9 +480,7 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
if (memcmp(function_code, "US", 2) == 0
|| memcmp(function_code, "AU", 2) == 0)
- ap_msg->special = 1;
- else
- ap_msg->special = 0;
+ ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
/* copy data block */
if (xcRB->request_data_length &&
@@ -512,7 +510,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
struct ep11_cprb cprbx;
unsigned char pld_tag; /* fixed value 0x30 */
unsigned char pld_lenfmt; /* payload length format */
- } __packed * msg = ap_msg->message;
+ } __packed * msg = ap_msg->msg;
struct pld_hdr {
unsigned char func_tag; /* fixed value 0x4 */
@@ -527,7 +525,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
return -EINVAL; /* overflow after alignment */
/* length checks */
- ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len;
+ ap_msg->len = sizeof(struct type6_hdr) + xcRB->req_len;
if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE -
(sizeof(struct type6_hdr)))
return -EINVAL;
@@ -569,7 +567,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
/* enable special processing based on the special bit in the CPRB flags */
if (msg->cprbx.flags & 0x20)
- ap_msg->special = 1;
+ ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
return 0;
}
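
Folding the dedicated special int into a bit of ap_msg->flags lets one word carry several independent message properties, and ap_init_message() zeroing the structure covers the old "else special = 0" branch. A minimal sketch of the idiom (AP_MSG_FLAG_EXAMPLE is hypothetical; only AP_MSG_FLAG_SPECIAL exists in the patch):

	m->flags |= AP_MSG_FLAG_SPECIAL;		/* set a property */
	if (m->flags & AP_MSG_FLAG_SPECIAL)		/* test it */
		m->flags &= ~AP_MSG_FLAG_EXAMPLE;	/* clear another one */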
@@ -639,7 +637,7 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
0x35, 0x9D, 0xD3, 0xD3, 0xA7, 0x9D, 0x5D, 0x41,
0x6F, 0x65, 0x1B, 0xCF, 0xA9, 0x87, 0x91, 0x09
};
- struct type86x_reply *msg = reply->message;
+ struct type86x_reply *msg = reply->msg;
unsigned short service_rc, service_rs;
unsigned int reply_len, pad_len;
char *data;
@@ -713,8 +711,8 @@ static int convert_type86_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply,
struct ica_xcRB *xcRB)
{
- struct type86_fmt2_msg *msg = reply->message;
- char *data = reply->message;
+ struct type86_fmt2_msg *msg = reply->msg;
+ char *data = reply->msg;
/* Copy CPRB to user */
if (copy_to_user(xcRB->reply_control_blk_addr,
@@ -744,8 +742,8 @@ static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply,
struct ep11_urb *xcRB)
{
- struct type86_fmt2_msg *msg = reply->message;
- char *data = reply->message;
+ struct type86_fmt2_msg *msg = reply->msg;
+ char *data = reply->msg;
if (xcRB->resp_len < msg->fmt2.count1)
return -EINVAL;
@@ -766,8 +764,8 @@ static int convert_type86_rng(struct zcrypt_queue *zq,
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
- } __packed * msg = reply->message;
- char *data = reply->message;
+ } __packed * msg = reply->msg;
+ char *data = reply->msg;
if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
return -EINVAL;
@@ -780,7 +778,7 @@ static int convert_response_ica(struct zcrypt_queue *zq,
char __user *outputdata,
unsigned int outputdatalength)
{
- struct type86x_reply *msg = reply->message;
+ struct type86x_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
@@ -820,7 +818,7 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply,
struct ica_xcRB *xcRB)
{
- struct type86x_reply *msg = reply->message;
+ struct type86x_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
@@ -853,7 +851,7 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
struct ap_message *reply, struct ep11_urb *xcRB)
{
- struct type86_ep11_reply *msg = reply->message;
+ struct type86_ep11_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
@@ -883,7 +881,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
struct ap_message *reply,
char *data)
{
- struct type86x_reply *msg = reply->message;
+ struct type86x_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
@@ -928,32 +926,30 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
struct response_type *resp_type =
(struct response_type *) msg->private;
struct type86x_reply *t86r;
- int length;
+ int len;
/* Copy the reply message to the request message buffer. */
if (!reply)
goto out; /* ap_msg->rc indicates the error */
- t86r = reply->message;
+ t86r = reply->msg;
if (t86r->hdr.type == TYPE86_RSP_CODE &&
t86r->cprbx.cprb_ver_id == 0x02) {
switch (resp_type->type) {
case CEXXC_RESPONSE_TYPE_ICA:
- length = sizeof(struct type86x_reply)
- + t86r->length - 2;
- length = min(CEXXC_MAX_ICA_RESPONSE_SIZE, length);
- memcpy(msg->message, reply->message, length);
+ len = sizeof(struct type86x_reply) + t86r->length - 2;
+ len = min_t(int, CEXXC_MAX_ICA_RESPONSE_SIZE, len);
+ memcpy(msg->msg, reply->msg, len);
break;
case CEXXC_RESPONSE_TYPE_XCRB:
- length = t86r->fmt2.offset2 + t86r->fmt2.count2;
- length = min(MSGTYPE06_MAX_MSG_SIZE, length);
- memcpy(msg->message, reply->message, length);
+ len = t86r->fmt2.offset2 + t86r->fmt2.count2;
+ len = min_t(int, MSGTYPE06_MAX_MSG_SIZE, len);
+ memcpy(msg->msg, reply->msg, len);
break;
default:
- memcpy(msg->message, &error_reply,
- sizeof(error_reply));
+ memcpy(msg->msg, &error_reply, sizeof(error_reply));
}
} else
- memcpy(msg->message, reply->message, sizeof(error_reply));
+ memcpy(msg->msg, reply->msg, sizeof(error_reply));
out:
complete(&(resp_type->work));
}
@@ -977,25 +973,25 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
struct response_type *resp_type =
(struct response_type *)msg->private;
struct type86_ep11_reply *t86r;
- int length;
+ int len;
/* Copy the reply message to the request message buffer. */
if (!reply)
goto out; /* ap_msg->rc indicates the error */
- t86r = reply->message;
+ t86r = reply->msg;
if (t86r->hdr.type == TYPE86_RSP_CODE &&
t86r->cprbx.cprb_ver_id == 0x04) {
switch (resp_type->type) {
case CEXXC_RESPONSE_TYPE_EP11:
- length = t86r->fmt2.offset1 + t86r->fmt2.count1;
- length = min(MSGTYPE06_MAX_MSG_SIZE, length);
- memcpy(msg->message, reply->message, length);
+ len = t86r->fmt2.offset1 + t86r->fmt2.count1;
+ len = min_t(int, MSGTYPE06_MAX_MSG_SIZE, len);
+ memcpy(msg->msg, reply->msg, len);
break;
default:
- memcpy(msg->message, &error_reply, sizeof(error_reply));
+ memcpy(msg->msg, &error_reply, sizeof(error_reply));
}
} else {
- memcpy(msg->message, reply->message, sizeof(error_reply));
+ memcpy(msg->msg, reply->msg, sizeof(error_reply));
}
out:
complete(&(resp_type->work));
@@ -1020,8 +1016,8 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
int rc;
ap_init_message(&ap_msg);
- ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
- if (!ap_msg.message)
+ ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg.msg)
return -ENOMEM;
ap_msg.receive = zcrypt_msgtype6_receive;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -1043,7 +1039,7 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
/* Signal pending. */
ap_cancel_message(zq->queue, &ap_msg);
out_free:
- free_page((unsigned long) ap_msg.message);
+ free_page((unsigned long) ap_msg.msg);
return rc;
}
@@ -1064,8 +1060,8 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
int rc;
ap_init_message(&ap_msg);
- ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
- if (!ap_msg.message)
+ ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg.msg)
return -ENOMEM;
ap_msg.receive = zcrypt_msgtype6_receive;
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -1088,7 +1084,7 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
ap_cancel_message(zq->queue, &ap_msg);
}
out_free:
- free_page((unsigned long) ap_msg.message);
+ free_page((unsigned long) ap_msg.msg);
return rc;
}
@@ -1107,8 +1103,8 @@ unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
.type = CEXXC_RESPONSE_TYPE_XCRB,
};
- ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg->message)
+ ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
@@ -1162,8 +1158,8 @@ unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
.type = CEXXC_RESPONSE_TYPE_EP11,
};
- ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg->message)
+ ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive_ep11;
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
@@ -1193,7 +1189,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
struct ep11_cprb cprbx;
unsigned char pld_tag; /* fixed value 0x30 */
unsigned char pld_lenfmt; /* payload length format */
- } __packed * msg = ap_msg->message;
+ } __packed * msg = ap_msg->msg;
struct pld_hdr {
unsigned char func_tag; /* fixed value 0x4 */
unsigned char func_len; /* fixed value 0x4 */
@@ -1256,8 +1252,8 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
.type = CEXXC_RESPONSE_TYPE_XCRB,
};
- ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg->message)
+ ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->msg)
return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
@@ -1290,7 +1286,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
char rule[8];
short int verb_length;
short int key_length;
- } __packed * msg = ap_msg->message;
+ } __packed * msg = ap_msg->msg;
struct response_type *rtype = (struct response_type *)(ap_msg->private);
int rc;
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 41a0df5f070f..0de280a81dd4 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -127,7 +127,7 @@ static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
char rule[8];
short int verb_length;
short int key_length;
- } __packed * msg = ap_msg->message;
+ } __packed * msg = ap_msg->msg;
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
.offset1 = 0x00000058,
@@ -154,7 +154,7 @@ static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
memcpy(msg->rule, "RANDOM ", 8);
msg->verb_length = 0x02;
msg->key_length = 0x02;
- ap_msg->length = sizeof(*msg);
+ ap_msg->len = sizeof(*msg);
*domain = (unsigned short)msg->cprbx.domain;
}
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index b7d9fa567880..8bae6ad159a7 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -107,10 +107,10 @@ struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size)
zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
if (!zq)
return NULL;
- zq->reply.message = kmalloc(max_response_size, GFP_KERNEL);
- if (!zq->reply.message)
+ zq->reply.msg = kmalloc(max_response_size, GFP_KERNEL);
+ if (!zq->reply.msg)
goto out_free;
- zq->reply.length = max_response_size;
+ zq->reply.len = max_response_size;
INIT_LIST_HEAD(&zq->list);
kref_init(&zq->refcount);
return zq;
@@ -123,7 +123,7 @@ EXPORT_SYMBOL(zcrypt_queue_alloc);
void zcrypt_queue_free(struct zcrypt_queue *zq)
{
- kfree(zq->reply.message);
+ kfree(zq->reply.msg);
kfree(zq);
}
EXPORT_SYMBOL(zcrypt_queue_free);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index e95f5b3bef4d..37c6cc374079 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4126,7 +4126,7 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
int srb_idx = 0;
unsigned i = 0;
- struct SGentry *uninitialized_var(ptr);
+ struct SGentry *ptr;
for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
acb->srb_array[i].segment_x = NULL;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index fb9848e1d481..0b4499210b95 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4202,7 +4202,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
struct outbound_queue_table *circularQ;
void *pMsg1 = NULL;
- u8 uninitialized_var(bc);
+ u8 bc;
u32 ret = MPI_IO_STATUS_FAIL;
unsigned long flags;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 4d205ebaee87..05c944a3bdca 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -4182,7 +4182,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
struct outbound_queue_table *circularQ;
void *pMsg1 = NULL;
- u8 uninitialized_var(bc);
+ u8 bc;
u32 ret = MPI_IO_STATUS_FAIL;
unsigned long flags;
u32 regval;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 06056e9ec333..c866a4f33871 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1594,31 +1594,23 @@ static blk_status_t scsi_mq_prep_fn(struct request *req)
static void scsi_mq_done(struct scsi_cmnd *cmd)
{
+ if (unlikely(blk_should_fake_timeout(cmd->request->q)))
+ return;
if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
return;
trace_scsi_dispatch_cmd_done(cmd);
-
- /*
- * If the block layer didn't complete the request due to a timeout
- * injection, scsi must clear its internal completed state so that the
- * timeout handler will see it needs to escalate its own error
- * recovery.
- */
- if (unlikely(!blk_mq_complete_request(cmd->request)))
- clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
+ blk_mq_complete_request(cmd->request);
}
-static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
+static void scsi_mq_put_budget(struct request_queue *q)
{
- struct request_queue *q = hctx->queue;
struct scsi_device *sdev = q->queuedata;
atomic_dec(&sdev->device_busy);
}
-static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
+static bool scsi_mq_get_budget(struct request_queue *q)
{
- struct request_queue *q = hctx->queue;
struct scsi_device *sdev = q->queuedata;
return scsi_dev_queue_ready(q, sdev);
@@ -1685,7 +1677,7 @@ out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
out_put_budget:
- scsi_mq_put_budget(hctx);
+ scsi_mq_put_budget(q);
switch (ret) {
case BLK_STS_OK:
break;
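
Checking blk_should_fake_timeout() before touching SCMD_STATE_COMPLETE replaces the old "complete, then undo" sequence: when timeout injection swallows a completion, the command is never marked complete, the timeout handler escalates naturally, and blk_mq_complete_request() can become void. Condensed restatement of the resulting order:

	if (unlikely(blk_should_fake_timeout(cmd->request->q)))
		return;		/* injected timeout: leave state untouched */
	if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
		return;		/* already completed once */
	blk_mq_complete_request(cmd->request);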
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 6f7eba66687e..d8b2c49d645b 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -59,6 +59,7 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
zone.non_seq = 1;
zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
+ zone.capacity = zone.len;
zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
if (zone.type != ZBC_ZONE_TYPE_CONV &&
@@ -716,6 +717,11 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
/* The drive satisfies the kernel restrictions: set it up */
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
+ if (sdkp->zones_max_open == U32_MAX)
+ blk_queue_max_open_zones(q, 0);
+ else
+ blk_queue_max_open_zones(q, sdkp->zones_max_open);
+ blk_queue_max_active_zones(q, 0);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
/* READ16/WRITE16 is mandatory for ZBC disks */
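
sdkp->zones_max_open is left at U32_MAX when the drive reports no open-zone limit, and the block layer treats a limit of 0 as "unlimited", hence the translation above; host-managed drives have no active-zone limit at all, so max_active_zones is always 0. As a worked example, a ZBC drive reporting MAXIMUM NUMBER OF OPEN SEQUENTIAL WRITE REQUIRED ZONES = 128 should surface 128 through the block layer's queue limit (and, assuming the companion sysfs attribute, as queue/max_open_zones).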
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index d515d2cc20ed..a9370f4aacca 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -8,20 +8,12 @@ config IMX_GPCV2_PM_DOMAINS
select PM_GENERIC_DOMAINS
default y if SOC_IMX7D
-config IMX_SCU_SOC
- bool "i.MX System Controller Unit SoC info support"
- depends on IMX_SCU
- select SOC_BUS
- help
- If you say yes here you get support for the NXP i.MX System
- Controller Unit SoC info module, it will provide the SoC info
- like SoC family, ID and revision etc.
-
config SOC_IMX8M
bool "i.MX8M SoC family support"
depends on ARCH_MXC || COMPILE_TEST
default ARCH_MXC && ARM64
select SOC_BUS
+ select ARM_GIC_V3 if ARCH_MXC
help
If you say yes here you get support for the NXP i.MX8M family
support, it will provide the SoC info like SoC family,
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 446143241fe7..078dc918f4f3 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -5,4 +5,3 @@ endif
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
-obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 87ee9f767b7a..dc644cfb6419 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -12,6 +12,7 @@
#define CMDQ_WRITE_ENABLE_MASK BIT(0)
#define CMDQ_POLL_ENABLE_MASK BIT(0)
#define CMDQ_EOC_IRQ_EN BIT(0)
+#define CMDQ_REG_TYPE 1
struct cmdq_instruction {
union {
@@ -21,8 +22,17 @@ struct cmdq_instruction {
union {
u16 offset;
u16 event;
+ u16 reg_dst;
+ };
+ union {
+ u8 subsys;
+ struct {
+ u8 sop:5;
+ u8 arg_c_t:1;
+ u8 src_t:1;
+ u8 dst_t:1;
+ };
};
- u8 subsys;
u8 op;
};
@@ -243,6 +253,21 @@ int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);
+int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
+{
+ struct cmdq_instruction inst = {};
+
+ if (event >= CMDQ_MAX_EVENT)
+ return -EINVAL;
+
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
+ inst.event = event;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_set_event);
+
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value)
{
@@ -278,7 +303,19 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);
-static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
+int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_LOGIC;
+ inst.dst_t = CMDQ_REG_TYPE;
+ inst.reg_dst = reg_idx;
+ inst.value = value;
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_assign);
+
+int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
struct cmdq_instruction inst = { {0} };
int err;
@@ -297,6 +334,7 @@ static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
return err;
}
+EXPORT_SYMBOL(cmdq_pkt_finalize);
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
@@ -331,10 +369,6 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
unsigned long flags = 0;
struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
- err = cmdq_pkt_finalize(pkt);
- if (err < 0)
- return err;
-
pkt->cb.cb = cb;
pkt->cb.data = data;
pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
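
With cmdq_pkt_finalize() exported and the call removed from cmdq_pkt_flush_async(), appending the end-of-command marker becomes the client's job: build the packet, finalize it, then flush. A hedged usage sketch (pkt, event_id, cb and data are assumed to come from the client's own setup):

	static int my_flush(struct cmdq_pkt *pkt, u16 event_id,
			    cmdq_async_flush_cb cb, void *data)
	{
		int err;

		err = cmdq_pkt_assign(pkt, 0, 0x1234);	/* GPR[0] = 0x1234 */
		if (err)
			return err;
		err = cmdq_pkt_set_event(pkt, event_id);	/* raise HW event */
		if (err)
			return err;
		err = cmdq_pkt_finalize(pkt);	/* EOC + jump, caller-owned now */
		if (err)
			return err;
		return cmdq_pkt_flush_async(pkt, cb, data);
	}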
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 07bb261a63d2..899f8c066797 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -89,7 +89,7 @@ config QCOM_RMTFS_MEM
config QCOM_RPMH
bool "Qualcomm RPM-Hardened (RPMH) Communication"
- depends on ARCH_QCOM && ARM64 || COMPILE_TEST
+ depends on ARCH_QCOM || COMPILE_TEST
help
Support for communication with the hardened-RPM blocks in
Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index bdcf16f88a97..4c9225f15c4e 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -278,13 +278,15 @@ static void pdr_indack_work(struct work_struct *work)
list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
pds = ind->pds;
- pdr_send_indack_msg(pdr, pds, ind->transaction_id);
mutex_lock(&pdr->status_lock);
pds->state = ind->curr_state;
pdr->status(pds->state, pds->service_path, pdr->priv);
mutex_unlock(&pdr->status_lock);
+ /* Ack the indication after clients release the PD resources */
+ pdr_send_indack_msg(pdr, pds, ind->transaction_id);
+
mutex_lock(&pdr->list_lock);
list_del(&ind->node);
mutex_unlock(&pdr->list_lock);
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 7d622ea1274e..d0e4f520cff8 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -3,6 +3,7 @@
#include <linux/acpi.h>
#include <linux/clk.h>
+#include <linux/console.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -90,8 +91,14 @@ struct geni_wrapper {
struct device *dev;
void __iomem *base;
struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
+ struct geni_icc_path to_core;
};
+static const char * const icc_path_names[] = {"qup-core", "qup-config",
+ "qup-memory"};
+
+static struct geni_wrapper *earlycon_wrapper;
+
#define QUP_HW_VER_REG 0x4
/* Common SE registers */
@@ -720,11 +727,132 @@ void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
}
EXPORT_SYMBOL(geni_se_rx_dma_unprep);
+int geni_icc_get(struct geni_se *se, const char *icc_ddr)
+{
+ int i, err;
+ const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ if (!icc_names[i])
+ continue;
+
+ se->icc_paths[i].path = devm_of_icc_get(se->dev, icc_names[i]);
+ if (IS_ERR(se->icc_paths[i].path))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ err = PTR_ERR(se->icc_paths[i].path);
+ if (err != -EPROBE_DEFER)
+ dev_err_ratelimited(se->dev, "Failed to get ICC path '%s': %d\n",
+ icc_names[i], err);
+ return err;
+
+}
+EXPORT_SYMBOL(geni_icc_get);
+
+int geni_icc_set_bw(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_set_bw(se->icc_paths[i].path,
+ se->icc_paths[i].avg_bw, se->icc_paths[i].avg_bw);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC BW voting failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_set_bw);
+
+void geni_icc_set_tag(struct geni_se *se, u32 tag)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++)
+ icc_set_tag(se->icc_paths[i].path, tag);
+}
+EXPORT_SYMBOL(geni_icc_set_tag);
+
+/* TODO: Replace this with icc_bulk_enable once it is implemented in the ICC core */
+int geni_icc_enable(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_enable(se->icc_paths[i].path);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC enable failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_enable);
+
+int geni_icc_disable(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_disable(se->icc_paths[i].path);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC disable failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_disable);
+
+void geni_remove_earlycon_icc_vote(void)
+{
+ struct platform_device *pdev;
+ struct geni_wrapper *wrapper;
+ struct device_node *parent;
+ struct device_node *child;
+
+ if (!earlycon_wrapper)
+ return;
+
+ wrapper = earlycon_wrapper;
+ parent = of_get_next_parent(wrapper->dev->of_node);
+ for_each_child_of_node(parent, child) {
+ if (!of_device_is_compatible(child, "qcom,geni-se-qup"))
+ continue;
+
+ pdev = of_find_device_by_node(child);
+ if (!pdev)
+ continue;
+
+ wrapper = platform_get_drvdata(pdev);
+ icc_put(wrapper->to_core.path);
+ wrapper->to_core.path = NULL;
+ }
+ of_node_put(parent);
+
+ earlycon_wrapper = NULL;
+}
+EXPORT_SYMBOL(geni_remove_earlycon_icc_vote);
+
static int geni_se_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct geni_wrapper *wrapper;
+ struct console __maybe_unused *bcon;
+ bool __maybe_unused has_earlycon = false;
int ret;
wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
@@ -747,6 +875,43 @@ static int geni_se_probe(struct platform_device *pdev)
}
}
+#ifdef CONFIG_SERIAL_EARLYCON
+ for_each_console(bcon) {
+ if (!strcmp(bcon->name, "qcom_geni")) {
+ has_earlycon = true;
+ break;
+ }
+ }
+ if (!has_earlycon)
+ goto exit;
+
+ wrapper->to_core.path = devm_of_icc_get(dev, "qup-core");
+ if (IS_ERR(wrapper->to_core.path))
+ return PTR_ERR(wrapper->to_core.path);
+ /*
+ * Put a minimal BW request on the core clocks on behalf of the early
+ * console. The vote will be removed in the earlycon exit function.
+ *
+ * Note: we vote on every QUP wrapper, not only the one the earlycon is
+ * connected to, because the QUP core clocks of different wrappers share
+ * the same voltage domain. If core1 is put to 0, core2 will also run
+ * at 0 unless it has voted. The default ICC vote is removed as soon as
+ * we touch any of the core clocks.
+ * core1 = core2 = max(core1, core2)
+ */
+ ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW,
+ GENI_DEFAULT_BW);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ if (of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart"))
+ earlycon_wrapper = wrapper;
+ of_node_put(pdev->dev.of_node);
+exit:
+#endif
dev_set_drvdata(dev, wrapper);
dev_dbg(dev, "GENI SE Driver probed\n");
return devm_of_platform_populate(dev);
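The new geni_icc_* helpers give serial-engine drivers one place to manage their interconnect votes. A minimal probe-time sketch, assuming the path-index names GENI_TO_CORE and CPU_TO_GENI from the matching header change (not shown in this diff) and the GENI_DEFAULT_BW constant used in the probe hunk above:

/* Hedged sketch: request paths, vote a nominal bandwidth, then enable. */
static int example_se_icc_setup(struct geni_se *se)
{
	int ret;

	/* "qup-memory" is the optional DDR path; pass NULL if unused */
	ret = geni_icc_get(se, "qup-memory");
	if (ret)
		return ret;

	se->icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;	/* assumed name */
	se->icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;	/* assumed name */

	ret = geni_icc_set_bw(se);
	if (ret)
		return ret;

	/* pair with geni_icc_disable() in the runtime-suspend path */
	return geni_icc_enable(se);
}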
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index 076fd27f3081..ae6675782581 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -175,13 +175,21 @@ static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
u32 data)
{
- u32 new_data;
+ int i;
writel(data, tcs_reg_addr(drv, reg, tcs_id));
- if (readl_poll_timeout_atomic(tcs_reg_addr(drv, reg, tcs_id), new_data,
- new_data == data, 1, USEC_PER_SEC))
- pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
- data, tcs_id, reg);
+
+ /*
+ * Wait until we read back the same value. Use a counter rather than
+ * ktime for timeout since this may be called after timekeeping stops.
+ */
+ for (i = 0; i < USEC_PER_SEC; i++) {
+ if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
+ return;
+ udelay(1);
+ }
+ pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
+ data, tcs_id, reg);
}
/**
@@ -1023,6 +1031,7 @@ static struct platform_driver rpmh_driver = {
.driver = {
.name = "rpmh",
.of_match_table = rpmh_drv_match,
+ .suppress_bind_attrs = true,
},
};
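The open-coded loop in write_tcs_reg_sync() deserves a note: readl_poll_timeout_atomic() measures its timeout with ktime, which stops being meaningful once timekeeping is suspended late in system-wide PM, exactly when this function can still run. A generic sketch of the safe, counter-bounded shape (illustrative only, not a kernel API):

/* Poll a register for an expected value without relying on timekeeping. */
static bool poll_reg_eq(void __iomem *addr, u32 want, unsigned int max_us)
{
	while (max_us--) {
		if (readl(addr) == want)
			return true;
		udelay(1);	/* busy-wait, bounded by max_us iterations */
	}
	return false;
}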
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index f2b5b46ccd1f..b61e183ede69 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -497,7 +497,7 @@ exit:
*
* Invalidate the sleep and wake values in batch_cache.
*/
-int rpmh_invalidate(const struct device *dev)
+void rpmh_invalidate(const struct device *dev)
{
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
struct batch_cache_req *req, *tmp;
@@ -509,7 +509,5 @@ int rpmh_invalidate(const struct device *dev)
INIT_LIST_HEAD(&ctrlr->batch_cache);
ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
-
- return 0;
}
EXPORT_SYMBOL(rpmh_invalidate);
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 005dd30c58fa..b93218cb50b5 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -20,6 +20,7 @@
* struct qcom_smd_rpm - state of the rpm device driver
* @rpm_channel: reference to the smd channel
* @icc: interconnect proxy device
+ * @dev: rpm device
* @ack: completion for acks
* @lock: mutual exclusion around the send/complete pair
* @ack_status: result of the rpm request
@@ -86,6 +87,7 @@ struct qcom_rpm_message {
/**
* qcom_rpm_smd_write - write @buf to @type:@id
* @rpm: rpm handle
+ * @state: active/sleep state flags
* @type: resource type
* @id: resource identifier
* @buf: the data to be written
@@ -230,9 +232,12 @@ static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-apq8084" },
+ { .compatible = "qcom,rpm-ipq6018" },
{ .compatible = "qcom,rpm-msm8916" },
+ { .compatible = "qcom,rpm-msm8936" },
{ .compatible = "qcom,rpm-msm8974" },
{ .compatible = "qcom,rpm-msm8976" },
+ { .compatible = "qcom,rpm-msm8994" },
{ .compatible = "qcom,rpm-msm8996" },
{ .compatible = "qcom,rpm-msm8998" },
{ .compatible = "qcom,rpm-sdm660" },
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 5983c6ffb078..e19102f46302 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -24,6 +24,7 @@
#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
#define SMEM_SOCINFO_BUILD_ID_LENGTH 32
+#define SMEM_SOCINFO_CHIP_ID_LENGTH 32
/*
* SMEM item id, used to acquire handles to respective
@@ -121,6 +122,16 @@ struct socinfo {
__le32 chip_family;
__le32 raw_device_family;
__le32 raw_device_num;
+ /* Version 13 */
+ __le32 nproduct_id;
+ char chip_id[SMEM_SOCINFO_CHIP_ID_LENGTH];
+ /* Version 14 */
+ __le32 num_clusters;
+ __le32 ncluster_array_offset;
+ __le32 num_defective_parts;
+ __le32 ndefective_parts_array_offset;
+ /* Version 15 */
+ __le32 nmodem_supported;
};
#ifdef CONFIG_DEBUG_FS
@@ -135,6 +146,12 @@ struct socinfo_params {
u32 raw_ver;
u32 hw_plat;
u32 fmt;
+ u32 nproduct_id;
+ u32 num_clusters;
+ u32 ncluster_array_offset;
+ u32 num_defective_parts;
+ u32 ndefective_parts_array_offset;
+ u32 nmodem_supported;
};
struct smem_image_version {
@@ -202,8 +219,10 @@ static const struct soc_id soc_id[] = {
{ 310, "MSM8996AU" },
{ 311, "APQ8096AU" },
{ 312, "APQ8096SG" },
+ { 318, "SDM630" },
{ 321, "SDM845" },
{ 341, "SDA845" },
+ { 356, "SM8250" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
@@ -256,7 +275,10 @@ static int qcom_show_pmic_model(struct seq_file *seq, void *p)
if (model < 0)
return -EINVAL;
- seq_printf(seq, "%s\n", pmic_models[model]);
+ if (model < ARRAY_SIZE(pmic_models) && pmic_models[model])
+ seq_printf(seq, "%s\n", pmic_models[model]);
+ else
+ seq_printf(seq, "unknown (%d)\n", model);
return 0;
}
@@ -272,9 +294,19 @@ static int qcom_show_pmic_die_revision(struct seq_file *seq, void *p)
return 0;
}
+static int qcom_show_chip_id(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+
+ seq_printf(seq, "%s\n", socinfo->chip_id);
+
+ return 0;
+}
+
QCOM_OPEN(build_id, qcom_show_build_id);
QCOM_OPEN(pmic_model, qcom_show_pmic_model);
QCOM_OPEN(pmic_die_rev, qcom_show_pmic_die_revision);
+QCOM_OPEN(chip_id, qcom_show_chip_id);
#define DEFINE_IMAGE_OPS(type) \
static int show_image_##type(struct seq_file *seq, void *p) \
@@ -312,7 +344,38 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
qcom_socinfo->info.fmt = __le32_to_cpu(info->fmt);
+ debugfs_create_x32("info_fmt", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.fmt);
+
switch (qcom_socinfo->info.fmt) {
+ case SOCINFO_VERSION(0, 15):
+ qcom_socinfo->info.nmodem_supported = __le32_to_cpu(info->nmodem_supported);
+
+ debugfs_create_u32("nmodem_supported", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nmodem_supported);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 14):
+ qcom_socinfo->info.num_clusters = __le32_to_cpu(info->num_clusters);
+ qcom_socinfo->info.ncluster_array_offset = __le32_to_cpu(info->ncluster_array_offset);
+ qcom_socinfo->info.num_defective_parts = __le32_to_cpu(info->num_defective_parts);
+ qcom_socinfo->info.ndefective_parts_array_offset = __le32_to_cpu(info->ndefective_parts_array_offset);
+
+ debugfs_create_u32("num_clusters", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_clusters);
+ debugfs_create_u32("ncluster_array_offset", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.ncluster_array_offset);
+ debugfs_create_u32("num_defective_parts", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_defective_parts);
+ debugfs_create_u32("ndefective_parts_array_offset", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.ndefective_parts_array_offset);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 13):
+ qcom_socinfo->info.nproduct_id = __le32_to_cpu(info->nproduct_id);
+
+ debugfs_create_u32("nproduct_id", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nproduct_id);
+ DEBUGFS_ADD(info, chip_id);
+ /* Fall through */
case SOCINFO_VERSION(0, 12):
qcom_socinfo->info.chip_family =
__le32_to_cpu(info->chip_family);
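As a worked check of the new cases: SOCINFO_VERSION(0, 15) packs major 0 and minor 15 into 0x0000000f, so the switch compares directly against the raw format word read from SMEM. Each newer case deliberately falls through, so a version-15 socinfo also exposes the version-14, -13 and -12 fields in debugfs.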
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 53cd8d2d0cd2..30984659df90 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -201,6 +201,13 @@ config ARCH_R8A774C0
help
This enables support for the Renesas RZ/G2E SoC.
+config ARCH_R8A774E1
+ bool "Renesas RZ/G2H SoC Platform"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774E1
+ help
+ This enables support for the Renesas RZ/G2H SoC.
+
config ARCH_R8A77950
bool "Renesas R-Car H3 ES1.x SoC Platform"
select ARCH_RCAR_GEN3
@@ -296,6 +303,10 @@ config SYSC_R8A774C0
bool "RZ/G2E System Controller support" if COMPILE_TEST
select SYSC_RCAR
+config SYSC_R8A774E1
+ bool "RZ/G2H System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
config SYSC_R8A7779
bool "R-Car H1 System Controller support" if COMPILE_TEST
select SYSC_RCAR
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 08296d78e2ad..10a399fc486a 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SYSC_R8A77470) += r8a77470-sysc.o
obj-$(CONFIG_SYSC_R8A774A1) += r8a774a1-sysc.o
obj-$(CONFIG_SYSC_R8A774B1) += r8a774b1-sysc.o
obj-$(CONFIG_SYSC_R8A774C0) += r8a774c0-sysc.o
+obj-$(CONFIG_SYSC_R8A774E1) += r8a774e1-sysc.o
obj-$(CONFIG_SYSC_R8A7779) += r8a7779-sysc.o
obj-$(CONFIG_SYSC_R8A7790) += r8a7790-sysc.o
obj-$(CONFIG_SYSC_R8A7791) += r8a7791-sysc.o
diff --git a/drivers/soc/renesas/r8a774e1-sysc.c b/drivers/soc/renesas/r8a774e1-sysc.c
new file mode 100644
index 000000000000..18449f746455
--- /dev/null
+++ b/drivers/soc/renesas/r8a774e1-sysc.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2H System Controller
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ *
+ * Based on Renesas R-Car H3 System Controller
+ * Copyright (C) 2016-2017 Glider bvba
+ */
+
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a774e1-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a774e1_areas[] __initconst = {
+ { "always-on", 0, 0, R8A774E1_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca57-scu", 0x1c0, 0, R8A774E1_PD_CA57_SCU, R8A774E1_PD_ALWAYS_ON, PD_SCU },
+ { "ca57-cpu0", 0x80, 0, R8A774E1_PD_CA57_CPU0, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
+ { "ca57-cpu1", 0x80, 1, R8A774E1_PD_CA57_CPU1, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
+ { "ca57-cpu2", 0x80, 2, R8A774E1_PD_CA57_CPU2, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
+ { "ca57-cpu3", 0x80, 3, R8A774E1_PD_CA57_CPU3, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
+ { "ca53-scu", 0x140, 0, R8A774E1_PD_CA53_SCU, R8A774E1_PD_ALWAYS_ON, PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A774E1_PD_CA53_CPU0, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A774E1_PD_CA53_CPU1, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
+ { "ca53-cpu2", 0x200, 2, R8A774E1_PD_CA53_CPU2, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
+ { "ca53-cpu3", 0x200, 3, R8A774E1_PD_CA53_CPU3, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
+ { "a3vp", 0x340, 0, R8A774E1_PD_A3VP, R8A774E1_PD_ALWAYS_ON },
+ { "a3vc", 0x380, 0, R8A774E1_PD_A3VC, R8A774E1_PD_ALWAYS_ON },
+ { "a2vc1", 0x3c0, 1, R8A774E1_PD_A2VC1, R8A774E1_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A774E1_PD_3DG_A, R8A774E1_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A774E1_PD_3DG_B, R8A774E1_PD_3DG_A },
+ { "3dg-c", 0x100, 2, R8A774E1_PD_3DG_C, R8A774E1_PD_3DG_B },
+ { "3dg-d", 0x100, 3, R8A774E1_PD_3DG_D, R8A774E1_PD_3DG_C },
+ { "3dg-e", 0x100, 4, R8A774E1_PD_3DG_E, R8A774E1_PD_3DG_D },
+};
+
+const struct rcar_sysc_info r8a774e1_sysc_info __initconst = {
+ .areas = r8a774e1_areas,
+ .num_areas = ARRAY_SIZE(r8a774e1_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
+};
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index a2b2b1768768..a932015ce9c1 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -48,6 +48,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
{ .compatible = "renesas,r8a774a1-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a774b1-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a774c0-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a774e1-rst", .data = &rcar_rst_gen3 },
/* R-Car Gen1 */
{ .compatible = "renesas,r8a7778-reset-wdt", .data = &rcar_rst_gen1 },
{ .compatible = "renesas,r8a7779-reset-wdt", .data = &rcar_rst_gen1 },
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 04ea87a188f1..9b235fc90027 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -296,6 +296,9 @@ static const struct of_device_id rcar_sysc_matches[] __initconst = {
#ifdef CONFIG_SYSC_R8A774C0
{ .compatible = "renesas,r8a774c0-sysc", .data = &r8a774c0_sysc_info },
#endif
+#ifdef CONFIG_SYSC_R8A774E1
+ { .compatible = "renesas,r8a774e1-sysc", .data = &r8a774e1_sysc_info },
+#endif
#ifdef CONFIG_SYSC_R8A7779
{ .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
#endif
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index e417f26fe155..8d861c1cfdf7 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -56,6 +56,7 @@ extern const struct rcar_sysc_info r8a77470_sysc_info;
extern const struct rcar_sysc_info r8a774a1_sysc_info;
extern const struct rcar_sysc_info r8a774b1_sysc_info;
extern const struct rcar_sysc_info r8a774c0_sysc_info;
+extern const struct rcar_sysc_info r8a774e1_sysc_info;
extern const struct rcar_sysc_info r8a7779_sysc_info;
extern const struct rcar_sysc_info r8a7790_sysc_info;
extern const struct rcar_sysc_info r8a7791_sysc_info;
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 35dba8b8814e..f815a6a8b88b 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -126,6 +126,11 @@ static const struct renesas_soc soc_rz_g2e __initconst __maybe_unused = {
.id = 0x57,
};
+static const struct renesas_soc soc_rz_g2h __initconst __maybe_unused = {
+ .family = &fam_rzg2,
+ .id = 0x4f,
+};
+
static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = {
.family = &fam_rcar_gen1,
};
@@ -238,6 +243,9 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A774C0
{ .compatible = "renesas,r8a774c0", .data = &soc_rz_g2e },
#endif
+#ifdef CONFIG_ARCH_R8A774E1
+ { .compatible = "renesas,r8a774e1", .data = &soc_rz_g2h },
+#endif
#ifdef CONFIG_ARCH_R8A7778
{ .compatible = "renesas,r8a7778", .data = &soc_rcar_m1a },
#endif
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index c7a2003687c7..264185664594 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -37,4 +37,7 @@ config EXYNOS_PM_DOMAINS
bool "Exynos PM domains" if COMPILE_TEST
depends on PM_GENERIC_DOMAINS || COMPILE_TEST
+config EXYNOS_REGULATOR_COUPLER
+ bool "Exynos SoC Regulator Coupler" if COMPILE_TEST
+ depends on ARCH_EXYNOS || COMPILE_TEST
endif
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index edd1d6ea064d..ecc3a32f6406 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
exynos5250-pmu.o exynos5420-pmu.o
obj-$(CONFIG_EXYNOS_PM_DOMAINS) += pm_domains.o
+obj-$(CONFIG_EXYNOS_REGULATOR_COUPLER) += exynos-regulator-coupler.o
diff --git a/drivers/soc/samsung/exynos-regulator-coupler.c b/drivers/soc/samsung/exynos-regulator-coupler.c
new file mode 100644
index 000000000000..61a156b44a48
--- /dev/null
+++ b/drivers/soc/samsung/exynos-regulator-coupler.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * Simplified generic voltage coupler from regulator core.c.
+ * The main difference is that it keeps the current regulator voltage
+ * if consumers haven't applied their constraints yet.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+static int regulator_get_optimal_voltage(struct regulator_dev *rdev,
+ int *current_uV,
+ int *min_uV, int *max_uV,
+ suspend_state_t state)
+{
+ struct coupling_desc *c_desc = &rdev->coupling_desc;
+ struct regulator_dev **c_rdevs = c_desc->coupled_rdevs;
+ struct regulation_constraints *constraints = rdev->constraints;
+ int desired_min_uV = 0, desired_max_uV = INT_MAX;
+ int max_current_uV = 0, min_current_uV = INT_MAX;
+ int highest_min_uV = 0, target_uV, possible_uV;
+ int i, ret, max_spread, n_coupled = c_desc->n_coupled;
+ bool done;
+
+ *current_uV = -1;
+
+ /* Find highest min desired voltage */
+ for (i = 0; i < n_coupled; i++) {
+ int tmp_min = 0;
+ int tmp_max = INT_MAX;
+
+ lockdep_assert_held_once(&c_rdevs[i]->mutex.base);
+
+ ret = regulator_check_consumers(c_rdevs[i],
+ &tmp_min,
+ &tmp_max, state);
+ if (ret < 0)
+ return ret;
+
+ if (tmp_min == 0) {
+ ret = regulator_get_voltage_rdev(c_rdevs[i]);
+ if (ret < 0)
+ return ret;
+ tmp_min = ret;
+ }
+
+ /* apply constraints */
+ ret = regulator_check_voltage(c_rdevs[i], &tmp_min, &tmp_max);
+ if (ret < 0)
+ return ret;
+
+ highest_min_uV = max(highest_min_uV, tmp_min);
+
+ if (i == 0) {
+ desired_min_uV = tmp_min;
+ desired_max_uV = tmp_max;
+ }
+ }
+
+ max_spread = constraints->max_spread[0];
+
+ /*
+ * Let target_uV be equal to the desired one if possible.
+ * If not, set it to the minimum voltage allowed by the other
+ * coupled regulators.
+ */
+ target_uV = max(desired_min_uV, highest_min_uV - max_spread);
+
+ /*
+ * Find the min and max voltages that don't currently violate
+ * max_spread.
+ */
+ for (i = 1; i < n_coupled; i++) {
+ int tmp_act;
+
+ tmp_act = regulator_get_voltage_rdev(c_rdevs[i]);
+ if (tmp_act < 0)
+ return tmp_act;
+
+ min_current_uV = min(tmp_act, min_current_uV);
+ max_current_uV = max(tmp_act, max_current_uV);
+ }
+
+ /*
+ * Correct the target voltage so that it no longer violates
+ * max_spread.
+ */
+ possible_uV = max(target_uV, max_current_uV - max_spread);
+ possible_uV = min(possible_uV, min_current_uV + max_spread);
+
+ if (possible_uV > desired_max_uV)
+ return -EINVAL;
+
+ done = (possible_uV == target_uV);
+ desired_min_uV = possible_uV;
+
+ /* Set current_uV if it wasn't done earlier and if necessary */
+ if (*current_uV == -1) {
+ ret = regulator_get_voltage_rdev(rdev);
+ if (ret < 0)
+ return ret;
+ *current_uV = ret;
+ }
+
+ *min_uV = desired_min_uV;
+ *max_uV = desired_max_uV;
+
+ return done;
+}
+
+static int exynos_coupler_balance_voltage(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct regulator_dev **c_rdevs;
+ struct regulator_dev *best_rdev;
+ struct coupling_desc *c_desc = &rdev->coupling_desc;
+ int i, ret, n_coupled, best_min_uV, best_max_uV, best_c_rdev;
+ unsigned int delta, best_delta;
+ unsigned long c_rdev_done = 0;
+ bool best_c_rdev_done;
+
+ c_rdevs = c_desc->coupled_rdevs;
+ n_coupled = c_desc->n_coupled;
+
+ /*
+ * Find the best possible voltage change on each loop. Leave the loop
+ * if there isn't any possible change.
+ */
+ do {
+ best_c_rdev_done = false;
+ best_delta = 0;
+ best_min_uV = 0;
+ best_max_uV = 0;
+ best_c_rdev = 0;
+ best_rdev = NULL;
+
+ /*
+ * Find highest difference between optimal voltage
+ * and current voltage.
+ */
+ for (i = 0; i < n_coupled; i++) {
+ /*
+ * optimal_uV is the best voltage that can be set for
+ * i-th regulator at the moment without violating
+ * max_spread constraint in order to balance
+ * the coupled voltages.
+ */
+ int optimal_uV = 0, optimal_max_uV = 0, current_uV = 0;
+
+ if (test_bit(i, &c_rdev_done))
+ continue;
+
+ ret = regulator_get_optimal_voltage(c_rdevs[i],
+ &current_uV,
+ &optimal_uV,
+ &optimal_max_uV,
+ state);
+ if (ret < 0)
+ goto out;
+
+ delta = abs(optimal_uV - current_uV);
+
+ if (delta && best_delta <= delta) {
+ best_c_rdev_done = ret;
+ best_delta = delta;
+ best_rdev = c_rdevs[i];
+ best_min_uV = optimal_uV;
+ best_max_uV = optimal_max_uV;
+ best_c_rdev = i;
+ }
+ }
+
+ /* Nothing to change, return successfully */
+ if (!best_rdev) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = regulator_set_voltage_rdev(best_rdev, best_min_uV,
+ best_max_uV, state);
+
+ if (ret < 0)
+ goto out;
+
+ if (best_c_rdev_done)
+ set_bit(best_c_rdev, &c_rdev_done);
+
+ } while (n_coupled > 1);
+
+out:
+ return ret;
+}
+
+static int exynos_coupler_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ return 0;
+}
+
+static struct regulator_coupler exynos_coupler = {
+ .attach_regulator = exynos_coupler_attach,
+ .balance_voltage = exynos_coupler_balance_voltage,
+};
+
+static int __init exynos_coupler_init(void)
+{
+ if (!of_machine_is_compatible("samsung,exynos5800"))
+ return 0;
+
+ return regulator_coupler_register(&exynos_coupler);
+}
+arch_initcall(exynos_coupler_init);
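A worked pass through the balancing loop, assuming two coupled regulators with max_spread = 300000 uV: if one rail requests 600000 uV while its partner sits at 1000000 uV, regulator_get_optimal_voltage() clamps the step to max(600000, 1000000 - 300000) = 700000 uV. The rail is moved there but reported as not done, so it stays out of c_rdev_done; only after the partner has come down can a later iteration finish the move to 600000 uV.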
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index 3cdd69d1bd4d..8e416ad91ee2 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -27,7 +27,7 @@ static u32 chipid;
u32 tegra_read_chipid(void)
{
- WARN(!chipid, "Tegra ABP MISC not yet available\n");
+ WARN(!chipid, "Tegra APB MISC not yet available\n");
return chipid;
}
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 5fb2ee2ac978..6dcc21dde0cb 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -109,6 +109,21 @@ struct k3_ring_ops {
};
/**
+ * struct k3_ring_state - Internal state tracking structure
+ *
+ * @free: Number of free entries
+ * @occ: Occupancy
+ * @windex: Write index
+ * @rindex: Read index
+ */
+struct k3_ring_state {
+ u32 free;
+ u32 occ;
+ u32 windex;
+ u32 rindex;
+};
+
+/**
* struct k3_ring - RA Ring descriptor
*
* @rt: Ring control/status registers
@@ -121,10 +136,6 @@ struct k3_ring_ops {
* @elm_size: Size of the ring element
* @mode: Ring mode
* @flags: flags
- * @free: Number of free elements
- * @occ: Ring occupancy
- * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING)
- * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING)
* @ring_id: Ring Id
* @parent: Pointer on struct @k3_ringacc
* @use_count: Use count for shared rings
@@ -143,16 +154,17 @@ struct k3_ring {
u32 flags;
#define K3_RING_FLAG_BUSY BIT(1)
#define K3_RING_FLAG_SHARED BIT(2)
- u32 free;
- u32 occ;
- u32 windex;
- u32 rindex;
+ struct k3_ring_state state;
u32 ring_id;
struct k3_ringacc *parent;
u32 use_count;
int proxy_id;
};
+struct k3_ringacc_ops {
+ int (*init)(struct platform_device *pdev, struct k3_ringacc *ringacc);
+};
+
/**
* struct k3_ringacc - Rings accelerator descriptor
*
@@ -171,6 +183,7 @@ struct k3_ring {
* @tisci: pointer ti-sci handle
* @tisci_ring_ops: ti-sci rings ops
* @tisci_dev_id: ti-sci device id
+ * @ops: SoC specific ringacc operation
*/
struct k3_ringacc {
struct device *dev;
@@ -191,6 +204,8 @@ struct k3_ringacc {
const struct ti_sci_handle *tisci;
const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
u32 tisci_dev_id;
+
+ const struct k3_ringacc_ops *ops;
};
static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
@@ -245,6 +260,7 @@ static void k3_ringacc_ring_dump(struct k3_ring *ring)
&ring->ring_mem_dma);
dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
ring->elm_size, ring->size, ring->mode, ring->proxy_id);
+ dev_dbg(dev, "dump flags %08X\n", ring->flags);
dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
@@ -313,6 +329,30 @@ error:
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
+ int fwd_id, int compl_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring)
+{
+ int ret = 0;
+
+ if (!fwd_ring || !compl_ring)
+ return -EINVAL;
+
+ *fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
+ if (!(*fwd_ring))
+ return -ENODEV;
+
+ *compl_ring = k3_ringacc_request_ring(ringacc, compl_id, 0);
+ if (!(*compl_ring)) {
+ k3_ringacc_ring_free(*fwd_ring);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
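A hedged caller sketch for the new pairing helper (the ring indices are hypothetical):

/* Request a forward/completion ring pair, e.g. for one DMA channel. */
struct k3_ring *fwd_ring, *compl_ring;
int ret;

ret = k3_ringacc_request_rings_pair(ringacc, 10, 11,
				    &fwd_ring, &compl_ring);
if (ret)
	return ret;	/* on failure, neither ring is left held */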
+
static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
{
struct k3_ringacc *ringacc = ring->parent;
@@ -339,10 +379,7 @@ void k3_ringacc_ring_reset(struct k3_ring *ring)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return;
- ring->occ = 0;
- ring->free = 0;
- ring->rindex = 0;
- ring->windex = 0;
+ memset(&ring->state, 0, sizeof(ring->state));
k3_ringacc_ring_reset_sci(ring);
}
@@ -556,11 +593,13 @@ static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
- struct k3_ringacc *ringacc = ring->parent;
+ struct k3_ringacc *ringacc;
int ret = 0;
if (!ring || !cfg)
return -EINVAL;
+ ringacc = ring->parent;
+
if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
@@ -590,10 +629,7 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
ring->size = cfg->size;
ring->elm_size = cfg->elm_size;
ring->mode = cfg->mode;
- ring->occ = 0;
- ring->free = 0;
- ring->rindex = 0;
- ring->windex = 0;
+ memset(&ring->state, 0, sizeof(ring->state));
if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
ring->proxy = ringacc->proxy_target_base +
@@ -613,7 +649,7 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
ring->ops = NULL;
ret = -EINVAL;
goto err_free_proxy;
- };
+ }
ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
ring->size * (4 << ring->elm_size),
@@ -664,10 +700,10 @@ u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return -EINVAL;
- if (!ring->free)
- ring->free = ring->size - readl(&ring->rt->occ);
+ if (!ring->state.free)
+ ring->state.free = ring->size - readl(&ring->rt->occ);
- return ring->free;
+ return ring->state.free;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
@@ -738,7 +774,7 @@ static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
"proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
access_mode);
memcpy_fromio(elem, ptr, (4 << ring->elm_size));
- ring->occ--;
+ ring->state.occ--;
break;
case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
@@ -746,14 +782,14 @@ static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
"proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
access_mode);
memcpy_toio(ptr, elem, (4 << ring->elm_size));
- ring->free--;
+ ring->state.free--;
break;
default:
return -EINVAL;
}
- dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free,
- ring->occ);
+ dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->state.free,
+ ring->state.occ);
return 0;
}
@@ -808,7 +844,7 @@ static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
"memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
access_mode);
memcpy_fromio(elem, ptr, (4 << ring->elm_size));
- ring->occ--;
+ ring->state.occ--;
break;
case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
@@ -816,14 +852,15 @@ static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
"memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
access_mode);
memcpy_toio(ptr, elem, (4 << ring->elm_size));
- ring->free--;
+ ring->state.free--;
break;
default:
return -EINVAL;
}
- dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free,
- ring->windex, ring->occ, ring->rindex);
+ dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n",
+ ring->state.free, ring->state.windex, ring->state.occ,
+ ring->state.rindex);
return 0;
}
@@ -855,16 +892,16 @@ static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
{
void *elem_ptr;
- elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex);
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);
memcpy(elem_ptr, elem, (4 << ring->elm_size));
- ring->windex = (ring->windex + 1) % ring->size;
- ring->free--;
+ ring->state.windex = (ring->state.windex + 1) % ring->size;
+ ring->state.free--;
writel(1, &ring->rt->db);
dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
- ring->free, ring->windex);
+ ring->state.free, ring->state.windex);
return 0;
}
@@ -873,16 +910,16 @@ static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
{
void *elem_ptr;
- elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex);
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);
memcpy(elem, elem_ptr, (4 << ring->elm_size));
- ring->rindex = (ring->rindex + 1) % ring->size;
- ring->occ--;
+ ring->state.rindex = (ring->state.rindex + 1) % ring->size;
+ ring->state.occ--;
writel(-1, &ring->rt->db);
dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
- ring->occ, ring->rindex, elem_ptr);
+ ring->state.occ, ring->state.rindex, elem_ptr);
return 0;
}
@@ -893,8 +930,8 @@ int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return -EINVAL;
- dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free,
- ring->windex);
+ dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n",
+ ring->state.free, ring->state.windex);
if (k3_ringacc_ring_is_full(ring))
return -ENOMEM;
@@ -914,7 +951,7 @@ int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
return -EINVAL;
dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
- ring->free, ring->windex);
+ ring->state.free, ring->state.windex);
if (k3_ringacc_ring_is_full(ring))
return -ENOMEM;
@@ -933,13 +970,13 @@ int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return -EINVAL;
- if (!ring->occ)
- ring->occ = k3_ringacc_ring_get_occ(ring);
+ if (!ring->state.occ)
+ ring->state.occ = k3_ringacc_ring_get_occ(ring);
- dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ,
- ring->rindex);
+ dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
+ ring->state.rindex);
- if (!ring->occ)
+ if (!ring->state.occ)
return -ENODATA;
if (ring->ops && ring->ops->pop_head)
@@ -956,13 +993,13 @@ int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return -EINVAL;
- if (!ring->occ)
- ring->occ = k3_ringacc_ring_get_occ(ring);
+ if (!ring->state.occ)
+ ring->state.occ = k3_ringacc_ring_get_occ(ring);
- dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ,
- ring->rindex);
+ dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
+ ring->state.occ, ring->state.rindex);
- if (!ring->occ)
+ if (!ring->state.occ)
return -ENODATA;
if (ring->ops && ring->ops->pop_tail)
@@ -1047,21 +1084,14 @@ static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
ringacc->rm_gp_range);
}
-static int k3_ringacc_probe(struct platform_device *pdev)
+static int k3_ringacc_init(struct platform_device *pdev,
+ struct k3_ringacc *ringacc)
{
- struct k3_ringacc *ringacc;
void __iomem *base_fifo, *base_rt;
struct device *dev = &pdev->dev;
struct resource *res;
int ret, i;
- ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
- if (!ringacc)
- return -ENOMEM;
-
- ringacc->dev = dev;
- mutex_init(&ringacc->req_lock);
-
dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
if (!dev->msi_domain) {
@@ -1120,14 +1150,9 @@ static int k3_ringacc_probe(struct platform_device *pdev)
ringacc->rings[i].ring_id = i;
ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
}
- dev_set_drvdata(dev, ringacc);
ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
- mutex_lock(&k3_ringacc_list_lock);
- list_add_tail(&ringacc->list, &k3_ringacc_list);
- mutex_unlock(&k3_ringacc_list_lock);
-
dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
ringacc->num_rings,
ringacc->rm_gp_range->desc[0].start,
@@ -1137,15 +1162,60 @@ static int k3_ringacc_probe(struct platform_device *pdev)
ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
+
return 0;
}
+struct ringacc_match_data {
+ struct k3_ringacc_ops ops;
+};
+
+static struct ringacc_match_data k3_ringacc_data = {
+ .ops = {
+ .init = k3_ringacc_init,
+ },
+};
+
/* Match table for of_platform binding */
static const struct of_device_id k3_ringacc_of_match[] = {
- { .compatible = "ti,am654-navss-ringacc", },
+ { .compatible = "ti,am654-navss-ringacc", .data = &k3_ringacc_data, },
{},
};
+static int k3_ringacc_probe(struct platform_device *pdev)
+{
+ const struct ringacc_match_data *match_data;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct k3_ringacc *ringacc;
+ int ret;
+
+ match = of_match_node(k3_ringacc_of_match, dev->of_node);
+ if (!match)
+ return -ENODEV;
+ match_data = match->data;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return -ENOMEM;
+
+ ringacc->dev = dev;
+ mutex_init(&ringacc->req_lock);
+ ringacc->ops = &match_data->ops;
+
+ ret = ringacc->ops->init(pdev, ringacc);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, ringacc);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_add_tail(&ringacc->list, &k3_ringacc_list);
+ mutex_unlock(&k3_ringacc_list_lock);
+
+ return 0;
+}
+
static struct platform_driver k3_ringacc_driver = {
.probe = k3_ringacc_probe,
.driver = {
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
index 1762d89fc05d..fde66e28e046 100644
--- a/drivers/soc/ti/knav_qmss_acc.c
+++ b/drivers/soc/ti/knav_qmss_acc.c
@@ -450,7 +450,7 @@ static int knav_acc_free_range(struct knav_range_info *range)
return 0;
}
-struct knav_range_ops knav_acc_range_ops = {
+static struct knav_range_ops knav_acc_range_ops = {
.set_notify = knav_acc_set_notify,
.init_queue = knav_acc_init_queue,
.open_queue = knav_acc_open_queue,
diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c
index d64feeb51a40..a9472e0e5d61 100644
--- a/drivers/soc/ux500/ux500-soc-id.c
+++ b/drivers/soc/ux500/ux500-soc-id.c
@@ -146,9 +146,8 @@ static const char * __init ux500_get_revision(void)
return kasprintf(GFP_KERNEL, "%s", "Unknown");
}
-static ssize_t ux500_get_process(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+process_show(struct device *dev, struct device_attribute *attr, char *buf)
{
if (dbx500_id.process == 0x00)
return sprintf(buf, "Standard\n");
@@ -156,6 +155,15 @@ static ssize_t ux500_get_process(struct device *dev,
return sprintf(buf, "%02xnm\n", dbx500_id.process);
}
+static DEVICE_ATTR_RO(process);
+
+static struct attribute *ux500_soc_attrs[] = {
+ &dev_attr_process.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(ux500_soc);
+
static const char *db8500_read_soc_id(struct device_node *backupram)
{
void __iomem *base;
@@ -184,14 +192,11 @@ static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr,
soc_dev_attr->machine = ux500_get_machine();
soc_dev_attr->family = ux500_get_family();
soc_dev_attr->revision = ux500_get_revision();
+ soc_dev_attr->custom_attr_group = ux500_soc_groups[0];
}
-static const struct device_attribute ux500_soc_attr =
- __ATTR(process, S_IRUGO, ux500_get_process, NULL);
-
static int __init ux500_soc_device_init(void)
{
- struct device *parent;
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
struct device_node *backupram;
@@ -217,9 +222,6 @@ static int __init ux500_soc_device_init(void)
return PTR_ERR(soc_dev);
}
- parent = soc_device_to_device(soc_dev);
- device_create_file(parent, &ux500_soc_attr);
-
return 0;
}
subsys_initcall(ux500_soc_device_init);
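This conversion (and the two that follow in the Versatile drivers) leans on the ATTRIBUTE_GROUPS() helper from <linux/sysfs.h>. A simplified sketch of what ATTRIBUTE_GROUPS(ux500_soc) expands to, which is why soc_info_populate() can hand custom_attr_group the first entry of ux500_soc_groups:

/* Rough expansion of ATTRIBUTE_GROUPS(ux500_soc), simplified: */
static const struct attribute_group ux500_soc_group = {
	.attrs = ux500_soc_attrs,
};
static const struct attribute_group *ux500_soc_groups[] = {
	&ux500_soc_group,
	NULL,
};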
diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c
index ae13fa2aa582..7dcf77ccd31e 100644
--- a/drivers/soc/versatile/soc-integrator.c
+++ b/drivers/soc/versatile/soc-integrator.c
@@ -56,45 +56,47 @@ static const char *integrator_fpga_str(u32 id)
}
}
-static ssize_t integrator_get_manf(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+manufacturer_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%02x\n", integrator_coreid >> 24);
}
-static struct device_attribute integrator_manf_attr =
- __ATTR(manufacturer, S_IRUGO, integrator_get_manf, NULL);
+static DEVICE_ATTR_RO(manufacturer);
-static ssize_t integrator_get_arch(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+arch_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", integrator_arch_str(integrator_coreid));
}
-static struct device_attribute integrator_arch_attr =
- __ATTR(arch, S_IRUGO, integrator_get_arch, NULL);
+static DEVICE_ATTR_RO(arch);
-static ssize_t integrator_get_fpga(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+fpga_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", integrator_fpga_str(integrator_coreid));
}
-static struct device_attribute integrator_fpga_attr =
- __ATTR(fpga, S_IRUGO, integrator_get_fpga, NULL);
+static DEVICE_ATTR_RO(fpga);
-static ssize_t integrator_get_build(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+build_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%02x\n", (integrator_coreid >> 4) & 0xFF);
}
-static struct device_attribute integrator_build_attr =
- __ATTR(build, S_IRUGO, integrator_get_build, NULL);
+static DEVICE_ATTR_RO(build);
+
+static struct attribute *integrator_attrs[] = {
+ &dev_attr_manufacturer.attr,
+ &dev_attr_arch.attr,
+ &dev_attr_fpga.attr,
+ &dev_attr_build.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(integrator);
static int __init integrator_soc_init(void)
{
@@ -127,6 +129,7 @@ static int __init integrator_soc_init(void)
soc_dev_attr->soc_id = "Integrator";
soc_dev_attr->machine = "Integrator";
soc_dev_attr->family = "Versatile";
+ soc_dev_attr->custom_attr_group = integrator_groups[0];
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
@@ -134,11 +137,6 @@ static int __init integrator_soc_init(void)
}
dev = soc_device_to_device(soc_dev);
- device_create_file(dev, &integrator_manf_attr);
- device_create_file(dev, &integrator_arch_attr);
- device_create_file(dev, &integrator_fpga_attr);
- device_create_file(dev, &integrator_build_attr);
-
dev_info(dev, "Detected ARM core module:\n");
dev_info(dev, " Manufacturer: %02x\n", (val >> 24));
dev_info(dev, " Architecture: %s\n", integrator_arch_str(val));
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c
index 9471353dd8c3..c6876d232d8f 100644
--- a/drivers/soc/versatile/soc-realview.c
+++ b/drivers/soc/versatile/soc-realview.c
@@ -39,45 +39,47 @@ static const char *realview_arch_str(u32 id)
}
}
-static ssize_t realview_get_manf(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+manufacturer_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%02x\n", realview_coreid >> 24);
}
-static struct device_attribute realview_manf_attr =
- __ATTR(manufacturer, S_IRUGO, realview_get_manf, NULL);
+static DEVICE_ATTR_RO(manufacturer);
-static ssize_t realview_get_board(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+board_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "HBI-%03x\n", ((realview_coreid >> 16) & 0xfff));
}
-static struct device_attribute realview_board_attr =
- __ATTR(board, S_IRUGO, realview_get_board, NULL);
+static DEVICE_ATTR_RO(board);
-static ssize_t realview_get_arch(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+fpga_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", realview_arch_str(realview_coreid));
}
-static struct device_attribute realview_arch_attr =
- __ATTR(fpga, S_IRUGO, realview_get_arch, NULL);
+static DEVICE_ATTR_RO(fpga);
-static ssize_t realview_get_build(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+build_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%02x\n", (realview_coreid & 0xFF));
}
-static struct device_attribute realview_build_attr =
- __ATTR(build, S_IRUGO, realview_get_build, NULL);
+static DEVICE_ATTR_RO(build);
+
+static struct attribute *realview_attrs[] = {
+ &dev_attr_manufacturer.attr,
+ &dev_attr_board.attr,
+ &dev_attr_fpga.attr,
+ &dev_attr_build.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(realview);
static int realview_soc_probe(struct platform_device *pdev)
{
@@ -102,6 +104,7 @@ static int realview_soc_probe(struct platform_device *pdev)
soc_dev_attr->machine = "RealView";
soc_dev_attr->family = "Versatile";
+ soc_dev_attr->custom_attr_group = realview_groups[0];
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
@@ -112,11 +115,6 @@ static int realview_soc_probe(struct platform_device *pdev)
if (ret)
return -ENODEV;
- device_create_file(soc_device_to_device(soc_dev), &realview_manf_attr);
- device_create_file(soc_device_to_device(soc_dev), &realview_board_attr);
- device_create_file(soc_device_to_device(soc_dev), &realview_arch_attr);
- device_create_file(soc_device_to_device(soc_dev), &realview_build_attr);
-
dev_info(&pdev->dev, "RealView Syscon Core ID: 0x%08x, HBI-%03x\n",
realview_coreid,
((realview_coreid >> 16) & 0xfff));
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index b5871612613b..7c53ffae9f50 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -4,22 +4,22 @@
#
#Bus Objs
-soundwire-bus-objs := bus_type.o bus.o master.o slave.o mipi_disco.o stream.o \
+soundwire-bus-y := bus_type.o bus.o master.o slave.o mipi_disco.o stream.o \
sysfs_slave.o sysfs_slave_dpn.o
obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
ifdef CONFIG_DEBUG_FS
-soundwire-bus-objs += debugfs.o
+soundwire-bus-y += debugfs.o
endif
#Cadence Objs
-soundwire-cadence-objs := cadence_master.o
+soundwire-cadence-y := cadence_master.o
obj-$(CONFIG_SOUNDWIRE_CADENCE) += soundwire-cadence.o
#Intel driver
-soundwire-intel-objs := intel.o intel_init.o
+soundwire-intel-y := intel.o intel_init.o
obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o
#Qualcomm driver
-soundwire-qcom-objs := qcom.o
+soundwire-qcom-y := qcom.o
obj-$(CONFIG_SOUNDWIRE_QCOM) += soundwire-qcom.o
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 24ba77226376..e6e0fb9a81b4 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -863,13 +863,13 @@ int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
if (!slave->dev_num)
continue;
- /* Identify if Slave(s) are available on Bus */
- is_slave = true;
-
if (slave->status != SDW_SLAVE_ATTACHED &&
slave->status != SDW_SLAVE_ALERT)
continue;
+ /* Identify if Slave(s) are available on Bus */
+ is_slave = true;
+
slave_mode = sdw_get_clk_stop_mode(slave);
slave->curr_clk_stop_mode = slave_mode;
@@ -900,6 +900,10 @@ int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
return ret;
}
+ /* No need to inform slaves if there is no slave attached */
+ if (!is_slave)
+ return ret;
+
/* Inform slaves that prep is done */
list_for_each_entry(slave, &bus->slaves, node) {
if (!slave->dev_num)
@@ -985,13 +989,13 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
if (!slave->dev_num)
continue;
- /* Identify if Slave(s) are available on Bus */
- is_slave = true;
-
if (slave->status != SDW_SLAVE_ATTACHED &&
slave->status != SDW_SLAVE_ALERT)
continue;
+ /* Identify if Slave(s) are available on Bus */
+ is_slave = true;
+
mode = slave->curr_clk_stop_mode;
if (mode == SDW_CLK_STOP_MODE1) {
@@ -1016,6 +1020,13 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
if (is_slave && !simple_clk_stop)
sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
+ /*
+ * No need to call the slave callback function if there is no
+ * slave attached
+ */
+ if (!is_slave)
+ return 0;
+
list_for_each_entry(slave, &bus->slaves, node) {
if (!slave->dev_num)
continue;
@@ -1059,12 +1070,119 @@ int sdw_configure_dpn_intr(struct sdw_slave *slave,
return ret;
}
+static int sdw_slave_set_frequency(struct sdw_slave *slave)
+{
+ u32 mclk_freq = slave->bus->prop.mclk_freq;
+ u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
+ unsigned int scale;
+ u8 scale_index;
+ u8 base;
+ int ret;
+
+ /*
+ * frequency base and scale registers are required for SDCA
+ * devices. They may also be used for 1.2+/non-SDCA devices,
+ * but we will need a DisCo property to cover this case
+ */
+ if (!slave->id.class_id)
+ return 0;
+
+ if (!mclk_freq) {
+ dev_err(&slave->dev,
+ "no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
+ return -EINVAL;
+ }
+
+ /*
+ * map base frequency using Table 89 of SoundWire 1.2 spec.
+ * The order of the tests just follows the specification, this
+ * is not a selection between possible values or a search for
+ * the best value but just a mapping. Only one case per platform
+ * is relevant.
+ * Some BIOSes report inconsistent values for mclk_freq but a
+ * correct root, so we force mclk_freq to avoid variations.
+ */
+ if (!(19200000 % mclk_freq)) {
+ mclk_freq = 19200000;
+ base = SDW_SCP_BASE_CLOCK_19200000_HZ;
+ } else if (!(24000000 % mclk_freq)) {
+ mclk_freq = 24000000;
+ base = SDW_SCP_BASE_CLOCK_24000000_HZ;
+ } else if (!(24576000 % mclk_freq)) {
+ mclk_freq = 24576000;
+ base = SDW_SCP_BASE_CLOCK_24576000_HZ;
+ } else if (!(22579200 % mclk_freq)) {
+ mclk_freq = 22579200;
+ base = SDW_SCP_BASE_CLOCK_22579200_HZ;
+ } else if (!(32000000 % mclk_freq)) {
+ mclk_freq = 32000000;
+ base = SDW_SCP_BASE_CLOCK_32000000_HZ;
+ } else {
+ dev_err(&slave->dev,
+ "Unsupported clock base, mclk %d\n",
+ mclk_freq);
+ return -EINVAL;
+ }
+
+ if (mclk_freq % curr_freq) {
+ dev_err(&slave->dev,
+ "mclk %d is not multiple of bus curr_freq %d\n",
+ mclk_freq, curr_freq);
+ return -EINVAL;
+ }
+
+ scale = mclk_freq / curr_freq;
+
+ /*
+ * map scale to Table 90 of SoundWire 1.2 spec - and check
+ * that the scale is a power of two and maximum 64
+ */
+ scale_index = ilog2(scale);
+
+ if (BIT(scale_index) != scale || scale_index > 6) {
+ dev_err(&slave->dev,
+ "No match found for scale %d, bus mclk %d curr_freq %d\n",
+ scale, mclk_freq, curr_freq);
+ return -EINVAL;
+ }
+ scale_index++;
+
+ ret = sdw_write(slave, SDW_SCP_BUS_CLOCK_BASE, base);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
+ return ret;
+ }
+
+ /* initialize scale for both banks */
+ ret = sdw_write(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
+ return ret;
+ }
+ ret = sdw_write(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
+ if (ret < 0)
+ dev_err(&slave->dev,
+ "SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
+
+ dev_dbg(&slave->dev,
+ "Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
+ base, scale_index, mclk_freq, curr_freq);
+
+ return ret;
+}
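A worked pass through sdw_slave_set_frequency(), assuming mclk_freq = 9.6 MHz and curr_dr_freq = 9.6 MHz (so curr_freq = 4.8 MHz): 19200000 % 9600000 == 0, so mclk_freq is normalized to 19.2 MHz with base SDW_SCP_BASE_CLOCK_19200000_HZ; then scale = 19200000 / 4800000 = 4, scale_index = ilog2(4) + 1 = 3, and that index is written to both bank scale registers.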
+
static int sdw_initialize_slave(struct sdw_slave *slave)
{
struct sdw_slave_prop *prop = &slave->prop;
int ret;
u8 val;
+ ret = sdw_slave_set_frequency(slave);
+ if (ret < 0)
+ return ret;
+
/*
* Set bus clash, parity and SCP implementation
* defined interrupt mask
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index de9a671802b8..6fba55898cf0 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -20,14 +20,16 @@
static const struct sdw_device_id *
sdw_get_device_id(struct sdw_slave *slave, struct sdw_driver *drv)
{
- const struct sdw_device_id *id = drv->id_table;
+ const struct sdw_device_id *id;
- while (id && id->mfg_id) {
+ for (id = drv->id_table; id && id->mfg_id; id++)
if (slave->id.mfg_id == id->mfg_id &&
- slave->id.part_id == id->part_id)
+ slave->id.part_id == id->part_id &&
+ (!id->sdw_version ||
+ slave->id.sdw_version == id->sdw_version) &&
+ (!id->class_id ||
+ slave->id.class_id == id->class_id))
return id;
- id++;
- }
return NULL;
}
@@ -49,10 +51,11 @@ static int sdw_bus_match(struct device *dev, struct device_driver *ddrv)
int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size)
{
- /* modalias is sdw:m<mfg_id>p<part_id> */
+ /* modalias is sdw:m<mfg_id>p<part_id>v<version>c<class_id> */
- return snprintf(buf, size, "sdw:m%04Xp%04X\n",
- slave->id.mfg_id, slave->id.part_id);
+ return snprintf(buf, size, "sdw:m%04Xp%04Xv%02Xc%02X\n",
+ slave->id.mfg_id, slave->id.part_id,
+ slave->id.sdw_version, slave->id.class_id);
}
int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env)
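For example, a hypothetical slave with mfg_id 0x025d, part_id 0x0700, sdw_version 0x2 and class_id 0 now reports the modalias sdw:m025Dp0700v02c00. Because zero-valued sdw_version/class_id entries in a driver's id table are treated as wildcards by sdw_get_device_id(), existing two-field tables keep matching.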
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 9ea87538b9ef..24eafe0aa1c3 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -17,6 +17,7 @@
#include <linux/soundwire/sdw.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <linux/workqueue.h>
#include "bus.h"
#include "cadence_master.h"
@@ -790,7 +791,7 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
CDNS_MCP_INT_SLAVE_MASK, 0);
int_status &= ~CDNS_MCP_INT_SLAVE_MASK;
- ret = IRQ_WAKE_THREAD;
+ schedule_work(&cdns->work);
}
cdns_writel(cdns, CDNS_MCP_INTSTAT, int_status);
@@ -799,13 +800,15 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
EXPORT_SYMBOL(sdw_cdns_irq);
/**
- * sdw_cdns_thread() - Cadence irq thread handler
- * @irq: irq number
- * @dev_id: irq context
+ * cdns_update_slave_status_work - update slave status in a work, since
+ * other interrupts (e.g. CDNS_MCP_INT_RX_WL) still need to be handled
+ * while the slave status update is in progress
+ * @work: cdns work item
*/
-irqreturn_t sdw_cdns_thread(int irq, void *dev_id)
+static void cdns_update_slave_status_work(struct work_struct *work)
{
- struct sdw_cdns *cdns = dev_id;
+ struct sdw_cdns *cdns =
+ container_of(work, struct sdw_cdns, work);
u32 slave0, slave1;
dev_dbg_ratelimited(cdns->dev, "Slave status change\n");
@@ -822,9 +825,7 @@ irqreturn_t sdw_cdns_thread(int irq, void *dev_id)
cdns_updatel(cdns, CDNS_MCP_INTMASK,
CDNS_MCP_INT_SLAVE_MASK, CDNS_MCP_INT_SLAVE_MASK);
- return IRQ_HANDLED;
}
-EXPORT_SYMBOL(sdw_cdns_thread);
/*
* init routines
@@ -1427,6 +1428,7 @@ int sdw_cdns_probe(struct sdw_cdns *cdns)
init_completion(&cdns->tx_complete);
cdns->bus.port_ops = &cdns_port_ops;
+ INIT_WORK(&cdns->work, cdns_update_slave_status_work);
return 0;
}
EXPORT_SYMBOL(sdw_cdns_probe);
@@ -1437,25 +1439,49 @@ int cdns_set_sdw_stream(struct snd_soc_dai *dai,
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_cdns_dma_data *dma;
- dma = kzalloc(sizeof(*dma), GFP_KERNEL);
- if (!dma)
- return -ENOMEM;
+ if (stream) {
+ /* first paranoia check */
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ dma = dai->playback_dma_data;
+ else
+ dma = dai->capture_dma_data;
+
+ if (dma) {
+ dev_err(dai->dev,
+ "dma_data already allocated for dai %s\n",
+ dai->name);
+ return -EINVAL;
+ }
- if (pcm)
- dma->stream_type = SDW_STREAM_PCM;
- else
- dma->stream_type = SDW_STREAM_PDM;
+ /* allocate and set dma info */
+ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
- dma->bus = &cdns->bus;
- dma->link_id = cdns->instance;
+ if (pcm)
+ dma->stream_type = SDW_STREAM_PCM;
+ else
+ dma->stream_type = SDW_STREAM_PDM;
- dma->stream = stream;
+ dma->bus = &cdns->bus;
+ dma->link_id = cdns->instance;
- if (direction == SNDRV_PCM_STREAM_PLAYBACK)
- dai->playback_dma_data = dma;
- else
- dai->capture_dma_data = dma;
+ dma->stream = stream;
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ dai->playback_dma_data = dma;
+ else
+ dai->capture_dma_data = dma;
+ } else {
+ /* for NULL stream we release allocated dma_data */
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ kfree(dai->playback_dma_data);
+ dai->playback_dma_data = NULL;
+ } else {
+ kfree(dai->capture_dma_data);
+ dai->capture_dma_data = NULL;
+ }
+ }
return 0;
}
EXPORT_SYMBOL(cdns_set_sdw_stream);
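With this rework, passing a NULL stream releases the per-direction dma_data instead of leaking it. A hedged caller sketch (the DAI-ops wiring is assumed, not shown in this diff):

/* Hypothetical teardown for a DAI whose stream was set earlier. */
static void example_release_stream(struct snd_soc_dai *dai, int direction)
{
	/* the pcm flag is irrelevant when releasing (stream == NULL) */
	cdns_set_sdw_stream(dai, NULL, true, direction);
}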
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index b410656f8194..7638858397df 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -129,6 +129,10 @@ struct sdw_cdns {
bool link_up;
unsigned int msg_count;
+
+ struct work_struct work;
+
+ struct list_head list;
};
#define bus_to_cdns(_bus) container_of(_bus, struct sdw_cdns, bus)
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index c7422740edd4..a283670659a9 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <sound/pcm_params.h>
+#include <linux/pm_runtime.h>
#include <sound/soc.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
@@ -46,7 +47,8 @@
#define SDW_SHIM_LCTL_SPA BIT(0)
#define SDW_SHIM_LCTL_CPA BIT(8)
-#define SDW_SHIM_SYNC_SYNCPRD_VAL 0x176F
+#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1)
+#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1)
#define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0)
#define SDW_SHIM_SYNC_SYNCCPU BIT(15)
#define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16)
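With the Cadence gsync counter at its typical 4 kHz (SDW_CADENCE_GSYNC_KHZ), these evaluate to 24000/4 - 1 = 5999, i.e. the old hard-coded 0x176F, and 38400/4 - 1 = 9599, covering the 24 MHz and 38.4 MHz oscillator cases handled in intel_link_power_up() below.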
@@ -92,23 +94,12 @@
#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0)
#define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16)
-#define SDW_INTEL_QUIRK_MASK_BUS_DISABLE BIT(1)
-
enum intel_pdi_type {
INTEL_PDI_IN = 0,
INTEL_PDI_OUT = 1,
INTEL_PDI_BD = 2,
};
-struct sdw_intel {
- struct sdw_cdns cdns;
- int instance;
- struct sdw_intel_link_res *link_res;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs;
-#endif
-};
-
#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
/*
@@ -134,40 +125,33 @@ static inline void intel_writew(void __iomem *base, int offset, u16 value)
writew(value, base + offset);
}
-static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
+static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
{
int timeout = 10;
u32 reg_read;
- writel(value, base + offset);
do {
reg_read = readl(base + offset);
- if (!(reg_read & mask))
+ if ((reg_read & mask) == target)
return 0;
timeout--;
- udelay(50);
+ usleep_range(50, 100);
} while (timeout != 0);
return -EAGAIN;
}
-static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
+static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
{
- int timeout = 10;
- u32 reg_read;
-
writel(value, base + offset);
- do {
- reg_read = readl(base + offset);
- if (reg_read & mask)
- return 0;
-
- timeout--;
- udelay(50);
- } while (timeout != 0);
+ return intel_wait_bit(base, offset, mask, 0);
+}
- return -EAGAIN;
+static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
+{
+ writel(value, base + offset);
+ return intel_wait_bit(base, offset, mask, mask);
}
/*
@@ -290,8 +274,46 @@ static int intel_link_power_up(struct sdw_intel *sdw)
{
unsigned int link_id = sdw->instance;
void __iomem *shim = sdw->link_res->shim;
+ u32 *shim_mask = sdw->link_res->shim_mask;
+ struct sdw_bus *bus = &sdw->cdns.bus;
+ struct sdw_master_prop *prop = &bus->prop;
int spa_mask, cpa_mask;
- int link_control, ret;
+ int link_control;
+ int ret = 0;
+ u32 syncprd;
+ u32 sync_reg;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ /*
+ * The hardware relies on an internal counter, typically 4 kHz,
+ * to generate the SoundWire SSP - which defines a 'safe'
+ * synchronization point between commands and audio transport
+ * and allows for multi-link synchronization. The SYNCPRD value
+ * depends only on the oscillator clock provided to
+ * the IP, so adjust based on the _DSD properties reported in DSDT
+ * tables. The values reported are based on either 24 MHz
+ * (CNL/CML) or 38.4 MHz (ICL/TGL+).
+ */
+ if (prop->mclk_freq % 6000000)
+ syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
+ else
+ syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
+
+ if (!*shim_mask) {
+ /* we first need to program the SyncPRD/CPU registers */
+ dev_dbg(sdw->cdns.dev,
+ "%s: first link up, programming SYNCPRD\n", __func__);
+
+ /* set SyncPRD period */
+ sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
+ sync_reg |= (syncprd <<
+ SDW_REG_SHIFT(SDW_SHIM_SYNC_SYNCPRD));
+
+ /* Set SyncCPU bit */
+ sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
+ intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
+ }
/* Link power up sequence */
link_control = intel_readl(shim, SDW_SHIM_LCTL);
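The magic 0x176F is gone: both SYNCPRD values now fall out of the oscillator frequency. Assuming SDW_CADENCE_GSYNC_KHZ is 4 (the 4 kHz sync counter mentioned above), the arithmetic reproduces the old constant for the 24 MHz case:

	/* syncprd = mclk_khz / gsync_khz - 1, assuming gsync = 4 kHz */
	SYNCPRD_VAL_24   = 24000 / 4 - 1 = 5999	/* = 0x176F, the old value */
	SYNCPRD_VAL_38_4 = 38400 / 4 - 1 = 9599	/* = 0x257F */

The mclk_freq % 6000000 test works because 24 MHz divides evenly by 6 MHz while 38.4 MHz does not, so a non-zero remainder selects the 38.4 MHz period.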
@@ -300,66 +322,218 @@ static int intel_link_power_up(struct sdw_intel *sdw)
link_control |= spa_mask;
ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
+ goto out;
+ }
+
+ if (!*shim_mask) {
+ /* SyncCPU will change once link is active */
+ ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
+ SDW_SHIM_SYNC_SYNCCPU, 0);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev,
+ "Failed to set SHIM_SYNC: %d\n", ret);
+ goto out;
+ }
+ }
+
+ *shim_mask |= BIT(link_id);
sdw->cdns.link_up = true;
- return 0;
+out:
+ mutex_unlock(sdw->link_res->shim_lock);
+
+ return ret;
}
-static int intel_shim_init(struct sdw_intel *sdw)
+/* this needs to be called with shim_lock */
+static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
{
void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
- int sync_reg, ret;
- u16 ioctl = 0, act = 0;
+ u16 ioctl;
- /* Initialize Shim */
- ioctl |= SDW_SHIM_IOCTL_BKE;
+ /* Switch to MIP from Glue logic */
+ ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
+
+ ioctl &= ~(SDW_SHIM_IOCTL_DOE);
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- ioctl |= SDW_SHIM_IOCTL_WPDD;
+ ioctl &= ~(SDW_SHIM_IOCTL_DO);
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- ioctl |= SDW_SHIM_IOCTL_DO;
+ ioctl |= (SDW_SHIM_IOCTL_MIF);
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- ioctl |= SDW_SHIM_IOCTL_DOE;
+ ioctl &= ~(SDW_SHIM_IOCTL_BKE);
+ ioctl &= ~(SDW_SHIM_IOCTL_COE);
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- /* Switch to MIP from Glue logic */
- ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
+ /* at this point Master IP has full control of the I/Os */
+}
- ioctl &= ~(SDW_SHIM_IOCTL_DOE);
+/* this needs to be called with shim_lock */
+static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
+{
+ unsigned int link_id = sdw->instance;
+ void __iomem *shim = sdw->link_res->shim;
+ u16 ioctl;
+
+ /* Glue logic */
+ ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
+ ioctl |= SDW_SHIM_IOCTL_BKE;
+ ioctl |= SDW_SHIM_IOCTL_COE;
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- ioctl &= ~(SDW_SHIM_IOCTL_DO);
+ ioctl &= ~(SDW_SHIM_IOCTL_MIF);
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- ioctl |= (SDW_SHIM_IOCTL_MIF);
+ /* at this point Integration Glue has full control of the I/Os */
+}
+
+static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop)
+{
+ void __iomem *shim = sdw->link_res->shim;
+ unsigned int link_id = sdw->instance;
+ int ret = 0;
+ u16 ioctl = 0, act = 0;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ /* Initialize Shim */
+ ioctl |= SDW_SHIM_IOCTL_BKE;
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
- ioctl &= ~(SDW_SHIM_IOCTL_BKE);
- ioctl &= ~(SDW_SHIM_IOCTL_COE);
+ ioctl |= SDW_SHIM_IOCTL_WPDD;
+ intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
+ ioctl |= SDW_SHIM_IOCTL_DO;
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
+
+ ioctl |= SDW_SHIM_IOCTL_DOE;
+ intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+ usleep_range(10, 15);
+
+ intel_shim_glue_to_master_ip(sdw);
act |= 0x1 << SDW_REG_SHIFT(SDW_SHIM_CTMCTL_DOAIS);
act |= SDW_SHIM_CTMCTL_DACTQE;
act |= SDW_SHIM_CTMCTL_DODS;
intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
+ usleep_range(10, 15);
+
+ mutex_unlock(sdw->link_res->shim_lock);
+
+ return ret;
+}
+
+static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
+{
+ void __iomem *shim = sdw->link_res->shim;
+ unsigned int link_id = sdw->instance;
+ u16 wake_en, wake_sts;
+
+ mutex_lock(sdw->link_res->shim_lock);
+ wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
+
+ if (wake_enable) {
+ /* Enable the wakeup */
+ wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
+ intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
+ } else {
+ /* Disable the wake up interrupt */
+ wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
+ intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
+
+ /* Clear wake status */
+ wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
+ wake_sts |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
+ intel_writew(shim, SDW_SHIM_WAKESTS_STATUS, wake_sts);
+ }
+ mutex_unlock(sdw->link_res->shim_lock);
+}
+
+static int __maybe_unused intel_link_power_down(struct sdw_intel *sdw)
+{
+ int link_control, spa_mask, cpa_mask;
+ unsigned int link_id = sdw->instance;
+ void __iomem *shim = sdw->link_res->shim;
+ u32 *shim_mask = sdw->link_res->shim_mask;
+ int ret = 0;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ intel_shim_master_ip_to_glue(sdw);
+
+ /* Link power down sequence */
+ link_control = intel_readl(shim, SDW_SHIM_LCTL);
+ spa_mask = ~(SDW_SHIM_LCTL_SPA << link_id);
+ cpa_mask = (SDW_SHIM_LCTL_CPA << link_id);
+ link_control &= spa_mask;
+
+ ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
+
+ if (!(*shim_mask & BIT(link_id)))
+ dev_err(sdw->cdns.dev,
+ "%s: Unbalanced power-up/down calls\n", __func__);
+
+ *shim_mask &= ~BIT(link_id);
+
+ mutex_unlock(sdw->link_res->shim_lock);
+
+ if (ret < 0)
+ return ret;
+
+ sdw->cdns.link_up = false;
+ return 0;
+}
+
+static void intel_shim_sync_arm(struct sdw_intel *sdw)
+{
+ void __iomem *shim = sdw->link_res->shim;
+ u32 sync_reg;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ /* update SYNC register */
+ sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
+ sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
+ intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
+
+ mutex_unlock(sdw->link_res->shim_lock);
+}
+
+static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
+{
+ void __iomem *shim = sdw->link_res->shim;
+ u32 sync_reg;
+ int ret;
- /* Now set SyncPRD period */
+ /* Read SYNC register */
sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
- sync_reg |= (SDW_SHIM_SYNC_SYNCPRD_VAL <<
- SDW_REG_SHIFT(SDW_SHIM_SYNC_SYNCPRD));
- /* Set SyncCPU bit */
- sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
+ /*
+ * Set SyncGO bit to synchronously trigger a bank switch for
+ * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
+ * the Masters.
+ */
+ sync_reg |= SDW_SHIM_SYNC_SYNCGO;
+
ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
- SDW_SHIM_SYNC_SYNCCPU);
+ SDW_SHIM_SYNC_SYNCGO);
+
if (ret < 0)
- dev_err(sdw->cdns.dev, "Failed to set sync period: %d\n", ret);
+ dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);
return ret;
}
@@ -577,17 +751,12 @@ static int intel_pre_bank_switch(struct sdw_bus *bus)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
struct sdw_intel *sdw = cdns_to_intel(cdns);
- void __iomem *shim = sdw->link_res->shim;
- int sync_reg;
/* Write to register only for multi-link */
if (!bus->multi_link)
return 0;
- /* Read SYNC register */
- sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
- sync_reg |= SDW_SHIM_SYNC_CMDSYNC << sdw->instance;
- intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
+ intel_shim_sync_arm(sdw);
return 0;
}
@@ -603,6 +772,8 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
if (!bus->multi_link)
return 0;
+ mutex_lock(sdw->link_res->shim_lock);
+
/* Read SYNC register */
sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
@@ -614,18 +785,15 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
*
* So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
*/
- if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK))
- return 0;
+ if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
+ ret = 0;
+ goto unlock;
+ }
- /*
- * Set SyncGO bit to synchronously trigger a bank switch for
- * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
- * the Masters.
- */
- sync_reg |= SDW_SHIM_SYNC_SYNCGO;
+ ret = intel_shim_sync_go_unlocked(sdw);
+unlock:
+ mutex_unlock(sdw->link_res->shim_lock);
- ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
- SDW_SHIM_SYNC_SYNCGO);
if (ret < 0)
dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
@@ -636,57 +804,6 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
* DAI routines
*/
-static int sdw_stream_setup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct sdw_stream_runtime *sdw_stream = NULL;
- char *name;
- int i, ret;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- name = kasprintf(GFP_KERNEL, "%s-Playback", dai->name);
- else
- name = kasprintf(GFP_KERNEL, "%s-Capture", dai->name);
-
- if (!name)
- return -ENOMEM;
-
- sdw_stream = sdw_alloc_stream(name);
- if (!sdw_stream) {
- dev_err(dai->dev, "alloc stream failed for DAI %s", dai->name);
- ret = -ENOMEM;
- goto error;
- }
-
- /* Set stream pointer on CPU DAI */
- ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream);
- if (ret < 0) {
- dev_err(dai->dev, "failed to set stream pointer on cpu dai %s",
- dai->name);
- goto release_stream;
- }
-
- /* Set stream pointer on all CODEC DAIs */
- for (i = 0; i < rtd->num_codecs; i++) {
- ret = snd_soc_dai_set_sdw_stream(asoc_rtd_to_codec(rtd, i), sdw_stream,
- substream->stream);
- if (ret < 0) {
- dev_err(dai->dev, "failed to set stream pointer on codec dai %s",
- asoc_rtd_to_codec(rtd, i)->name);
- goto release_stream;
- }
- }
-
- return 0;
-
-release_stream:
- sdw_release_stream(sdw_stream);
-error:
- kfree(name);
- return ret;
-}
-
static int intel_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -694,8 +811,7 @@ static int intel_startup(struct snd_pcm_substream *substream,
* TODO: add pm_runtime support here, the startup callback
* will make sure the IP is 'active'
*/
-
- return sdw_stream_setup(substream, dai);
+ return 0;
}
static int intel_hw_params(struct snd_pcm_substream *substream,
@@ -863,23 +979,13 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
return ret;
}
- kfree(dma->stream->name);
- sdw_release_stream(dma->stream);
-
return 0;
}
static void intel_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct sdw_cdns_dma_data *dma;
- dma = snd_soc_dai_get_dma_data(dai, substream);
- if (!dma)
- return;
-
- snd_soc_dai_set_dma_data(dai, substream, NULL);
- kfree(dma);
}
static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
@@ -894,6 +1000,22 @@ static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
return cdns_set_sdw_stream(dai, stream, false, direction);
}
+static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
+ int direction)
+{
+ struct sdw_cdns_dma_data *dma;
+
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ dma = dai->playback_dma_data;
+ else
+ dma = dai->capture_dma_data;
+
+ if (!dma)
+ return NULL;
+
+ return dma->stream;
+}
+
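intel_get_sdw_stream() is the read-side counterpart of set_sdw_stream: it returns the stream saved in the DAI's dma_data, which lets common code (see sdw_shutdown_stream() in stream.c below) recover the stream from any DAI rather than caching it elsewhere. Through the generic wrapper:

	/* sketch: recover the stream that was attached at startup
	 * (a DAI without the get op may return an ERR_PTR instead)
	 */
	struct sdw_stream_runtime *stream;

	stream = snd_soc_dai_get_sdw_stream(dai, SNDRV_PCM_STREAM_PLAYBACK);
	if (!stream)
		return;	/* nothing attached for this direction */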
static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
.startup = intel_startup,
.hw_params = intel_hw_params,
@@ -902,6 +1024,7 @@ static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
.hw_free = intel_hw_free,
.shutdown = intel_shutdown,
.set_sdw_stream = intel_pcm_set_sdw_stream,
+ .get_sdw_stream = intel_get_sdw_stream,
};
static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
@@ -912,6 +1035,7 @@ static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
.hw_free = intel_hw_free,
.shutdown = intel_shutdown,
.set_sdw_stream = intel_pdm_set_sdw_stream,
+ .get_sdw_stream = intel_get_sdw_stream,
};
static const struct snd_soc_component_driver dai_component = {
@@ -1074,9 +1198,17 @@ static struct sdw_master_ops sdw_intel_ops = {
static int intel_init(struct sdw_intel *sdw)
{
+ bool clock_stop;
+
/* Initialize shim and controller */
intel_link_power_up(sdw);
- intel_shim_init(sdw);
+
+ clock_stop = sdw_cdns_is_clock_stop(&sdw->cdns);
+
+ intel_shim_init(sdw, clock_stop);
+
+ if (clock_stop)
+ return 0;
return sdw_cdns_init(&sdw->cdns);
}
@@ -1084,41 +1216,66 @@ static int intel_init(struct sdw_intel *sdw)
/*
* probe and init
*/
-static int intel_probe(struct platform_device *pdev)
+static int intel_master_probe(struct platform_device *pdev)
{
- struct sdw_cdns_stream_config config;
+ struct device *dev = &pdev->dev;
struct sdw_intel *sdw;
+ struct sdw_cdns *cdns;
+ struct sdw_bus *bus;
int ret;
- sdw = devm_kzalloc(&pdev->dev, sizeof(*sdw), GFP_KERNEL);
+ sdw = devm_kzalloc(dev, sizeof(*sdw), GFP_KERNEL);
if (!sdw)
return -ENOMEM;
+ cdns = &sdw->cdns;
+ bus = &cdns->bus;
+
sdw->instance = pdev->id;
- sdw->link_res = dev_get_platdata(&pdev->dev);
- sdw->cdns.dev = &pdev->dev;
- sdw->cdns.registers = sdw->link_res->registers;
- sdw->cdns.instance = sdw->instance;
- sdw->cdns.msg_count = 0;
- sdw->cdns.bus.link_id = pdev->id;
+ sdw->link_res = dev_get_platdata(dev);
+ cdns->dev = dev;
+ cdns->registers = sdw->link_res->registers;
+ cdns->instance = sdw->instance;
+ cdns->msg_count = 0;
+
+ bus->link_id = pdev->id;
- sdw_cdns_probe(&sdw->cdns);
+ sdw_cdns_probe(cdns);
/* Set property read ops */
sdw_intel_ops.read_prop = intel_prop_read;
- sdw->cdns.bus.ops = &sdw_intel_ops;
+ bus->ops = &sdw_intel_ops;
- platform_set_drvdata(pdev, sdw);
+ /* set driver data, accessed by snd_soc_dai_get_drvdata() */
+ dev_set_drvdata(dev, cdns);
- ret = sdw_bus_master_add(&sdw->cdns.bus, &pdev->dev, pdev->dev.fwnode);
+ ret = sdw_bus_master_add(bus, dev, dev->fwnode);
if (ret) {
- dev_err(&pdev->dev, "sdw_bus_master_add fail: %d\n", ret);
+ dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
return ret;
}
- if (sdw->cdns.bus.prop.hw_disabled) {
- dev_info(&pdev->dev, "SoundWire master %d is disabled, ignoring\n",
- sdw->cdns.bus.link_id);
+ if (bus->prop.hw_disabled)
+ dev_info(dev,
+ "SoundWire master %d is disabled, will be ignored\n",
+ bus->link_id);
+
+ return 0;
+}
+
+int intel_master_startup(struct platform_device *pdev)
+{
+ struct sdw_cdns_stream_config config;
+ struct device *dev = &pdev->dev;
+ struct sdw_cdns *cdns = dev_get_drvdata(dev);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_bus *bus = &cdns->bus;
+ int ret;
+
+ if (bus->prop.hw_disabled) {
+ dev_info(dev,
+ "SoundWire master %d is disabled, ignoring\n",
+ sdw->instance);
return 0;
}
@@ -1129,39 +1286,29 @@ static int intel_probe(struct platform_device *pdev)
/* Read the PDI config and initialize cadence PDI */
intel_pdi_init(sdw, &config);
- ret = sdw_cdns_pdi_init(&sdw->cdns, config);
+ ret = sdw_cdns_pdi_init(cdns, config);
if (ret)
goto err_init;
intel_pdi_ch_update(sdw);
- /* Acquire IRQ */
- ret = request_threaded_irq(sdw->link_res->irq,
- sdw_cdns_irq, sdw_cdns_thread,
- IRQF_SHARED, KBUILD_MODNAME, &sdw->cdns);
+ ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
- dev_err(sdw->cdns.dev, "unable to grab IRQ %d, disabling device\n",
- sdw->link_res->irq);
+ dev_err(dev, "cannot enable interrupts\n");
goto err_init;
}
- ret = sdw_cdns_enable_interrupt(&sdw->cdns, true);
+ ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
- dev_err(sdw->cdns.dev, "cannot enable interrupts\n");
- goto err_init;
- }
-
- ret = sdw_cdns_exit_reset(&sdw->cdns);
- if (ret < 0) {
- dev_err(sdw->cdns.dev, "unable to exit bus reset sequence\n");
+ dev_err(dev, "unable to exit bus reset sequence\n");
goto err_interrupt;
}
/* Register DAIs */
ret = intel_register_dai(sdw);
if (ret) {
- dev_err(sdw->cdns.dev, "DAI registration failed: %d\n", ret);
- snd_soc_unregister_component(sdw->cdns.dev);
+ dev_err(dev, "DAI registration failed: %d\n", ret);
+ snd_soc_unregister_component(dev);
goto err_interrupt;
}
@@ -1170,41 +1317,75 @@ static int intel_probe(struct platform_device *pdev)
return 0;
err_interrupt:
- sdw_cdns_enable_interrupt(&sdw->cdns, false);
- free_irq(sdw->link_res->irq, sdw);
+ sdw_cdns_enable_interrupt(cdns, false);
err_init:
- sdw_bus_master_delete(&sdw->cdns.bus);
return ret;
}
-static int intel_remove(struct platform_device *pdev)
+static int intel_master_remove(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct sdw_cdns *cdns = dev_get_drvdata(dev);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_bus *bus = &cdns->bus;
+
+ if (!bus->prop.hw_disabled) {
+ intel_debugfs_exit(sdw);
+ sdw_cdns_enable_interrupt(cdns, false);
+ snd_soc_unregister_component(dev);
+ }
+ sdw_bus_master_delete(bus);
+
+ return 0;
+}
+
+int intel_master_process_wakeen_event(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
struct sdw_intel *sdw;
+ struct sdw_bus *bus;
+ void __iomem *shim;
+ u16 wake_sts;
sdw = platform_get_drvdata(pdev);
+ bus = &sdw->cdns.bus;
- if (!sdw->cdns.bus.prop.hw_disabled) {
- intel_debugfs_exit(sdw);
- sdw_cdns_enable_interrupt(&sdw->cdns, false);
- free_irq(sdw->link_res->irq, sdw);
- snd_soc_unregister_component(sdw->cdns.dev);
+ if (bus->prop.hw_disabled) {
+ dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n", bus->link_id);
+ return 0;
}
- sdw_bus_master_delete(&sdw->cdns.bus);
+
+ shim = sdw->link_res->shim;
+ wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
+
+ if (!(wake_sts & BIT(sdw->instance)))
+ return 0;
+
+ /* disable WAKEEN interrupt ASAP to prevent interrupt flood */
+ intel_shim_wake(sdw, false);
+
+ /*
+ * resume the Master, which will generate a bus reset and result in
+ * Slaves re-attaching and being re-enumerated. The SoundWire physical
+ * device which generated the wake will trigger an interrupt, which
+ * will in turn cause the corresponding Linux Slave device to be
+ * resumed and the Slave codec driver to check the status.
+ */
+ pm_request_resume(dev);
return 0;
}
static struct platform_driver sdw_intel_drv = {
- .probe = intel_probe,
- .remove = intel_remove,
+ .probe = intel_master_probe,
+ .remove = intel_master_remove,
.driver = {
- .name = "int-sdw",
-
+ .name = "intel-sdw",
},
};
module_platform_driver(sdw_intel_drv);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_ALIAS("platform:int-sdw");
+MODULE_ALIAS("platform:intel-sdw");
MODULE_DESCRIPTION("Intel Soundwire Master Driver");
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index 38b7c125fb10..4ea3d262d249 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -15,6 +15,10 @@
* @irq: Interrupt line
* @ops: Shim callback ops
* @dev: device implementing hw_params and free callbacks
+ * @shim_lock: mutex to handle access to shared SHIM registers
+ * @shim_mask: global pointer to check SHIM register initialization
+ * @cdns: Cadence master descriptor
+ * @list: used to walk through all masters exposed by the same controller
*/
struct sdw_intel_link_res {
struct platform_device *pdev;
@@ -25,6 +29,24 @@ struct sdw_intel_link_res {
int irq;
const struct sdw_intel_ops *ops;
struct device *dev;
+ struct mutex *shim_lock; /* protect shared registers */
+ u32 *shim_mask;
+ struct sdw_cdns *cdns;
+ struct list_head list;
};
+struct sdw_intel {
+ struct sdw_cdns cdns;
+ int instance;
+ struct sdw_intel_link_res *link_res;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+};
+
+#define SDW_INTEL_QUIRK_MASK_BUS_DISABLE BIT(1)
+
+int intel_master_startup(struct platform_device *pdev);
+int intel_master_process_wakeen_event(struct platform_device *pdev);
+
#endif /* __SDW_INTEL_LOCAL_H */
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index d5d42795a48f..047252a91c9e 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -9,10 +9,12 @@
#include <linux/acpi.h>
#include <linux/export.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/soundwire/sdw_intel.h>
+#include "cadence_master.h"
#include "intel.h"
#define SDW_LINK_TYPE 4 /* from Intel ACPI documentation */
@@ -23,138 +25,324 @@
#define SDW_LINK_BASE 0x30000
#define SDW_LINK_SIZE 0x10000
-static int link_mask;
-module_param_named(sdw_link_mask, link_mask, int, 0444);
+static int ctrl_link_mask;
+module_param_named(sdw_link_mask, ctrl_link_mask, int, 0444);
MODULE_PARM_DESC(sdw_link_mask, "Intel link mask (one bit per link)");
-static int sdw_intel_cleanup_pdev(struct sdw_intel_ctx *ctx)
+static bool is_link_enabled(struct fwnode_handle *fw_node, int i)
+{
+ struct fwnode_handle *link;
+ char name[32];
+ u32 quirk_mask = 0;
+
+ /* Find master handle */
+ snprintf(name, sizeof(name),
+ "mipi-sdw-link-%d-subproperties", i);
+
+ link = fwnode_get_named_child_node(fw_node, name);
+ if (!link)
+ return false;
+
+ fwnode_property_read_u32(link,
+ "intel-quirk-mask",
+ &quirk_mask);
+
+ if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
+ return false;
+
+ return true;
+}
+
+static int sdw_intel_cleanup(struct sdw_intel_ctx *ctx)
{
struct sdw_intel_link_res *link = ctx->links;
+ u32 link_mask;
int i;
if (!link)
return 0;
- for (i = 0; i < ctx->count; i++) {
+ link_mask = ctx->link_mask;
+
+ for (i = 0; i < ctx->count; i++, link++) {
+ if (!(link_mask & BIT(i)))
+ continue;
+
if (link->pdev)
platform_device_unregister(link->pdev);
- link++;
}
- kfree(ctx->links);
- ctx->links = NULL;
-
return 0;
}
-static struct sdw_intel_ctx
-*sdw_intel_add_controller(struct sdw_intel_res *res)
+static int
+sdw_intel_scan_controller(struct sdw_intel_acpi_info *info)
{
- struct platform_device_info pdevinfo;
- struct platform_device *pdev;
- struct sdw_intel_link_res *link;
- struct sdw_intel_ctx *ctx;
struct acpi_device *adev;
int ret, i;
u8 count;
- u32 caps;
- if (acpi_bus_get_device(res->handle, &adev))
- return NULL;
+ if (acpi_bus_get_device(info->handle, &adev))
+ return -EINVAL;
/* Found controller, find links supported */
count = 0;
ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
"mipi-sdw-master-count", &count, 1);
- /* Don't fail on error, continue and use hw value */
+ /*
+ * In theory we could check the number of links supported in
+ * hardware, but in that step we cannot assume SoundWire IP is
+ * powered.
+ *
+ * In addition, if the BIOS doesn't even provide this
+ * 'master-count' property then all the inits based on link
+ * masks will fail as well.
+ *
+ * We will check the hardware capabilities in the startup() step
+ */
+
if (ret) {
dev_err(&adev->dev,
"Failed to read mipi-sdw-master-count: %d\n", ret);
- count = SDW_MAX_LINKS;
+ return -EINVAL;
}
- /* Check SNDWLCAP.LCOUNT */
- caps = ioread32(res->mmio_base + SDW_SHIM_BASE + SDW_SHIM_LCAP);
- caps &= GENMASK(2, 0);
-
- /* Check HW supported vs property value and use min of two */
- count = min_t(u8, caps, count);
-
/* Check count is within bounds */
if (count > SDW_MAX_LINKS) {
dev_err(&adev->dev, "Link count %d exceeds max %d\n",
count, SDW_MAX_LINKS);
- return NULL;
+ return -EINVAL;
}
if (!count) {
dev_warn(&adev->dev, "No SoundWire links detected\n");
- return NULL;
+ return -EINVAL;
+ }
+ dev_dbg(&adev->dev, "ACPI reports %d SDW Link devices\n", count);
+
+ info->count = count;
+ info->link_mask = 0;
+
+ for (i = 0; i < count; i++) {
+ if (ctrl_link_mask && !(ctrl_link_mask & BIT(i))) {
+ dev_dbg(&adev->dev,
+ "Link %d masked, will not be enabled\n", i);
+ continue;
+ }
+
+ if (!is_link_enabled(acpi_fwnode_handle(adev), i)) {
+ dev_dbg(&adev->dev,
+ "Link %d not selected in firmware\n", i);
+ continue;
+ }
+
+ info->link_mask |= BIT(i);
}
+ return 0;
+}
+
+#define HDA_DSP_REG_ADSPIC2 (0x10)
+#define HDA_DSP_REG_ADSPIS2 (0x14)
+#define HDA_DSP_REG_ADSPIC2_SNDW BIT(5)
+
+/**
+ * sdw_intel_enable_irq() - enable/disable Intel SoundWire IRQ
+ * @mmio_base: The mmio base of the control register
+ * @enable: true if enable
+ */
+void sdw_intel_enable_irq(void __iomem *mmio_base, bool enable)
+{
+ u32 val;
+
+ val = readl(mmio_base + HDA_DSP_REG_ADSPIC2);
+
+ if (enable)
+ val |= HDA_DSP_REG_ADSPIC2_SNDW;
+ else
+ val &= ~HDA_DSP_REG_ADSPIC2_SNDW;
+
+ writel(val, mmio_base + HDA_DSP_REG_ADSPIC2);
+}
+EXPORT_SYMBOL_NS(sdw_intel_enable_irq, SOUNDWIRE_INTEL_INIT);
+
+irqreturn_t sdw_intel_thread(int irq, void *dev_id)
+{
+ struct sdw_intel_ctx *ctx = dev_id;
+ struct sdw_intel_link_res *link;
+
+ list_for_each_entry(link, &ctx->link_list, list)
+ sdw_cdns_irq(irq, link->cdns);
+
+ sdw_intel_enable_irq(ctx->mmio_base, true);
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_NS(sdw_intel_thread, SOUNDWIRE_INTEL_INIT);
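All links now share one HDA-level interrupt: the hard handler is expected to mask the SoundWire source with sdw_intel_enable_irq() and defer to sdw_intel_thread(), which walks link_list, lets each Cadence instance service its status, then re-arms the source. A sketch of how the owning DSP driver might wire this up (handler name and flow are illustrative; the real integration lives in the SOF driver):

	static irqreturn_t dsp_sdw_irq(int irq, void *dev_id)
	{
		struct sdw_intel_ctx *ctx = dev_id;

		/* mask until the thread has drained all links */
		sdw_intel_enable_irq(ctx->mmio_base, false);
		return IRQ_WAKE_THREAD;
	}

	ret = request_threaded_irq(irq, dsp_sdw_irq, sdw_intel_thread,
				   IRQF_SHARED, "sdw-intel", ctx);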
+
+static struct sdw_intel_ctx
+*sdw_intel_probe_controller(struct sdw_intel_res *res)
+{
+ struct platform_device_info pdevinfo;
+ struct platform_device *pdev;
+ struct sdw_intel_link_res *link;
+ struct sdw_intel_ctx *ctx;
+ struct acpi_device *adev;
+ struct sdw_slave *slave;
+ struct list_head *node;
+ struct sdw_bus *bus;
+ u32 link_mask;
+ int num_slaves = 0;
+ int count;
+ int i;
+
+ if (!res)
+ return NULL;
+
+ if (acpi_bus_get_device(res->handle, &adev))
+ return NULL;
+
+ if (!res->count)
+ return NULL;
+
+ count = res->count;
dev_dbg(&adev->dev, "Creating %d SDW Link devices\n", count);
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(&adev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
ctx->count = count;
- ctx->links = kcalloc(ctx->count, sizeof(*ctx->links), GFP_KERNEL);
+ ctx->links = devm_kcalloc(&adev->dev, ctx->count,
+ sizeof(*ctx->links), GFP_KERNEL);
if (!ctx->links)
- goto link_err;
+ return NULL;
+
+ ctx->count = count;
+ ctx->mmio_base = res->mmio_base;
+ ctx->link_mask = res->link_mask;
+ ctx->handle = res->handle;
+ mutex_init(&ctx->shim_lock);
link = ctx->links;
+ link_mask = ctx->link_mask;
+
+ INIT_LIST_HEAD(&ctx->link_list);
/* Create SDW Master devices */
- for (i = 0; i < count; i++) {
- if (link_mask && !(link_mask & BIT(i))) {
+ for (i = 0; i < count; i++, link++) {
+ if (!(link_mask & BIT(i))) {
dev_dbg(&adev->dev,
"Link %d masked, will not be enabled\n", i);
- link++;
continue;
}
+ link->mmio_base = res->mmio_base;
link->registers = res->mmio_base + SDW_LINK_BASE
- + (SDW_LINK_SIZE * i);
+ + (SDW_LINK_SIZE * i);
link->shim = res->mmio_base + SDW_SHIM_BASE;
link->alh = res->mmio_base + SDW_ALH_BASE;
link->ops = res->ops;
link->dev = res->dev;
+ link->shim_lock = &ctx->shim_lock;
+ link->shim_mask = &ctx->shim_mask;
+
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = res->parent;
- pdevinfo.name = "int-sdw";
+ pdevinfo.name = "intel-sdw";
pdevinfo.id = i;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
+ pdevinfo.data = link;
+ pdevinfo.size_data = sizeof(*link);
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
dev_err(&adev->dev,
"platform device creation failed: %ld\n",
PTR_ERR(pdev));
- goto pdev_err;
+ goto err;
}
-
link->pdev = pdev;
- link++;
+ link->cdns = platform_get_drvdata(pdev);
+
+ list_add_tail(&link->list, &ctx->link_list);
+ bus = &link->cdns->bus;
+ /* Calculate number of slaves */
+ list_for_each(node, &bus->slaves)
+ num_slaves++;
+ }
+
+ ctx->ids = devm_kcalloc(&adev->dev, num_slaves,
+ sizeof(*ctx->ids), GFP_KERNEL);
+ if (!ctx->ids)
+ goto err;
+
+ ctx->num_slaves = num_slaves;
+ i = 0;
+ list_for_each_entry(link, &ctx->link_list, list) {
+ bus = &link->cdns->bus;
+ list_for_each_entry(slave, &bus->slaves, node) {
+ ctx->ids[i].id = slave->id;
+ ctx->ids[i].link_id = bus->link_id;
+ i++;
+ }
}
return ctx;
-pdev_err:
- sdw_intel_cleanup_pdev(ctx);
-link_err:
- kfree(ctx);
+err:
+ ctx->count = i;
+ sdw_intel_cleanup(ctx);
return NULL;
}
+static int
+sdw_intel_startup_controller(struct sdw_intel_ctx *ctx)
+{
+ struct acpi_device *adev;
+ struct sdw_intel_link_res *link;
+ u32 caps;
+ u32 link_mask;
+ int i;
+
+ if (acpi_bus_get_device(ctx->handle, &adev))
+ return -EINVAL;
+
+ /* Check SNDWLCAP.LCOUNT */
+ caps = ioread32(ctx->mmio_base + SDW_SHIM_BASE + SDW_SHIM_LCAP);
+ caps &= GENMASK(2, 0);
+
+ /* Check HW supported vs property value */
+ if (caps < ctx->count) {
+ dev_err(&adev->dev,
+ "BIOS master count is larger than hardware capabilities\n");
+ return -EINVAL;
+ }
+
+ if (!ctx->links)
+ return -EINVAL;
+
+ link = ctx->links;
+ link_mask = ctx->link_mask;
+
+ /* Startup SDW Master devices */
+ for (i = 0; i < ctx->count; i++, link++) {
+ if (!(link_mask & BIT(i)))
+ continue;
+
+ intel_master_startup(link->pdev);
+ }
+
+ return 0;
+}
+
static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
void *cdata, void **return_value)
{
- struct sdw_intel_res *res = cdata;
+ struct sdw_intel_acpi_info *info = cdata;
struct acpi_device *adev;
acpi_status status;
u64 adr;
@@ -168,7 +356,7 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
return AE_NOT_FOUND;
}
- res->handle = handle;
+ info->handle = handle;
/*
* On some Intel platforms, multiple children of the HDAS
@@ -185,39 +373,93 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
}
/**
- * sdw_intel_init() - SoundWire Intel init routine
+ * sdw_intel_acpi_scan() - SoundWire Intel init routine
* @parent_handle: ACPI parent handle
- * @res: resource data
+ * @info: description of what firmware/DSDT tables expose
*
- * This scans the namespace and creates SoundWire link controller devices
- * based on the info queried.
+ * This scans the namespace and queries firmware to figure out which
+ * links to enable. A follow-up use of sdw_intel_probe() and
+ * sdw_intel_startup() is required for creation of devices and bus
+ * startup.
*/
-void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res)
+int sdw_intel_acpi_scan(acpi_handle *parent_handle,
+ struct sdw_intel_acpi_info *info)
{
acpi_status status;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
parent_handle, 1,
sdw_intel_acpi_cb,
- NULL, res, NULL);
+ NULL, info, NULL);
if (ACPI_FAILURE(status))
- return NULL;
+ return -ENODEV;
- return sdw_intel_add_controller(res);
+ return sdw_intel_scan_controller(info);
}
+EXPORT_SYMBOL_NS(sdw_intel_acpi_scan, SOUNDWIRE_INTEL_INIT);
/**
+ * sdw_intel_probe() - SoundWire Intel probe routine
+ * @res: resource data
+ *
+ * This registers a platform device for each Master handled by the controller,
+ * and SoundWire Master and Slave devices will be created by the platform
+ * device probe. All the information necessary is stored in the context, and
+ * the res argument pointer can be freed after this step.
+ * This function will be called after sdw_intel_acpi_scan() by SOF probe.
+ */
+struct sdw_intel_ctx
+*sdw_intel_probe(struct sdw_intel_res *res)
+{
+ return sdw_intel_probe_controller(res);
+}
+EXPORT_SYMBOL_NS(sdw_intel_probe, SOUNDWIRE_INTEL_INIT);
+
+/**
+ * sdw_intel_startup() - SoundWire Intel startup
+ * @ctx: SoundWire context allocated in the probe
+ *
+ * Start up the Intel SoundWire controller. This function will be called after
+ * Intel Audio DSP is powered up.
+ */
+int sdw_intel_startup(struct sdw_intel_ctx *ctx)
+{
+ return sdw_intel_startup_controller(ctx);
+}
+EXPORT_SYMBOL_NS(sdw_intel_startup, SOUNDWIRE_INTEL_INIT);
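Together these entry points replace the old single-shot sdw_intel_init(): scan ACPI first, create the platform devices, and only start the bus once the DSP is powered. A sketch of the expected sequence on the parent side (error handling elided, field setup abbreviated):

	struct sdw_intel_acpi_info info;
	struct sdw_intel_res res = { /* handle, mmio_base, ops, dev, ... */ };
	struct sdw_intel_ctx *ctx;

	sdw_intel_acpi_scan(parent_handle, &info);	/* which links exist */
	res.count = info.count;
	res.link_mask = info.link_mask;

	ctx = sdw_intel_probe(&res);	/* platform devices, context */
	/* ... power up the audio DSP ... */
	sdw_intel_startup(ctx);		/* per-link startup, bus running */
	/* ... on teardown ... */
	sdw_intel_exit(ctx);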
+/**
* sdw_intel_exit() - SoundWire Intel exit
- * @arg: callback context
+ * @ctx: SoundWire context allocated in the probe
*
* Delete the controller instances created and cleanup
*/
void sdw_intel_exit(struct sdw_intel_ctx *ctx)
{
- sdw_intel_cleanup_pdev(ctx);
- kfree(ctx);
+ sdw_intel_cleanup(ctx);
+}
+EXPORT_SYMBOL_NS(sdw_intel_exit, SOUNDWIRE_INTEL_INIT);
+
+void sdw_intel_process_wakeen_event(struct sdw_intel_ctx *ctx)
+{
+ struct sdw_intel_link_res *link;
+ u32 link_mask;
+ int i;
+
+ if (!ctx->links)
+ return;
+
+ link = ctx->links;
+ link_mask = ctx->link_mask;
+
+ /* Process wake events on all enabled SDW Master devices */
+ for (i = 0; i < ctx->count; i++, link++) {
+ if (!(link_mask & BIT(i)))
+ continue;
+
+ intel_master_process_wakeen_event(link->pdev);
+ }
}
-EXPORT_SYMBOL(sdw_intel_exit);
+EXPORT_SYMBOL_NS(sdw_intel_process_wakeen_event, SOUNDWIRE_INTEL_INIT);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Soundwire Init Library");
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index a1c2a44a3b4d..915c2cf0c274 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -406,13 +406,13 @@ static int qcom_swrm_port_enable(struct sdw_bus *bus,
return ctrl->reg_write(ctrl, reg, val);
}
-static struct sdw_master_port_ops qcom_swrm_port_ops = {
+static const struct sdw_master_port_ops qcom_swrm_port_ops = {
.dpn_set_port_params = qcom_swrm_port_params,
.dpn_set_port_transport_params = qcom_swrm_transport_params,
.dpn_port_enable_ch = qcom_swrm_port_enable,
};
-static struct sdw_master_ops qcom_swrm_ops = {
+static const struct sdw_master_ops qcom_swrm_ops = {
.xfer_msg = qcom_swrm_xfer_msg,
.pre_bank_switch = qcom_swrm_pre_bank_switch,
};
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index a9a72574b34a..37290a799023 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
+#include <sound/soc.h>
#include "bus.h"
/*
@@ -1826,3 +1827,100 @@ state_err:
return ret;
}
EXPORT_SYMBOL(sdw_deprepare_stream);
+
+static int set_stream(struct snd_pcm_substream *substream,
+ struct sdw_stream_runtime *sdw_stream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *dai;
+ int ret = 0;
+ int i;
+
+ /* Set stream pointer on all DAIs */
+ for_each_rtd_dais(rtd, i, dai) {
+ ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream);
+ if (ret < 0) {
+ dev_err(rtd->dev, "failed to set stream pointer on dai %s", dai->name);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * sdw_startup_stream() - Startup SoundWire stream
+ *
+ * @sdw_substream: the ALSA PCM substream, passed as an opaque pointer
+ *
+ * Documentation/driver-api/soundwire/stream.rst explains this API in detail
+ */
+int sdw_startup_stream(void *sdw_substream)
+{
+ struct snd_pcm_substream *substream = sdw_substream;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sdw_stream_runtime *sdw_stream;
+ char *name;
+ int ret;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ name = kasprintf(GFP_KERNEL, "%s-Playback", substream->name);
+ else
+ name = kasprintf(GFP_KERNEL, "%s-Capture", substream->name);
+
+ if (!name)
+ return -ENOMEM;
+
+ sdw_stream = sdw_alloc_stream(name);
+ if (!sdw_stream) {
+ dev_err(rtd->dev, "alloc stream failed for substream DAI %s", substream->name);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = set_stream(substream, sdw_stream);
+ if (ret < 0)
+ goto release_stream;
+ return 0;
+
+release_stream:
+ sdw_release_stream(sdw_stream);
+ set_stream(substream, NULL);
+error:
+ kfree(name);
+ return ret;
+}
+EXPORT_SYMBOL(sdw_startup_stream);
+
+/**
+ * sdw_shutdown_stream() - Shutdown SoundWire stream
+ *
+ * @sdw_substream: the ALSA PCM substream, passed as an opaque pointer
+ *
+ * Documentation/driver-api/soundwire/stream.rst explains this API in detail
+ */
+void sdw_shutdown_stream(void *sdw_substream)
+{
+ struct snd_pcm_substream *substream = sdw_substream;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sdw_stream_runtime *sdw_stream;
+ struct snd_soc_dai *dai;
+
+ /* Find stream from first CPU DAI */
+ dai = asoc_rtd_to_cpu(rtd, 0);
+
+ sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream);
+
+ if (!sdw_stream) {
+ dev_err(rtd->dev, "no stream found for DAI %s", dai->name);
+ return;
+ }
+
+ /* release memory */
+ kfree(sdw_stream->name);
+ sdw_release_stream(sdw_stream);
+
+ /* clear DAI data */
+ set_stream(substream, NULL);
+}
+EXPORT_SYMBOL(sdw_shutdown_stream);
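sdw_startup_stream()/sdw_shutdown_stream() lift the per-substream allocation that intel.c just dropped into common code, so a machine driver can simply delegate from its snd_soc_ops. A sketch (assuming a card whose DAIs implement the set/get_sdw_stream ops):

	static int mach_startup(struct snd_pcm_substream *substream)
	{
		return sdw_startup_stream(substream);
	}

	static void mach_shutdown(struct snd_pcm_substream *substream)
	{
		sdw_shutdown_stream(substream);
	}

	static const struct snd_soc_ops mach_sdw_ops = {
		.startup	= mach_startup,
		.shutdown	= mach_shutdown,
	};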
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8f1f8fca79e3..c3008e423f59 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -59,6 +59,7 @@ comment "SPI Master Controller Drivers"
config SPI_ALTERA
tristate "Altera SPI Controller"
+ select REGMAP_MMIO
help
This is the driver for the Altera SPI Controller.
@@ -102,7 +103,7 @@ config SPI_AT91_USART
config SPI_ATMEL_QUADSPI
tristate "Atmel Quad SPI Controller"
- depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110)
+ depends on ARCH_AT91 || COMPILE_TEST
depends on OF && HAS_IOMEM
help
This enables support for the Quad SPI controller in master mode.
@@ -149,13 +150,13 @@ config SPI_BCM2835AUX
config SPI_BCM63XX
tristate "Broadcom BCM63xx SPI controller"
- depends on BCM63XX || COMPILE_TEST
+ depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
help
Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
config SPI_BCM63XX_HSSPI
tristate "Broadcom BCM63XX HS SPI controller driver"
- depends on BCM63XX || ARCH_BCM_63XX || COMPILE_TEST
+ depends on BCM63XX || BMIPS_GENERIC || ARCH_BCM_63XX || COMPILE_TEST
help
This enables support for the High Speed SPI controller present on
newer Broadcom BCM63XX SoCs.
@@ -168,7 +169,7 @@ config SPI_BCM_QSPI
help
Enables support for the Broadcom SPI flash and MSPI controller.
Select this option for any one of BRCMSTB, iProc NSP and NS2 SoCs
- based platforms. This driver works for both SPI master for spi-nor
+ based platforms. This driver works with both the SPI master for SPI NOR
flash devices and the MSPI device.
config SPI_BITBANG
@@ -200,6 +201,17 @@ config SPI_CADENCE
This selects the Cadence SPI controller master driver
used by Xilinx Zynq and ZynqMP.
+config SPI_CADENCE_QUADSPI
+ tristate "Cadence Quad SPI controller"
+ depends on OF && (ARM || ARM64 || COMPILE_TEST)
+ help
+ Enable support for the Cadence Quad SPI Flash controller.
+
+ Cadence QSPI is a specialized controller for connecting an SPI
+ Flash over a 1/2/4-bit wide bus. Enable this option if you have a
+ device with a Cadence QSPI controller and want to access the
+ Flash as an MTD device.
+
config SPI_CLPS711X
tristate "CLPS711X host SPI controller"
depends on ARCH_CLPS711X || COMPILE_TEST
@@ -299,11 +311,11 @@ config SPI_FSL_QUADSPI
supports the high-level SPI memory interface.
config SPI_HISI_SFC_V3XX
- tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets"
+ tristate "HiSilicon SPI NOR Flash Controller for Hi16XX chipsets"
depends on (ARM64 && ACPI) || COMPILE_TEST
depends on HAS_IOMEM
help
- This enables support for HiSilicon v3xx SPI-NOR flash controller
+ This enables support for HiSilicon v3xx SPI NOR flash controller
found in hi16xx chipsets.
config SPI_NXP_FLEXSPI
@@ -465,9 +477,9 @@ config SPI_MTK_NOR
depends on ARCH_MEDIATEK || COMPILE_TEST
help
This enables support for SPI NOR controller found on MediaTek
- ARM SoCs. This is a controller specifically for SPI-NOR flash.
+ ARM SoCs. This is a controller specifically for SPI NOR flash.
It can perform generic SPI transfers up to 6 bytes via generic
- SPI interface as well as several SPI-NOR specific instructions
+ SPI interface as well as several SPI NOR specific instructions
via SPI MEM interface.
config SPI_NPCM_FIU
@@ -489,11 +501,11 @@ config SPI_NPCM_PSPI
config SPI_LANTIQ_SSC
tristate "Lantiq SSC SPI controller"
- depends on LANTIQ || COMPILE_TEST
+ depends on LANTIQ || X86 || COMPILE_TEST
help
This driver supports the Lantiq SSC SPI controller in master
mode. This controller is found on Intel (former Lantiq) SoCs like
- the Danube, Falcon, xRX200, xRX300.
+ the Danube, Falcon, xRX200, xRX300, Lightning Mountain.
config SPI_OC_TINY
tristate "OpenCores tiny SPI"
@@ -605,6 +617,12 @@ config SPI_RB4XX
help
SPI controller driver for the Mikrotik RB4xx series boards.
+config SPI_RPCIF
+ tristate "Renesas RPC-IF SPI driver"
+ depends on RENESAS_RPCIF
+ help
+ SPI driver for Renesas R-Car Gen3 RPC-IF.
+
config SPI_RSPI
tristate "Renesas RSPI/QSPI controller"
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index d2e41d3d464a..cf955ea803cd 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_SPI_BCM_QSPI) += spi-iproc-qspi.o spi-brcmstb-qspi.o spi-bcm-qspi.
obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
+obj-$(CONFIG_SPI_CADENCE_QUADSPI) += spi-cadence-quadspi.o
obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
@@ -92,6 +93,7 @@ obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o
+obj-$(CONFIG_SPI_RPCIF) += spi-rpc-if.o
obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
spi-s3c24xx-hw-y := spi-s3c24xx.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index cb44d1e169aa..8c009c175f2c 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -285,6 +285,12 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem,
op->dummy.nbytes == 0)
return false;
+ /* DTR ops not supported. */
+ if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
+ return false;
+ if (op->cmd.nbytes != 1)
+ return false;
+
return true;
}
@@ -424,11 +430,11 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
/* Send/Receive data */
if (op->data.dir == SPI_MEM_DATA_IN)
- _memcpy_fromio(op->data.buf.in, aq->mem + offset,
- op->data.nbytes);
+ memcpy_fromio(op->data.buf.in, aq->mem + offset,
+ op->data.nbytes);
else
- _memcpy_toio(aq->mem + offset, op->data.buf.out,
- op->data.nbytes);
+ memcpy_toio(aq->mem + offset, op->data.buf.out,
+ op->data.nbytes);
/* Release the chip-select */
atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 41d71ba7fd32..809bfff3690a 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/spi/altera.h>
#include <linux/spi/spi.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -40,19 +41,61 @@
#define ALTERA_SPI_CONTROL_IE_MSK 0x100
#define ALTERA_SPI_CONTROL_SSO_MSK 0x400
+#define ALTERA_SPI_MAX_CS 32
+
+enum altera_spi_type {
+ ALTERA_SPI_TYPE_UNKNOWN,
+ ALTERA_SPI_TYPE_SUBDEV,
+};
+
struct altera_spi {
- void __iomem *base;
int irq;
int len;
int count;
int bytes_per_word;
- unsigned long imr;
+ u32 imr;
/* data buffers */
const unsigned char *tx;
unsigned char *rx;
+
+ struct regmap *regmap;
+ u32 regoff;
+ struct device *dev;
+};
+
+static const struct regmap_config spi_altera_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
};
+static int altr_spi_writel(struct altera_spi *hw, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+
+ ret = regmap_write(hw->regmap, hw->regoff + reg, val);
+ if (ret)
+ dev_err(hw->dev, "fail to write reg 0x%x val 0x%x: %d\n",
+ reg, val, ret);
+
+ return ret;
+}
+
+static int altr_spi_readl(struct altera_spi *hw, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = regmap_read(hw->regmap, hw->regoff + reg, val);
+ if (ret)
+ dev_err(hw->dev, "fail to read reg 0x%x: %d\n", reg, ret);
+
+ return ret;
+}
+
static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev)
{
return spi_master_get_devdata(sdev->master);
@@ -64,12 +107,13 @@ static void altera_spi_set_cs(struct spi_device *spi, bool is_high)
if (is_high) {
hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK;
- writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
- writel(0, hw->base + ALTERA_SPI_SLAVE_SEL);
+ altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
+ altr_spi_writel(hw, ALTERA_SPI_SLAVE_SEL, 0);
} else {
- writel(BIT(spi->chip_select), hw->base + ALTERA_SPI_SLAVE_SEL);
+ altr_spi_writel(hw, ALTERA_SPI_SLAVE_SEL,
+ BIT(spi->chip_select));
hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK;
- writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
}
}
@@ -86,17 +130,24 @@ static void altera_spi_tx_word(struct altera_spi *hw)
txd = (hw->tx[hw->count * 2]
| (hw->tx[hw->count * 2 + 1] << 8));
break;
+ case 4:
+ txd = (hw->tx[hw->count * 4]
+ | (hw->tx[hw->count * 4 + 1] << 8)
+ | (hw->tx[hw->count * 4 + 2] << 16)
+ | (hw->tx[hw->count * 4 + 3] << 24));
+ break;
+
}
}
- writel(txd, hw->base + ALTERA_SPI_TXDATA);
+ altr_spi_writel(hw, ALTERA_SPI_TXDATA, txd);
}
static void altera_spi_rx_word(struct altera_spi *hw)
{
unsigned int rxd;
- rxd = readl(hw->base + ALTERA_SPI_RXDATA);
+ altr_spi_readl(hw, ALTERA_SPI_RXDATA, &rxd);
if (hw->rx) {
switch (hw->bytes_per_word) {
case 1:
@@ -106,6 +157,13 @@ static void altera_spi_rx_word(struct altera_spi *hw)
hw->rx[hw->count * 2] = rxd;
hw->rx[hw->count * 2 + 1] = rxd >> 8;
break;
+ case 4:
+ hw->rx[hw->count * 4] = rxd;
+ hw->rx[hw->count * 4 + 1] = rxd >> 8;
+ hw->rx[hw->count * 4 + 2] = rxd >> 16;
+ hw->rx[hw->count * 4 + 3] = rxd >> 24;
+ break;
+
}
}
@@ -116,6 +174,7 @@ static int altera_spi_txrx(struct spi_master *master,
struct spi_device *spi, struct spi_transfer *t)
{
struct altera_spi *hw = spi_master_get_devdata(master);
+ u32 val;
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
@@ -126,7 +185,7 @@ static int altera_spi_txrx(struct spi_master *master,
if (hw->irq >= 0) {
/* enable receive interrupt */
hw->imr |= ALTERA_SPI_CONTROL_IRRDY_MSK;
- writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
/* send the first byte */
altera_spi_tx_word(hw);
@@ -134,9 +193,13 @@ static int altera_spi_txrx(struct spi_master *master,
while (hw->count < hw->len) {
altera_spi_tx_word(hw);
- while (!(readl(hw->base + ALTERA_SPI_STATUS) &
- ALTERA_SPI_STATUS_RRDY_MSK))
+ for (;;) {
+ altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
+ if (val & ALTERA_SPI_STATUS_RRDY_MSK)
+ break;
+
cpu_relax();
+ }
altera_spi_rx_word(hw);
}
@@ -158,7 +221,7 @@ static irqreturn_t altera_spi_irq(int irq, void *dev)
} else {
/* disable receive interrupt */
hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK;
- writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
spi_finalize_current_transfer(master);
}
@@ -168,9 +231,14 @@ static irqreturn_t altera_spi_irq(int irq, void *dev)
static int altera_spi_probe(struct platform_device *pdev)
{
+ const struct platform_device_id *platid = platform_get_device_id(pdev);
+ struct altera_spi_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ enum altera_spi_type type = ALTERA_SPI_TYPE_UNKNOWN;
struct altera_spi *hw;
struct spi_master *master;
int err = -ENODEV;
+ u32 val;
+ u16 i;
master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi));
if (!master)
@@ -178,27 +246,72 @@ static int altera_spi_probe(struct platform_device *pdev)
/* setup the master state. */
master->bus_num = pdev->id;
- master->num_chipselect = 16;
- master->mode_bits = SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+
+ if (pdata) {
+ if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) {
+ dev_err(&pdev->dev,
+ "Invalid number of chipselect: %hu\n",
+ pdata->num_chipselect);
+ return -EINVAL;
+ }
+
+ master->num_chipselect = pdata->num_chipselect;
+ master->mode_bits = pdata->mode_bits;
+ master->bits_per_word_mask = pdata->bits_per_word_mask;
+ } else {
+ master->num_chipselect = 16;
+ master->mode_bits = SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ }
+
master->dev.of_node = pdev->dev.of_node;
master->transfer_one = altera_spi_txrx;
master->set_cs = altera_spi_set_cs;
hw = spi_master_get_devdata(master);
+ hw->dev = &pdev->dev;
+
+ if (platid)
+ type = platid->driver_data;
/* find and map our resources */
- hw->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hw->base)) {
- err = PTR_ERR(hw->base);
- goto exit;
+ if (type == ALTERA_SPI_TYPE_SUBDEV) {
+ struct resource *regoff;
+
+ hw->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!hw->regmap) {
+ dev_err(&pdev->dev, "get regmap failed\n");
+ goto exit;
+ }
+
+ regoff = platform_get_resource(pdev, IORESOURCE_REG, 0);
+ if (regoff)
+ hw->regoff = regoff->start;
+ } else {
+ void __iomem *res;
+
+ res = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(res)) {
+ err = PTR_ERR(res);
+ goto exit;
+ }
+
+ hw->regmap = devm_regmap_init_mmio(&pdev->dev, res,
+ &spi_altera_config);
+ if (IS_ERR(hw->regmap)) {
+ dev_err(&pdev->dev, "regmap mmio init failed\n");
+ err = PTR_ERR(hw->regmap);
+ goto exit;
+ }
}
+
/* program defaults into the registers */
hw->imr = 0; /* disable spi interrupts */
- writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
- writel(0, hw->base + ALTERA_SPI_STATUS); /* clear status reg */
- if (readl(hw->base + ALTERA_SPI_STATUS) & ALTERA_SPI_STATUS_RRDY_MSK)
- readl(hw->base + ALTERA_SPI_RXDATA); /* flush rxdata */
+ altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
+ altr_spi_writel(hw, ALTERA_SPI_STATUS, 0); /* clear status reg */
+ altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
+ if (val & ALTERA_SPI_STATUS_RRDY_MSK)
+ altr_spi_readl(hw, ALTERA_SPI_RXDATA, &val); /* flush rxdata */
/* irq is optional */
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq >= 0) {
@@ -211,7 +324,17 @@ static int altera_spi_probe(struct platform_device *pdev)
err = devm_spi_register_master(&pdev->dev, master);
if (err)
goto exit;
- dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq);
+
+ if (pdata) {
+ for (i = 0; i < pdata->num_devices; i++) {
+ if (!spi_new_device(master, pdata->devices + i))
+ dev_warn(&pdev->dev,
+ "unable to create SPI device: %s\n",
+ pdata->devices[i].modalias);
+ }
+ }
+
+ dev_info(&pdev->dev, "regoff %u, irq %d\n", hw->regoff, hw->irq);
return 0;
exit:
@@ -228,6 +351,13 @@ static const struct of_device_id altera_spi_match[] = {
MODULE_DEVICE_TABLE(of, altera_spi_match);
#endif /* CONFIG_OF */
+static const struct platform_device_id altera_spi_ids[] = {
+ { DRV_NAME, ALTERA_SPI_TYPE_UNKNOWN },
+ { "subdev_spi_altera", ALTERA_SPI_TYPE_SUBDEV },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, altera_spi_ids);
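The new "subdev_spi_altera" id targets soft SPI masters embedded in a larger device: the child reuses the parent's regmap and finds its register block through an IORESOURCE_REG offset. A sketch of how a parent driver might instantiate one (the offset and platform data values are illustrative):

	static const struct resource spi_res[] = {
		{
			.start	= 0x1000,	/* offset in parent regmap */
			.end	= 0x101f,
			.flags	= IORESOURCE_REG,
		},
	};

	static struct altera_spi_platform_data pdata = {
		.num_chipselect		= 1,
		.mode_bits		= SPI_CS_HIGH,
		.bits_per_word_mask	= SPI_BPW_RANGE_MASK(1, 32),
	};

	pdev = platform_device_register_resndata(parent, "subdev_spi_altera",
						 0, spi_res,
						 ARRAY_SIZE(spi_res),
						 &pdata, sizeof(pdata));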
+
static struct platform_driver altera_spi_driver = {
.probe = altera_spi_probe,
.driver = {
@@ -235,6 +365,7 @@ static struct platform_driver altera_spi_driver = {
.pm = NULL,
.of_match_table = of_match_ptr(altera_spi_match),
},
+ .id_table = altera_spi_ids,
};
module_platform_driver(altera_spi_driver);
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index d0aacd4de1b9..7f629544060d 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -294,11 +294,13 @@ err_free_master:
return err;
}
+#ifdef CONFIG_ACPI
static const struct acpi_device_id spi_acpi_match[] = {
{ "AMDI0061", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, spi_acpi_match);
+#endif
static struct platform_driver amd_spi_driver = {
.driver = {
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index 88033422a42a..8c8352625d23 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -681,13 +681,6 @@ static const struct dev_pm_ops at91_usart_spi_pm_ops = {
at91_usart_spi_runtime_resume, NULL)
};
-static const struct of_device_id at91_usart_spi_dt_ids[] = {
- { .compatible = "microchip,at91sam9g45-usart-spi"},
- { /* sentinel */}
-};
-
-MODULE_DEVICE_TABLE(of, at91_usart_spi_dt_ids);
-
static struct platform_driver at91_usart_spi_driver = {
.driver = {
.name = "at91_usart_spi",
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 57ee8c3b7972..2cfe6253a784 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1546,10 +1546,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
return PTR_ERR(clk);
/* setup spi core then atmel-specific driver state */
- ret = -ENOMEM;
master = spi_alloc_master(&pdev->dev, sizeof(*as));
if (!master)
- goto out_free;
+ return -ENOMEM;
/* the spi->mode bits understood by this driver: */
master->use_gpio_descriptors = true;
@@ -1678,7 +1677,6 @@ out_free_dma:
clk_disable_unprepare(clk);
out_free_irq:
out_unmap_regs:
-out_free:
spi_master_put(master);
return ret;
}
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 237bd306c268..c45d76c848c8 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -86,6 +86,7 @@ MODULE_PARM_DESC(polling_limit_us,
* @clk: core clock, divided to calculate serial clock
* @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
* @tfr: SPI transfer currently processed
+ * @ctlr: SPI controller reverse lookup
* @tx_buf: pointer whence next transmitted byte is read
* @rx_buf: pointer where next received byte is written
* @tx_len: remaining bytes to transmit
@@ -125,6 +126,7 @@ struct bcm2835_spi {
struct clk *clk;
int irq;
struct spi_transfer *tfr;
+ struct spi_controller *ctlr;
const u8 *tx_buf;
u8 *rx_buf;
int tx_len;
@@ -243,13 +245,13 @@ static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
bs->rx_len -= count;
- while (count > 0) {
+ do {
val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
len = min(count, 4);
memcpy(bs->rx_buf, &val, len);
bs->rx_buf += len;
count -= 4;
- }
+ } while (count > 0);
}
/**
@@ -269,7 +271,7 @@ static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
bs->tx_len -= count;
- while (count > 0) {
+ do {
if (bs->tx_buf) {
len = min(count, 4);
memcpy(&val, bs->tx_buf, len);
@@ -279,7 +281,7 @@ static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
}
bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
count -= 4;
- }
+ } while (count > 0);
}
/**
@@ -308,12 +310,11 @@ static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
count = min(count, bs->rx_len);
bs->rx_len -= count;
- while (count) {
+ do {
val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
if (bs->rx_buf)
*bs->rx_buf++ = val;
- count--;
- }
+ } while (--count);
}
/**
@@ -328,16 +329,14 @@ static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
count = min(count, bs->tx_len);
bs->tx_len -= count;
- while (count) {
+ do {
val = bs->tx_buf ? *bs->tx_buf++ : 0;
bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
- count--;
- }
+ } while (--count);
}
-static void bcm2835_spi_reset_hw(struct spi_controller *ctlr)
+static void bcm2835_spi_reset_hw(struct bcm2835_spi *bs)
{
- struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/* Disable SPI interrupts and transfer */
@@ -363,8 +362,7 @@ static void bcm2835_spi_reset_hw(struct spi_controller *ctlr)
static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
{
- struct spi_controller *ctlr = dev_id;
- struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ struct bcm2835_spi *bs = dev_id;
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/*
@@ -386,9 +384,9 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
if (!bs->rx_len) {
/* Transfer complete - reset SPI HW */
- bcm2835_spi_reset_hw(ctlr);
+ bcm2835_spi_reset_hw(bs);
/* wake up the framework */
- complete(&ctlr->xfer_completion);
+ complete(&bs->ctlr->xfer_completion);
}
return IRQ_HANDLED;
@@ -607,7 +605,7 @@ static void bcm2835_spi_dma_rx_done(void *data)
bcm2835_spi_undo_prologue(bs);
/* reset fifo and HW */
- bcm2835_spi_reset_hw(ctlr);
+ bcm2835_spi_reset_hw(bs);
/* and mark as completed */;
complete(&ctlr->xfer_completion);
@@ -641,7 +639,7 @@ static void bcm2835_spi_dma_tx_done(void *data)
dmaengine_terminate_async(ctlr->dma_rx);
bcm2835_spi_undo_prologue(bs);
- bcm2835_spi_reset_hw(ctlr);
+ bcm2835_spi_reset_hw(bs);
complete(&ctlr->xfer_completion);
}
@@ -825,14 +823,14 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
if (!bs->rx_buf && !bs->tx_dma_active &&
cmpxchg(&bs->rx_dma_active, true, false)) {
dmaengine_terminate_async(ctlr->dma_rx);
- bcm2835_spi_reset_hw(ctlr);
+ bcm2835_spi_reset_hw(bs);
}
/* wait for wakeup in framework */
return 1;
err_reset_hw:
- bcm2835_spi_reset_hw(ctlr);
+ bcm2835_spi_reset_hw(bs);
bcm2835_spi_undo_prologue(bs);
return ret;
}
@@ -1074,7 +1072,7 @@ static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
}
/* Transfer complete - reset SPI HW */
- bcm2835_spi_reset_hw(ctlr);
+ bcm2835_spi_reset_hw(bs);
/* and return without waiting for completion */
return 0;
}
@@ -1084,7 +1082,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
struct spi_transfer *tfr)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
- unsigned long spi_hz, clk_hz, cdiv, spi_used_hz;
+ unsigned long spi_hz, clk_hz, cdiv;
unsigned long hz_per_byte, byte_limit;
u32 cs = bs->prepare_cs[spi->chip_select];
@@ -1104,7 +1102,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
} else {
cdiv = 0; /* 0 is the slowest we can go */
}
- spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
+ tfr->effective_speed_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
/* handle all the 3-wire mode */
@@ -1124,7 +1122,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
* per 300,000 Hz of bus clock.
*/
hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
- byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;
+ byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1;
/* run in polling mode for short transfers */
if (tfr->len < byte_limit)
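The driver-local spi_used_hz becomes the core field tfr->effective_speed_hz, so the same value that feeds the polling-limit heuristic is also reported back to the peripheral driver. A hedged sketch of how a client could read it after a transfer (example buffer and rate, not from the patch):

#include <linux/spi/spi.h>

/*
 * Sketch only: a peripheral driver inspecting the rate the controller
 * actually used once tfr->effective_speed_hz is filled in, as the
 * bcm2835 driver now does.
 */
static int demo_query_speed(struct spi_device *spi)
{
	u8 buf[4] = { 0 };
	struct spi_transfer t = {
		.tx_buf = buf,
		.len = sizeof(buf),
		.speed_hz = 10000000,	/* ask for 10 MHz */
	};
	int ret = spi_sync_transfer(spi, &t, 1);

	if (!ret)
		dev_info(&spi->dev, "transfer ran at %u Hz\n",
			 t.effective_speed_hz);
	return ret;
}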
@@ -1182,7 +1180,7 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
bcm2835_spi_undo_prologue(bs);
/* and reset */
- bcm2835_spi_reset_hw(ctlr);
+ bcm2835_spi_reset_hw(bs);
}
static int chip_match_name(struct gpio_chip *chip, void *data)
@@ -1311,6 +1309,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
ctlr->dev.of_node = pdev->dev.of_node;
bs = spi_controller_get_devdata(ctlr);
+ bs->ctlr = ctlr;
bs->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bs->regs)) {
@@ -1345,7 +1344,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
- dev_name(&pdev->dev), ctlr);
+ dev_name(&pdev->dev), bs);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
goto out_dma_release;
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index c331efd6e86b..2f717812c766 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -345,7 +345,7 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
struct spi_transfer *tfr)
{
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
- unsigned long spi_hz, clk_hz, speed, spi_used_hz;
+ unsigned long spi_hz, clk_hz, speed;
unsigned long hz_per_byte, byte_limit;
/* calculate the registers to handle
@@ -374,7 +374,7 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
/* set the new speed */
bs->cntl[0] |= speed << BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT;
- spi_used_hz = clk_hz / (2 * (speed + 1));
+ tfr->effective_speed_hz = clk_hz / (2 * (speed + 1));
/* set transmit buffers and length */
bs->tx_buf = tfr->tx_buf;
@@ -391,7 +391,7 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
* 30 µs per 300,000 Hz of bus clock.
*/
hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
- byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;
+ byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1;
/* run in polling mode for short transfers */
if (tfr->len < byte_limit)
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 6c235306c0e4..9909b18f3c5a 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/reset.h>
#define HSSPI_GLOBAL_CTRL_REG 0x0
#define GLOBAL_CTRL_CS_POLARITY_SHIFT 0
@@ -334,6 +335,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
struct clk *clk, *pll_clk = NULL;
int irq, ret;
u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS;
+ struct reset_control *reset;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -348,10 +350,20 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
if (IS_ERR(clk))
return PTR_ERR(clk);
+ reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
ret = clk_prepare_enable(clk);
if (ret)
return ret;
+ ret = reset_control_reset(reset);
+ if (ret) {
+ dev_err(dev, "unable to reset device: %d\n", ret);
+ goto out_disable_clk;
+ }
+
rate = clk_get_rate(clk);
if (!rate) {
pll_clk = devm_clk_get(dev, "pll");
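Both bcm63xx probes gain an optional, exclusively-owned reset line. devm_reset_control_get_optional_exclusive() returns NULL when the device tree names no reset, and reset_control_reset(NULL) is a no-op, so one code path serves both cases. A condensed sketch:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

/*
 * Optional-reset pattern as added to both bcm63xx probes: a missing
 * "resets" property yields a NULL control, and the reset_control_*()
 * helpers treat NULL as a no-op.
 */
static int demo_reset(struct platform_device *pdev)
{
	struct reset_control *reset;
	int ret;

	reset = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(reset))
		return PTR_ERR(reset);	/* may be -EPROBE_DEFER */

	ret = reset_control_reset(reset);	/* no-op when reset is NULL */
	if (ret)
		dev_err(&pdev->dev, "unable to reset device: %d\n", ret);
	return ret;
}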
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 0f1b10a4ef0c..96d075e633f4 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -18,6 +18,7 @@
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/reset.h>
/* BCM 6338/6348 SPI core */
#define SPI_6348_RSET_SIZE 64
@@ -493,6 +494,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
struct bcm63xx_spi *bs;
int ret;
u32 num_cs = BCM63XX_SPI_MAX_CS;
+ struct reset_control *reset;
if (dev->of_node) {
const struct of_device_id *match;
@@ -529,6 +531,10 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
return PTR_ERR(clk);
}
+ reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
master = spi_alloc_master(dev, sizeof(*bs));
if (!master) {
dev_err(dev, "out of memory\n");
@@ -579,6 +585,12 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
if (ret)
goto out_err;
+ ret = reset_control_reset(reset);
+ if (ret) {
+ dev_err(dev, "unable to reset device: %d\n", ret);
+ goto out_clk_disable;
+ }
+
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
/* register and we are done */
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 68491a8bf7b5..1a7352abd878 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -174,7 +174,7 @@ int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer);
-/**
+/*
* spi_bitbang_setup - default setup for per-word I/O loops
*/
int spi_bitbang_setup(struct spi_device *spi)
@@ -208,7 +208,7 @@ int spi_bitbang_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup);
-/**
+/*
* spi_bitbang_cleanup - default cleanup for per-word I/O loops
*/
void spi_bitbang_cleanup(struct spi_device *spi)
@@ -427,7 +427,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
}
EXPORT_SYMBOL_GPL(spi_bitbang_start);
-/**
+/*
* spi_bitbang_stop - stops the task providing spi communication
*/
void spi_bitbang_stop(struct spi_bitbang *bitbang)
diff --git a/drivers/mtd/spi-nor/controllers/cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 494dcab4aaaa..1c1a9d17eec0 100644
--- a/drivers/mtd/spi-nor/controllers/cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for Cadence QSPI Controller
- *
- * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
- */
+//
+// Driver for Cadence QSPI Controller
+//
+// Copyright Altera Corporation (C) 2012-2014. All rights reserved.
+// Copyright Intel Corporation (C) 2019-2020. All rights reserved.
+// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -17,9 +19,6 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/spi-nor.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -27,6 +26,7 @@
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
#include <linux/timer.h>
#define CQSPI_NAME "cadence-qspi"
@@ -34,17 +34,14 @@
/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
+#define CQSPI_DISABLE_DAC_MODE BIT(1)
-/* Capabilities mask */
-#define CQSPI_BASE_HWCAPS_MASK \
- (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST | \
- SNOR_HWCAPS_READ_1_1_2 | SNOR_HWCAPS_READ_1_1_4 | \
- SNOR_HWCAPS_PP)
+/* Capabilities */
+#define CQSPI_SUPPORTS_OCTAL BIT(0)
struct cqspi_st;
struct cqspi_flash_pdata {
- struct spi_nor nor;
struct cqspi_st *cqspi;
u32 clk_rate;
u32 read_delay;
@@ -56,8 +53,6 @@ struct cqspi_flash_pdata {
u8 addr_width;
u8 data_width;
u8 cs;
- bool registered;
- bool use_direct_mode;
};
struct cqspi_st {
@@ -70,16 +65,12 @@ struct cqspi_st {
void __iomem *ahb_base;
resource_size_t ahb_size;
struct completion transfer_complete;
- struct mutex bus_mutex;
struct dma_chan *rx_chan;
struct completion rx_dma_complete;
dma_addr_t mmap_phys_base;
int current_cs;
- int current_page_size;
- int current_erase_size;
- int current_addr_width;
unsigned long master_ref_clk_hz;
bool is_decoded_cs;
u32 fifo_depth;
@@ -87,6 +78,7 @@ struct cqspi_st {
bool rclk_en;
u32 trigger_address;
u32 wr_delay;
+ bool use_direct_mode;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};
@@ -285,9 +277,8 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
return IRQ_HANDLED;
}
-static unsigned int cqspi_calc_rdreg(struct spi_nor *nor)
+static unsigned int cqspi_calc_rdreg(struct cqspi_flash_pdata *f_pdata)
{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
u32 rdreg = 0;
rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
@@ -354,19 +345,21 @@ static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
return cqspi_wait_idle(cqspi);
}
-static int cqspi_command_read(struct spi_nor *nor, u8 opcode,
- u8 *rxbuf, size_t n_rx)
+static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
+ u8 *rxbuf = op->data.buf.in;
+ u8 opcode = op->cmd.opcode;
+ size_t n_rx = op->data.nbytes;
unsigned int rdreg;
unsigned int reg;
size_t read_len;
int status;
if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
- dev_err(nor->dev,
+ dev_err(&cqspi->pdev->dev,
"Invalid input argument, len %zu rxbuf 0x%p\n",
n_rx, rxbuf);
return -EINVAL;
@@ -374,7 +367,7 @@ static int cqspi_command_read(struct spi_nor *nor, u8 opcode,
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
- rdreg = cqspi_calc_rdreg(nor);
+ rdreg = cqspi_calc_rdreg(f_pdata);
writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
@@ -403,25 +396,36 @@ static int cqspi_command_read(struct spi_nor *nor, u8 opcode,
return 0;
}
-static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
- const u8 *txbuf, size_t n_tx)
+static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
+ const u8 opcode = op->cmd.opcode;
+ const u8 *txbuf = op->data.buf.out;
+ size_t n_tx = op->data.nbytes;
unsigned int reg;
unsigned int data;
size_t write_len;
- int ret;
if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
- dev_err(nor->dev,
+ dev_err(&cqspi->pdev->dev,
"Invalid input argument, cmdlen %zu txbuf 0x%p\n",
n_tx, txbuf);
return -EINVAL;
}
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+
+ if (op->addr.nbytes) {
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= ((op->addr.nbytes - 1) &
+ CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+
+ writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
+ }
+
if (n_tx) {
reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
@@ -439,73 +443,46 @@ static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
}
}
- ret = cqspi_exec_flash_cmd(cqspi, reg);
- return ret;
-}
-
-static int cqspi_command_write_addr(struct spi_nor *nor,
- const u8 opcode, const unsigned int addr)
-{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
- struct cqspi_st *cqspi = f_pdata->cqspi;
- void __iomem *reg_base = cqspi->iobase;
- unsigned int reg;
-
- reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
- reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
- reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
- << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
-
- writel(addr, reg_base + CQSPI_REG_CMDADDRESS);
return cqspi_exec_flash_cmd(cqspi, reg);
}
-static int cqspi_read_setup(struct spi_nor *nor)
+static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
unsigned int dummy_clk = 0;
unsigned int reg;
- reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
- reg |= cqspi_calc_rdreg(nor);
+ reg = op->cmd.opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
+ reg |= cqspi_calc_rdreg(f_pdata);
/* Setup dummy clock cycles */
- dummy_clk = nor->read_dummy;
+ dummy_clk = op->dummy.nbytes * 8;
if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
dummy_clk = CQSPI_DUMMY_CLKS_MAX;
- if (dummy_clk / 8) {
- reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
- /* Set mode bits high to ensure chip doesn't enter XIP */
- writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
-
- /* Need to subtract the mode byte (8 clocks). */
- if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
- dummy_clk -= 8;
-
- if (dummy_clk)
- reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
- << CQSPI_REG_RD_INSTR_DUMMY_LSB;
- }
+ if (dummy_clk)
+ reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
+ << CQSPI_REG_RD_INSTR_DUMMY_LSB;
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
/* Set address width */
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
- reg |= (nor->addr_width - 1);
+ reg |= (op->addr.nbytes - 1);
writel(reg, reg_base + CQSPI_REG_SIZE);
return 0;
}
-static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
- loff_t from_addr, const size_t n_rx)
+static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
+ u8 *rxbuf, loff_t from_addr,
+ const size_t n_rx)
{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
+ struct device *dev = &cqspi->pdev->dev;
void __iomem *reg_base = cqspi->iobase;
void __iomem *ahb_base = cqspi->ahb_base;
unsigned int remaining = n_rx;
@@ -528,13 +505,13 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
while (remaining > 0) {
if (!wait_for_completion_timeout(&cqspi->transfer_complete,
- msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
+ msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
ret = -ETIMEDOUT;
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
if (ret && bytes_to_read == 0) {
- dev_err(nor->dev, "Indirect read timeout, no bytes\n");
+ dev_err(dev, "Indirect read timeout, no bytes\n");
goto failrd;
}
@@ -570,8 +547,7 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
if (ret) {
- dev_err(nor->dev,
- "Indirect read completion error (%i)\n", ret);
+ dev_err(dev, "Indirect read completion error (%i)\n", ret);
goto failrd;
}
@@ -593,32 +569,32 @@ failrd:
return ret;
}
-static int cqspi_write_setup(struct spi_nor *nor)
+static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
{
unsigned int reg;
- struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
/* Set opcode. */
- reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ reg = op->cmd.opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
writel(reg, reg_base + CQSPI_REG_WR_INSTR);
- reg = cqspi_calc_rdreg(nor);
+ reg = cqspi_calc_rdreg(f_pdata);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
- reg |= (nor->addr_width - 1);
+ reg |= (op->addr.nbytes - 1);
writel(reg, reg_base + CQSPI_REG_SIZE);
return 0;
}
-static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
- const u8 *txbuf, const size_t n_tx)
+static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
+ loff_t to_addr, const u8 *txbuf,
+ const size_t n_tx)
{
- const unsigned int page_size = nor->page_size;
- struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
+ struct device *dev = &cqspi->pdev->dev;
void __iomem *reg_base = cqspi->iobase;
unsigned int remaining = n_tx;
unsigned int write_bytes;
@@ -648,7 +624,7 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
while (remaining > 0) {
size_t write_words, mod_bytes;
- write_bytes = remaining > page_size ? page_size : remaining;
+ write_bytes = remaining;
write_words = write_bytes / 4;
mod_bytes = write_bytes % 4;
/* Write 4 bytes at a time then single bytes. */
@@ -665,8 +641,8 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
}
if (!wait_for_completion_timeout(&cqspi->transfer_complete,
- msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
- dev_err(nor->dev, "Indirect write timeout\n");
+ msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
+ dev_err(dev, "Indirect write timeout\n");
ret = -ETIMEDOUT;
goto failwr;
}
@@ -681,8 +657,7 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
if (ret) {
- dev_err(nor->dev,
- "Indirect write completion error (%i)\n", ret);
+ dev_err(dev, "Indirect write completion error (%i)\n", ret);
goto failwr;
}
@@ -706,9 +681,8 @@ failwr:
return ret;
}
-static void cqspi_chipselect(struct spi_nor *nor)
+static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata)
{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *reg_base = cqspi->iobase;
unsigned int chip_select = f_pdata->cs;
@@ -736,32 +710,6 @@ static void cqspi_chipselect(struct spi_nor *nor)
writel(reg, reg_base + CQSPI_REG_CONFIG);
}
-static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
-{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
- struct cqspi_st *cqspi = f_pdata->cqspi;
- void __iomem *iobase = cqspi->iobase;
- unsigned int reg;
-
- /* configure page size and block size. */
- reg = readl(iobase + CQSPI_REG_SIZE);
- reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
- reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
- reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
- reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
- reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
- reg |= (nor->addr_width - 1);
- writel(reg, iobase + CQSPI_REG_SIZE);
-
- /* configure the chip select */
- cqspi_chipselect(nor);
-
- /* Store the new configuration of the controller */
- cqspi->current_page_size = nor->page_size;
- cqspi->current_erase_size = nor->mtd.erasesize;
- cqspi->current_addr_width = nor->addr_width;
-}
-
static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
const unsigned int ns_val)
{
@@ -773,9 +721,8 @@ static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
return ticks;
}
-static void cqspi_delay(struct spi_nor *nor)
+static void cqspi_delay(struct cqspi_flash_pdata *f_pdata)
{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
void __iomem *iobase = cqspi->iobase;
const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
@@ -859,33 +806,27 @@ static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
writel(reg, reg_base + CQSPI_REG_CONFIG);
}
-static void cqspi_configure(struct spi_nor *nor)
+static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
+ unsigned long sclk)
{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
- const unsigned int sclk = f_pdata->clk_rate;
int switch_cs = (cqspi->current_cs != f_pdata->cs);
int switch_ck = (cqspi->sclk != sclk);
- if ((cqspi->current_page_size != nor->page_size) ||
- (cqspi->current_erase_size != nor->mtd.erasesize) ||
- (cqspi->current_addr_width != nor->addr_width))
- switch_cs = 1;
-
if (switch_cs || switch_ck)
cqspi_controller_enable(cqspi, 0);
/* Switch chip select. */
if (switch_cs) {
cqspi->current_cs = f_pdata->cs;
- cqspi_configure_cs_and_sizes(nor);
+ cqspi_chipselect(f_pdata);
}
/* Setup baudrate divisor and delays */
if (switch_ck) {
cqspi->sclk = sclk;
cqspi_config_baudrate_div(cqspi);
- cqspi_delay(nor);
+ cqspi_delay(f_pdata);
cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
f_pdata->read_delay);
}
@@ -894,26 +835,25 @@ static void cqspi_configure(struct spi_nor *nor)
cqspi_controller_enable(cqspi, 1);
}
-static int cqspi_set_protocol(struct spi_nor *nor, const int read)
+static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
-
f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
- if (read) {
- switch (nor->read_proto) {
- case SNOR_PROTO_1_1_1:
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ switch (op->data.buswidth) {
+ case 1:
f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
break;
- case SNOR_PROTO_1_1_2:
+ case 2:
f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
break;
- case SNOR_PROTO_1_1_4:
+ case 4:
f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
break;
- case SNOR_PROTO_1_1_8:
+ case 8:
f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
break;
default:
@@ -921,36 +861,32 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read)
}
}
- cqspi_configure(nor);
-
return 0;
}
-static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
- size_t len, const u_char *buf)
+static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
+ loff_t to = op->addr.val;
+ size_t len = op->data.nbytes;
+ const u_char *buf = op->data.buf.out;
int ret;
- ret = cqspi_set_protocol(nor, 0);
+ ret = cqspi_set_protocol(f_pdata, op);
if (ret)
return ret;
- ret = cqspi_write_setup(nor);
+ ret = cqspi_write_setup(f_pdata, op);
if (ret)
return ret;
- if (f_pdata->use_direct_mode) {
+ if (cqspi->use_direct_mode && ((to + len) <= cqspi->ahb_size)) {
memcpy_toio(cqspi->ahb_base + to, buf, len);
- ret = cqspi_wait_idle(cqspi);
- } else {
- ret = cqspi_indirect_write_execute(nor, to, buf, len);
+ return cqspi_wait_idle(cqspi);
}
- if (ret)
- return ret;
- return len;
+ return cqspi_indirect_write_execute(f_pdata, to, buf, len);
}
static void cqspi_rx_dma_callback(void *param)
@@ -960,11 +896,11 @@ static void cqspi_rx_dma_callback(void *param)
complete(&cqspi->rx_dma_complete);
}
-static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
- loff_t from, size_t len)
+static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
+ u_char *buf, loff_t from, size_t len)
{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
+ struct device *dev = &cqspi->pdev->dev;
enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
int ret = 0;
@@ -977,15 +913,15 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
return 0;
}
- dma_dst = dma_map_single(nor->dev, buf, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(nor->dev, dma_dst)) {
- dev_err(nor->dev, "dma mapping failed\n");
+ dma_dst = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_dst)) {
+ dev_err(dev, "dma mapping failed\n");
return -ENOMEM;
}
tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
len, flags);
if (!tx) {
- dev_err(nor->dev, "device_prep_dma_memcpy error\n");
+ dev_err(dev, "device_prep_dma_memcpy error\n");
ret = -EIO;
goto err_unmap;
}
@@ -997,7 +933,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
ret = dma_submit_error(cookie);
if (ret) {
- dev_err(nor->dev, "dma_submit_error %d\n", cookie);
+ dev_err(dev, "dma_submit_error %d\n", cookie);
ret = -EIO;
goto err_unmap;
}
@@ -1006,99 +942,68 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
msecs_to_jiffies(len))) {
dmaengine_terminate_sync(cqspi->rx_chan);
- dev_err(nor->dev, "DMA wait_for_completion_timeout\n");
+ dev_err(dev, "DMA wait_for_completion_timeout\n");
ret = -ETIMEDOUT;
goto err_unmap;
}
err_unmap:
- dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);
return ret;
}
-static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
- size_t len, u_char *buf)
-{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
- int ret;
-
- ret = cqspi_set_protocol(nor, 1);
- if (ret)
- return ret;
-
- ret = cqspi_read_setup(nor);
- if (ret)
- return ret;
-
- if (f_pdata->use_direct_mode)
- ret = cqspi_direct_read_execute(nor, buf, from, len);
- else
- ret = cqspi_indirect_read_execute(nor, buf, from, len);
- if (ret)
- return ret;
-
- return len;
-}
-
-static int cqspi_erase(struct spi_nor *nor, loff_t offs)
+static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
+ const struct spi_mem_op *op)
{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ loff_t from = op->addr.val;
+ size_t len = op->data.nbytes;
+ u_char *buf = op->data.buf.in;
int ret;
- ret = cqspi_set_protocol(nor, 0);
+ ret = cqspi_set_protocol(f_pdata, op);
if (ret)
return ret;
- /* Send write enable, then erase commands. */
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ ret = cqspi_read_setup(f_pdata, op);
if (ret)
return ret;
- /* Set up command buffer. */
- ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
- if (ret)
- return ret;
+ if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
+ return cqspi_direct_read_execute(f_pdata, buf, from, len);
- return 0;
+ return cqspi_indirect_read_execute(f_pdata, buf, from, len);
}
-static int cqspi_prep(struct spi_nor *nor)
+static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
- struct cqspi_st *cqspi = f_pdata->cqspi;
-
- mutex_lock(&cqspi->bus_mutex);
-
- return 0;
-}
+ struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
+ struct cqspi_flash_pdata *f_pdata;
-static void cqspi_unprep(struct spi_nor *nor)
-{
- struct cqspi_flash_pdata *f_pdata = nor->priv;
- struct cqspi_st *cqspi = f_pdata->cqspi;
+ f_pdata = &cqspi->f_pdata[mem->spi->chip_select];
+ cqspi_configure(f_pdata, mem->spi->max_speed_hz);
- mutex_unlock(&cqspi->bus_mutex);
-}
+ if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
+ if (!op->addr.nbytes)
+ return cqspi_command_read(f_pdata, op);
-static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len)
-{
- int ret;
+ return cqspi_read(f_pdata, op);
+ }
- ret = cqspi_set_protocol(nor, 0);
- if (!ret)
- ret = cqspi_command_read(nor, opcode, buf, len);
+ if (!op->addr.nbytes || !op->data.buf.out)
+ return cqspi_command_write(f_pdata, op);
- return ret;
+ return cqspi_write(f_pdata, op);
}
-static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
- size_t len)
+static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
int ret;
- ret = cqspi_set_protocol(nor, 0);
- if (!ret)
- ret = cqspi_command_write(nor, opcode, buf, len);
+ ret = cqspi_mem_process(mem, op);
+ if (ret)
+ dev_err(&mem->spi->dev, "operation failed with %d\n", ret);
return ret;
}
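With the move from the spi-nor controller ops to the spi-mem framework, the driver exposes a single exec_op hook and every flash access arrives as a spi_mem_op; cqspi_mem_process() routes register-style ops (no address bytes) to the STIG command path and addressed data ops to the read/write paths. An illustrative caller-side op, with example opcode and geometry, assuming nothing beyond the generic spi-mem API:

#include <linux/spi/spi-mem.h>

/*
 * Illustrative quad-output read (opcode 0x6b, 3 address bytes, 8 dummy
 * clocks expressed as one single-lane dummy byte) of the kind the new
 * exec_op path dispatches to cqspi_read(). Values are examples, not
 * mandated by the driver.
 */
static int demo_quad_read(struct spi_mem *mem, void *buf, u64 addr,
			  size_t len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
			   SPI_MEM_OP_ADDR(3, addr, 1),
			   SPI_MEM_OP_DUMMY(1, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 4));

	return spi_mem_exec_op(mem, &op);
}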
@@ -1140,26 +1045,26 @@ static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
return 0;
}
-static int cqspi_of_get_pdata(struct platform_device *pdev)
+static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
{
- struct device_node *np = pdev->dev.of_node;
- struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+ struct device *dev = &cqspi->pdev->dev;
+ struct device_node *np = dev->of_node;
cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
- dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
+ dev_err(dev, "couldn't determine fifo-depth\n");
return -ENXIO;
}
if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
- dev_err(&pdev->dev, "couldn't determine fifo-width\n");
+ dev_err(dev, "couldn't determine fifo-width\n");
return -ENXIO;
}
if (of_property_read_u32(np, "cdns,trigger-address",
&cqspi->trigger_address)) {
- dev_err(&pdev->dev, "couldn't determine trigger-address\n");
+ dev_err(dev, "couldn't determine trigger-address\n");
return -ENXIO;
}
@@ -1202,7 +1107,7 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
cqspi_controller_enable(cqspi, 1);
}
-static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
+static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
dma_cap_mask_t mask;
@@ -1211,53 +1116,42 @@ static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
cqspi->rx_chan = dma_request_chan_by_mask(&mask);
if (IS_ERR(cqspi->rx_chan)) {
- dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
+ int ret = PTR_ERR(cqspi->rx_chan);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
cqspi->rx_chan = NULL;
+ return ret;
}
init_completion(&cqspi->rx_dma_complete);
+
+ return 0;
}
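cqspi_request_mmap_dma() now returns an error code chiefly so that -EPROBE_DEFER from dma_request_chan_by_mask() can defer the whole probe rather than silently losing DMA. A sketch of the deferral-aware request:

#include <linux/dmaengine.h>
#include <linux/err.h>

/*
 * Deferral-aware channel request: a definite failure falls back to
 * PIO, while -EPROBE_DEFER propagates so the probe is retried once a
 * DMA provider has appeared.
 */
static int demo_request_rx_dma(struct device *dev, struct dma_chan **chan)
{
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	*chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(*chan)) {
		ret = PTR_ERR(*chan);
		*chan = NULL;
		if (ret != -EPROBE_DEFER)
			dev_warn(dev, "no Rx DMA, using PIO\n");
		return ret;
	}
	return 0;
}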
-static const struct spi_nor_controller_ops cqspi_controller_ops = {
- .prepare = cqspi_prep,
- .unprepare = cqspi_unprep,
- .read_reg = cqspi_read_reg,
- .write_reg = cqspi_write_reg,
- .read = cqspi_read,
- .write = cqspi_write,
- .erase = cqspi_erase,
+static const struct spi_controller_mem_ops cqspi_mem_ops = {
+ .exec_op = cqspi_exec_mem_op,
};
-static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
+static int cqspi_setup_flash(struct cqspi_st *cqspi)
{
struct platform_device *pdev = cqspi->pdev;
struct device *dev = &pdev->dev;
- const struct cqspi_driver_platdata *ddata;
- struct spi_nor_hwcaps hwcaps;
+ struct device_node *np = dev->of_node;
struct cqspi_flash_pdata *f_pdata;
- struct spi_nor *nor;
- struct mtd_info *mtd;
unsigned int cs;
- int i, ret;
-
- ddata = of_device_get_match_data(dev);
- if (!ddata) {
- dev_err(dev, "Couldn't find driver data\n");
- return -EINVAL;
- }
- hwcaps.mask = ddata->hwcaps_mask;
+ int ret;
/* Get flash device data */
for_each_available_child_of_node(dev->of_node, np) {
ret = of_property_read_u32(np, "reg", &cs);
if (ret) {
dev_err(dev, "Couldn't determine chip select.\n");
- goto err;
+ return ret;
}
if (cs >= CQSPI_MAX_CHIPSELECT) {
- ret = -EINVAL;
dev_err(dev, "Chip select %d out of range.\n", cs);
- goto err;
+ return -EINVAL;
}
f_pdata = &cqspi->f_pdata[cs];
@@ -1266,86 +1160,51 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
if (ret)
- goto err;
-
- nor = &f_pdata->nor;
- mtd = &nor->mtd;
-
- mtd->priv = nor;
-
- nor->dev = dev;
- spi_nor_set_flash_node(nor, np);
- nor->priv = f_pdata;
- nor->controller_ops = &cqspi_controller_ops;
-
- mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
- dev_name(dev), cs);
- if (!mtd->name) {
- ret = -ENOMEM;
- goto err;
- }
-
- ret = spi_nor_scan(nor, NULL, &hwcaps);
- if (ret)
- goto err;
-
- ret = mtd_device_register(mtd, NULL, 0);
- if (ret)
- goto err;
-
- f_pdata->registered = true;
-
- if (mtd->size <= cqspi->ahb_size) {
- f_pdata->use_direct_mode = true;
- dev_dbg(nor->dev, "using direct mode for %s\n",
- mtd->name);
-
- if (!cqspi->rx_chan)
- cqspi_request_mmap_dma(cqspi);
- }
+ return ret;
}
return 0;
-
-err:
- for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
- if (cqspi->f_pdata[i].registered)
- mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
- return ret;
}
static int cqspi_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ const struct cqspi_driver_platdata *ddata;
+ struct reset_control *rstc, *rstc_ocp;
struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct resource *res_ahb;
struct cqspi_st *cqspi;
struct resource *res;
- struct resource *res_ahb;
- struct reset_control *rstc, *rstc_ocp;
- const struct cqspi_driver_platdata *ddata;
int ret;
int irq;
- cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
- if (!cqspi)
+ master = spi_alloc_master(&pdev->dev, sizeof(*cqspi));
+ if (!master) {
+ dev_err(&pdev->dev, "spi_alloc_master failed\n");
return -ENOMEM;
+ }
+ master->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
+ master->mem_ops = &cqspi_mem_ops;
+ master->dev.of_node = pdev->dev.of_node;
+
+ cqspi = spi_master_get_devdata(master);
- mutex_init(&cqspi->bus_mutex);
cqspi->pdev = pdev;
- platform_set_drvdata(pdev, cqspi);
/* Obtain configuration from OF. */
- ret = cqspi_of_get_pdata(pdev);
+ ret = cqspi_of_get_pdata(cqspi);
if (ret) {
dev_err(dev, "Cannot get mandatory OF data.\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto probe_master_put;
}
/* Obtain QSPI clock. */
cqspi->clk = devm_clk_get(dev, NULL);
if (IS_ERR(cqspi->clk)) {
dev_err(dev, "Cannot claim QSPI clock.\n");
- return PTR_ERR(cqspi->clk);
+ ret = PTR_ERR(cqspi->clk);
+ goto probe_master_put;
}
/* Obtain and remap controller address. */
@@ -1353,7 +1212,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->iobase = devm_ioremap_resource(dev, res);
if (IS_ERR(cqspi->iobase)) {
dev_err(dev, "Cannot remap controller address.\n");
- return PTR_ERR(cqspi->iobase);
+ ret = PTR_ERR(cqspi->iobase);
+ goto probe_master_put;
}
/* Obtain and remap AHB address. */
@@ -1361,7 +1221,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
if (IS_ERR(cqspi->ahb_base)) {
dev_err(dev, "Cannot remap AHB address.\n");
- return PTR_ERR(cqspi->ahb_base);
+ ret = PTR_ERR(cqspi->ahb_base);
+ goto probe_master_put;
}
cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
cqspi->ahb_size = resource_size(res_ahb);
@@ -1370,14 +1231,16 @@ static int cqspi_probe(struct platform_device *pdev)
/* Obtain IRQ line. */
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENXIO;
+ if (irq < 0) {
+ ret = -ENXIO;
+ goto probe_master_put;
+ }
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
- return ret;
+ goto probe_master_put;
}
ret = clk_prepare_enable(cqspi->clk);
@@ -1390,13 +1253,13 @@ static int cqspi_probe(struct platform_device *pdev)
rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
if (IS_ERR(rstc)) {
dev_err(dev, "Cannot get QSPI reset.\n");
- return PTR_ERR(rstc);
+ goto probe_reset_failed;
}
rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
if (IS_ERR(rstc_ocp)) {
dev_err(dev, "Cannot get QSPI OCP reset.\n");
- return PTR_ERR(rstc_ocp);
+ goto probe_reset_failed;
}
reset_control_assert(rstc);
@@ -1407,15 +1270,21 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
ddata = of_device_get_match_data(dev);
- if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY))
- cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
- cqspi->master_ref_clk_hz);
+ if (ddata) {
+ if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
+ cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
+ cqspi->master_ref_clk_hz);
+ if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
+ master->mode_bits |= SPI_RX_OCTAL;
+ if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE))
+ cqspi->use_direct_mode = true;
+ }
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
pdev->name, cqspi);
if (ret) {
dev_err(dev, "Cannot request IRQ.\n");
- goto probe_irq_failed;
+ goto probe_reset_failed;
}
cqspi_wait_idle(cqspi);
@@ -1423,31 +1292,40 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->current_cs = -1;
cqspi->sclk = 0;
- ret = cqspi_setup_flash(cqspi, np);
+ ret = cqspi_setup_flash(cqspi);
if (ret) {
- dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
+ dev_err(dev, "failed to setup flash parameters %d\n", ret);
goto probe_setup_failed;
}
- return ret;
+ if (cqspi->use_direct_mode) {
+ ret = cqspi_request_mmap_dma(cqspi);
+ if (ret == -EPROBE_DEFER)
+ goto probe_setup_failed;
+ }
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
+ goto probe_setup_failed;
+ }
+
+ return 0;
probe_setup_failed:
cqspi_controller_enable(cqspi, 0);
-probe_irq_failed:
+probe_reset_failed:
clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+probe_master_put:
+ spi_master_put(master);
return ret;
}
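Because the controller is now allocated with spi_alloc_master(), every failure path after that point must drop the reference, hence the new probe_master_put label and the relabelled probe_reset_failed. The unwind shape, reduced to a skeleton with hypothetical helper names:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* hypothetical stand-ins for the driver's real setup steps */
static int demo_get_resources(struct platform_device *pdev) { return 0; }
static int demo_enable_clocks(struct platform_device *pdev) { return 0; }
static void demo_disable_clocks(struct platform_device *pdev) { }

/*
 * Skeleton of the reworked probe unwind: teardown runs in reverse
 * order of acquisition, and every exit after spi_alloc_master() drops
 * the controller reference.
 */
static int demo_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	ret = demo_get_resources(pdev);
	if (ret)
		goto err_master_put;

	ret = demo_enable_clocks(pdev);
	if (ret)
		goto err_master_put;

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		goto err_disable_clocks;

	return 0;

err_disable_clocks:
	demo_disable_clocks(pdev);
err_master_put:
	spi_master_put(master);
	return ret;
}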
static int cqspi_remove(struct platform_device *pdev)
{
struct cqspi_st *cqspi = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
- if (cqspi->f_pdata[i].registered)
- mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
cqspi_controller_enable(cqspi, 0);
@@ -1490,16 +1368,15 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = {
#endif
static const struct cqspi_driver_platdata cdns_qspi = {
- .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
+ .quirks = CQSPI_DISABLE_DAC_MODE,
};
static const struct cqspi_driver_platdata k2g_qspi = {
- .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
.quirks = CQSPI_NEEDS_WR_DELAY,
};
static const struct cqspi_driver_platdata am654_ospi = {
- .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK | SNOR_HWCAPS_READ_1_1_8,
+ .hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
.quirks = CQSPI_NEEDS_WR_DELAY,
};
@@ -1538,3 +1415,5 @@ MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
+MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 82a0ee09cbe1..2b6b9c1ad9d0 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -556,7 +556,7 @@ static int cdns_spi_probe(struct platform_device *pdev)
master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
master->set_cs = cdns_spi_chipselect;
master->auto_runtime_pm = true;
- master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
/* Set to default valid value */
master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
index fd6b9caffaf0..60c0d6934654 100644
--- a/drivers/spi/spi-cavium-thunderx.c
+++ b/drivers/spi/spi-cavium-thunderx.c
@@ -64,6 +64,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
p->sys_freq = SYS_FREQ_DEFAULT;
dev_info(dev, "Set system clock to %u\n", p->sys_freq);
+ master->flags = SPI_MASTER_HALF_DUPLEX;
master->num_chipselect = 4;
master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH |
SPI_LSB_FIRST | SPI_3WIRE;
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index f80e06c87fbe..8996115ce736 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -387,7 +387,7 @@ static int mcfqspi_probe(struct platform_device *pdev)
status = PTR_ERR(mcfqspi->clk);
goto fail0;
}
- clk_enable(mcfqspi->clk);
+ clk_prepare_enable(mcfqspi->clk);
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
@@ -425,7 +425,7 @@ fail2:
pm_runtime_disable(&pdev->dev);
mcfqspi_cs_teardown(mcfqspi);
fail1:
- clk_disable(mcfqspi->clk);
+ clk_disable_unprepare(mcfqspi->clk);
fail0:
spi_master_put(master);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index f71c497393a6..818f2b22875d 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -236,7 +236,8 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
/**
* davinci_spi_get_prescale - Calculates the correct prescale value
- * @maxspeed_hz: the maximum rate the SPI clock can run at
+ * @dspi: the controller data
+ * @max_speed_hz: the maximum rate the SPI clock can run at
*
* This function calculates the prescale value that generates a clock rate
* less than or equal to the specified maximum.
@@ -576,7 +577,6 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
u32 errors = 0;
struct davinci_spi_config *spicfg;
struct davinci_spi_platform_data *pdata;
- unsigned uninitialized_var(rx_buf_count);
dspi = spi_master_get_devdata(spi->master);
pdata = &dspi->pdata;
@@ -711,7 +711,7 @@ err_desc:
/**
* dummy_thread_fn - dummy thread function
* @irq: IRQ number for this SPI Master
- * @context_data: structure for SPI Master controller davinci_spi
+ * @data: structure for SPI Master controller davinci_spi
*
* This is to satisfy the request_threaded_irq() API so that the irq
* handler is called in interrupt context.
@@ -724,7 +724,7 @@ static irqreturn_t dummy_thread_fn(s32 irq, void *data)
/**
* davinci_spi_irq - Interrupt handler for SPI Master Controller
* @irq: IRQ number for this SPI Master
- * @context_data: structure for SPI Master controller davinci_spi
+ * @data: structure for SPI Master controller davinci_spi
*
* ISR will determine that interrupt arrives either for READ or WRITE command.
* According to command it will do the appropriate action. It will check
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index 5986c520b196..bb390ff67d1d 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -372,8 +372,20 @@ static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
u16 imr = 0, dma_ctrl = 0;
+ /*
+ * Having a Rx DMA channel serviced with higher priority than a Tx DMA
+ * channel might not be enough to provide a well balanced DMA-based
+ * SPI transfer interface. There might still be moments when the Tx DMA
+ * channel is occasionally handled faster than the Rx DMA channel.
+ * That in turn will eventually cause the SPI Rx FIFO to overflow if the
+ * SPI bus speed is high enough to fill the Rx FIFO before the Rx DMA
+ * channel has drained it. To fix the problem, the Tx DMA activity is
+ * intentionally slowed down by limiting the effective Tx FIFO depth to
+ * twice the Tx burst length calculated earlier by the
+ * dw_spi_dma_maxburst_init() method.
+ */
dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);
- dw_writel(dws, DW_SPI_DMATDLR, dws->fifo_len - dws->txburst);
+ dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
if (xfer->tx_buf)
dma_ctrl |= SPI_DMA_TDMAE;
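A worked example may help; the values below are assumed for illustration, and the reading of DMATDLR (a Tx DMA request is raised while the Tx FIFO level is at or below it) follows the usual DW SSI behaviour:

/*
 * Assume fifo_len = 32 and txburst = 8:
 *
 *   before: DMATDLR = fifo_len - txburst = 24, so requests fire while
 *           the FIFO holds 24 entries or fewer and DMA keeps it close
 *           to full (24..32 entries); Tx can run far ahead of Rx.
 *   after:  DMATDLR = txburst = 8, so requests fire only once the FIFO
 *           drains to 8 entries and one burst refills it to at most
 *           16 = 2 * txburst; the effective Tx depth is capped.
 */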
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 8c854b187b1d..aa676559d273 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -10,7 +10,7 @@
*
* For more information about the SPI controller see documentation on Cirrus
* Logic web site:
- * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
+ * https://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
*/
#include <linux/io.h>
@@ -214,7 +214,7 @@ static void ep93xx_do_read(struct spi_master *master)
/**
* ep93xx_spi_read_write() - perform next RX/TX transfer
- * @espi: ep93xx SPI controller struct
+ * @master: SPI master
*
* This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
* called several times, the whole transfer will be completed. Returns
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 1552b28b9515..85a5c952389a 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -11,7 +11,6 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -19,11 +18,9 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-imx.h>
-#include <linux/platform_data/spi-imx.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
@@ -101,6 +98,7 @@ struct fsl_lpspi_data {
struct clk *clk_ipg;
struct clk *clk_per;
bool is_slave;
+ bool is_only_cs1;
bool is_first_byte;
void *rx_buf;
@@ -122,8 +120,6 @@ struct fsl_lpspi_data {
bool usedma;
struct completion dma_rx_completion;
struct completion dma_tx_completion;
-
- int chipselect[];
};
static const struct of_device_id fsl_lpspi_dt_ids[] = {
@@ -224,20 +220,6 @@ static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
return 0;
}
-static int fsl_lpspi_prepare_message(struct spi_controller *controller,
- struct spi_message *msg)
-{
- struct fsl_lpspi_data *fsl_lpspi =
- spi_controller_get_devdata(controller);
- struct spi_device *spi = msg->spi;
- int gpio = fsl_lpspi->chipselect[spi->chip_select];
-
- if (gpio_is_valid(gpio))
- gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
-
- return 0;
-}
-
static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
{
u8 txfifo_cnt;
@@ -276,10 +258,9 @@ static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
temp |= fsl_lpspi->config.bpw - 1;
temp |= (fsl_lpspi->config.mode & 0x3) << 30;
+ temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
if (!fsl_lpspi->is_slave) {
temp |= fsl_lpspi->config.prescale << 27;
- temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
-
/*
* Set TCR_CONT will keep SS asserted after current transfer.
* For the first transfer, clear TCR_CONTC to assert SS.
@@ -440,7 +421,10 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
fsl_lpspi->config.mode = spi->mode;
fsl_lpspi->config.bpw = t->bits_per_word;
fsl_lpspi->config.speed_hz = t->speed_hz;
- fsl_lpspi->config.chip_select = spi->chip_select;
+ if (fsl_lpspi->is_only_cs1)
+ fsl_lpspi->config.chip_select = 1;
+ else
+ fsl_lpspi->config.chip_select = spi->chip_select;
if (!fsl_lpspi->config.speed_hz)
fsl_lpspi->config.speed_hz = spi->max_speed_hz;
@@ -831,13 +815,10 @@ static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi)
static int fsl_lpspi_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct fsl_lpspi_data *fsl_lpspi;
struct spi_controller *controller;
- struct spi_imx_master *lpspi_platform_info =
- dev_get_platdata(&pdev->dev);
struct resource *res;
- int i, ret, irq;
+ int ret, irq;
u32 temp;
bool is_slave;
@@ -857,6 +838,8 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
fsl_lpspi = spi_controller_get_devdata(controller);
fsl_lpspi->dev = &pdev->dev;
fsl_lpspi->is_slave = is_slave;
+ fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
+ "fsl,spi-only-use-cs1-sel");
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
controller->transfer_one = fsl_lpspi_transfer_one;
@@ -867,35 +850,8 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
controller->dev.of_node = pdev->dev.of_node;
controller->bus_num = pdev->id;
controller->slave_abort = fsl_lpspi_slave_abort;
-
- ret = devm_spi_register_controller(&pdev->dev, controller);
- if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_controller error.\n");
- goto out_controller_put;
- }
-
- if (!fsl_lpspi->is_slave) {
- for (i = 0; i < controller->num_chipselect; i++) {
- int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
-
- if (!gpio_is_valid(cs_gpio) && lpspi_platform_info)
- cs_gpio = lpspi_platform_info->chipselect[i];
-
- fsl_lpspi->chipselect[i] = cs_gpio;
- if (!gpio_is_valid(cs_gpio))
- continue;
-
- ret = devm_gpio_request(&pdev->dev,
- fsl_lpspi->chipselect[i],
- DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "can't get cs gpios\n");
- goto out_controller_put;
- }
- }
- controller->cs_gpios = fsl_lpspi->chipselect;
- controller->prepare_message = fsl_lpspi_prepare_message;
- }
+ if (!fsl_lpspi->is_slave)
+ controller->use_gpio_descriptors = true;
init_completion(&fsl_lpspi->xfer_done);
@@ -954,10 +910,21 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
if (ret < 0)
dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
+ ret = devm_spi_register_controller(&pdev->dev, controller);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_controller error.\n");
+ goto out_pm_get;
+ }
+
+ pm_runtime_mark_last_busy(fsl_lpspi->dev);
+ pm_runtime_put_autosuspend(fsl_lpspi->dev);
+
return 0;
out_pm_get:
- pm_runtime_put_noidle(fsl_lpspi->dev);
+ pm_runtime_dont_use_autosuspend(fsl_lpspi->dev);
+ pm_runtime_put_sync(fsl_lpspi->dev);
+ pm_runtime_disable(fsl_lpspi->dev);
out_controller_put:
spi_controller_put(controller);
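Two probe changes fold together in this file: chip-select handling moves from the hand-rolled cs-gpios loop (and its prepare_message hook) to the SPI core's GPIO-descriptor support, and controller registration moves after the runtime-PM setup so the first transfer cannot race it. A condensed sketch of the new configuration, shape assumed:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/*
 * Condensed sketch: once use_gpio_descriptors is set the SPI core
 * parses "cs-gpios" and drives the lines itself, and the only-CS1
 * quirk is read straight from the device tree.
 */
static void demo_configure(struct spi_controller *controller,
			   struct platform_device *pdev, bool *is_only_cs1)
{
	*is_only_cs1 = of_property_read_bool(pdev->dev.of_node,
					     "fsl,spi-only-use-cs1-sel");

	/* replaces the per-CS gpio_request() loop and prepare_message hook */
	controller->use_gpio_descriptors = true;
}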
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 6766262d7e75..9851551ebbe0 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -15,7 +15,7 @@
* Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
* Suresh Gupta <suresh.gupta@nxp.com>
*
- * Based on the original fsl-quadspi.c spi-nor driver:
+ * Based on the original fsl-quadspi.c SPI NOR driver:
* Author: Freescale Semiconductor, Inc.
*
*/
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 67f022b8c81d..299e9870cf58 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -90,7 +90,7 @@ static void fsl_spi_change_mode(struct spi_device *spi)
{
struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
struct spi_mpc8xxx_cs *cs = spi->controller_state;
- struct fsl_spi_reg *reg_base = mspi->reg_base;
+ struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
__be32 __iomem *mode = &reg_base->mode;
unsigned long flags;
@@ -291,7 +291,7 @@ static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, unsigned int len)
{
u32 word;
- struct fsl_spi_reg *reg_base = mspi->reg_base;
+ struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
mspi->count = len;
@@ -309,7 +309,7 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
bool is_dma_mapped)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
- struct fsl_spi_reg *reg_base;
+ struct fsl_spi_reg __iomem *reg_base;
unsigned int len = t->len;
u8 bits_per_word;
int ret;
@@ -440,7 +440,7 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
static int fsl_spi_setup(struct spi_device *spi)
{
struct mpc8xxx_spi *mpc8xxx_spi;
- struct fsl_spi_reg *reg_base;
+ struct fsl_spi_reg __iomem *reg_base;
int retval;
u32 hw_mode;
struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
@@ -495,7 +495,7 @@ static void fsl_spi_cleanup(struct spi_device *spi)
static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
{
- struct fsl_spi_reg *reg_base = mspi->reg_base;
+ struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
/* We need handle RX first */
if (events & SPIE_NE) {
@@ -530,7 +530,7 @@ static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
struct mpc8xxx_spi *mspi = context_data;
irqreturn_t ret = IRQ_NONE;
u32 events;
- struct fsl_spi_reg *reg_base = mspi->reg_base;
+ struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
/* Get interrupt events(tx/rx) */
events = mpc8xxx_spi_read_reg(&reg_base->event);
@@ -550,7 +550,7 @@ static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
- struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base;
+ struct fsl_spi_reg __iomem *reg_base = mpc8xxx_spi->reg_base;
u32 slvsel;
u16 cs = spi->chip_select;
@@ -568,7 +568,7 @@ static void fsl_spi_grlib_probe(struct device *dev)
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master = dev_get_drvdata(dev);
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
- struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base;
+ struct fsl_spi_reg __iomem *reg_base = mpc8xxx_spi->reg_base;
int mbits;
u32 capabilities;
@@ -594,7 +594,7 @@ static struct spi_master *fsl_spi_probe(struct device *dev,
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
- struct fsl_spi_reg *reg_base;
+ struct fsl_spi_reg __iomem *reg_base;
u32 regval;
int ret = 0;
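The __iomem additions are sparse annotations: reg_base points at device registers, so it must only be accessed through MMIO accessors, and the annotation lets sparse flag any plain dereference. A small sketch of what the annotation enforces, with an assumed register layout:

#include <linux/io.h>

/*
 * Sparse warns on any plain dereference of an __iomem pointer, so all
 * traffic goes through the MMIO accessors. The two-register layout
 * here is assumed for illustration.
 */
struct demo_regs {
	__be32 mode;
	__be32 event;
};

static u32 demo_read_event(struct demo_regs __iomem *regs)
{
	return ioread32be(&regs->event);	/* not: regs->event */
}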
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index c3972424af71..80cea5cd3612 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -7,6 +7,7 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/spi/spi.h>
@@ -51,7 +52,6 @@
/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY 1
#define SPI_RX_ONLY 2
-#define SPI_FULL_DUPLEX 3
#define SPI_TX_RX 7
#define SPI_CS_ASSERT 8
#define SPI_CS_DEASSERT 9
@@ -63,29 +63,26 @@
#define TIMESTAMP_AFTER BIT(3)
#define POST_CMD_DELAY BIT(4)
-enum spi_m_cmd_opcode {
- CMD_NONE,
- CMD_XFER,
- CMD_CS,
- CMD_CANCEL,
-};
-
struct spi_geni_master {
struct geni_se se;
struct device *dev;
u32 tx_fifo_depth;
u32 fifo_width_bits;
u32 tx_wm;
+ u32 last_mode;
unsigned long cur_speed_hz;
+ unsigned long cur_sclk_hz;
unsigned int cur_bits_per_word;
unsigned int tx_rem_bytes;
unsigned int rx_rem_bytes;
const struct spi_transfer *cur_xfer;
- struct completion xfer_done;
+ struct completion cs_done;
+ struct completion cancel_done;
+ struct completion abort_done;
unsigned int oversampling;
spinlock_t lock;
- enum spi_m_cmd_opcode cur_mcmd;
int irq;
+ bool cs_flag;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -95,7 +92,6 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
{
unsigned long sclk_freq;
unsigned int actual_hz;
- struct geni_se *se = &mas->se;
int ret;
ret = geni_se_clk_freq_match(&mas->se,
@@ -112,9 +108,12 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
actual_hz, sclk_freq, *clk_idx, *clk_div);
- ret = clk_set_rate(se->clk, sclk_freq);
+ ret = dev_pm_opp_set_rate(mas->dev, sclk_freq);
if (ret)
- dev_err(mas->dev, "clk_set_rate failed %d\n", ret);
+ dev_err(mas->dev, "dev_pm_opp_set_rate failed %d\n", ret);
+ else
+ mas->cur_sclk_hz = sclk_freq;
+
return ret;
}
@@ -122,24 +121,26 @@ static void handle_fifo_timeout(struct spi_master *spi,
struct spi_message *msg)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
- unsigned long time_left, flags;
+ unsigned long time_left;
struct geni_se *se = &mas->se;
- spin_lock_irqsave(&mas->lock, flags);
- reinit_completion(&mas->xfer_done);
- mas->cur_mcmd = CMD_CANCEL;
- geni_se_cancel_m_cmd(se);
+ spin_lock_irq(&mas->lock);
+ reinit_completion(&mas->cancel_done);
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
- spin_unlock_irqrestore(&mas->lock, flags);
- time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
+ mas->cur_xfer = NULL;
+ geni_se_cancel_m_cmd(se);
+ spin_unlock_irq(&mas->lock);
+
+ time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
if (time_left)
return;
- spin_lock_irqsave(&mas->lock, flags);
- reinit_completion(&mas->xfer_done);
+ spin_lock_irq(&mas->lock);
+ reinit_completion(&mas->abort_done);
geni_se_abort_m_cmd(se);
- spin_unlock_irqrestore(&mas->lock, flags);
- time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
+ spin_unlock_irq(&mas->lock);
+
+ time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
if (!time_left)
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
}
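Splitting the shared xfer_done into cs_done, cancel_done and abort_done means a late acknowledgement of one command can no longer satisfy a wait for a different one. Skeleton of the resulting two-stage recovery, with stand-in types and hardware hooks:

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

struct demo_master {
	spinlock_t lock;
	struct completion cancel_done;
	struct completion abort_done;
	struct device *dev;
};

/* hypothetical stand-ins for geni_se_cancel_m_cmd()/geni_se_abort_m_cmd() */
static void demo_hw_cancel(struct demo_master *mas) { }
static void demo_hw_abort(struct demo_master *mas) { }

/*
 * Two-stage recovery with one completion per event: a stale CANCEL
 * acknowledgement can never satisfy a wait for an ABORT one, which the
 * old shared completion allowed.
 */
static void demo_timeout_recover(struct demo_master *mas)
{
	spin_lock_irq(&mas->lock);
	reinit_completion(&mas->cancel_done);
	demo_hw_cancel(mas);
	spin_unlock_irq(&mas->lock);

	if (wait_for_completion_timeout(&mas->cancel_done, HZ))
		return;			/* cancel acknowledged in time */

	spin_lock_irq(&mas->lock);
	reinit_completion(&mas->abort_done);
	demo_hw_abort(mas);
	spin_unlock_irq(&mas->lock);

	if (!wait_for_completion_timeout(&mas->abort_done, HZ))
		dev_err(mas->dev, "failed to cancel/abort m_cmd\n");
}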
@@ -151,18 +152,24 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
struct geni_se *se = &mas->se;
unsigned long time_left;
- reinit_completion(&mas->xfer_done);
- pm_runtime_get_sync(mas->dev);
if (!(slv->mode & SPI_CS_HIGH))
set_flag = !set_flag;
- mas->cur_mcmd = CMD_CS;
+ if (set_flag == mas->cs_flag)
+ return;
+
+ mas->cs_flag = set_flag;
+
+ pm_runtime_get_sync(mas->dev);
+ spin_lock_irq(&mas->lock);
+ reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
else
geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
+ spin_unlock_irq(&mas->lock);
- time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
+ time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
if (!time_left)
handle_fifo_timeout(spi, NULL);
@@ -177,8 +184,6 @@ static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
struct geni_se *se = &mas->se;
u32 word_len;
- word_len = readl(se->base + SE_SPI_WORD_LEN);
-
/*
* If bits_per_word isn't a byte aligned value, set the packing to be
* 1 SPI word per FIFO word.
@@ -187,74 +192,94 @@ static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
pack_words = mas->fifo_width_bits / bits_per_word;
else
pack_words = 1;
- word_len &= ~WORD_LEN_MSK;
- word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
true, true);
+ word_len = (bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK;
writel(word_len, se->base + SE_SPI_WORD_LEN);
}
-static int setup_fifo_params(struct spi_device *spi_slv,
- struct spi_master *spi)
+static int geni_spi_set_clock_and_bw(struct spi_geni_master *mas,
+ unsigned long clk_hz)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ u32 clk_sel, m_clk_cfg, idx, div;
struct geni_se *se = &mas->se;
- u32 loopback_cfg, cpol, cpha, demux_output_inv;
- u32 demux_sel, clk_sel, m_clk_cfg, idx, div;
int ret;
- loopback_cfg = readl(se->base + SE_SPI_LOOPBACK);
- cpol = readl(se->base + SE_SPI_CPOL);
- cpha = readl(se->base + SE_SPI_CPHA);
- demux_output_inv = 0;
- loopback_cfg &= ~LOOPBACK_MSK;
- cpol &= ~CPOL;
- cpha &= ~CPHA;
-
- if (spi_slv->mode & SPI_LOOP)
- loopback_cfg |= LOOPBACK_ENABLE;
-
- if (spi_slv->mode & SPI_CPOL)
- cpol |= CPOL;
-
- if (spi_slv->mode & SPI_CPHA)
- cpha |= CPHA;
-
- if (spi_slv->mode & SPI_CS_HIGH)
- demux_output_inv = BIT(spi_slv->chip_select);
-
- demux_sel = spi_slv->chip_select;
- mas->cur_speed_hz = spi_slv->max_speed_hz;
- mas->cur_bits_per_word = spi_slv->bits_per_word;
+ if (clk_hz == mas->cur_speed_hz)
+ return 0;
- ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
+ ret = get_spi_clk_cfg(clk_hz, mas, &idx, &div);
if (ret) {
- dev_err(mas->dev, "Err setting clks ret(%d) for %ld\n",
- ret, mas->cur_speed_hz);
+ dev_err(mas->dev, "Err setting clk to %lu: %d\n", clk_hz, ret);
return ret;
}
+ /*
+	 * The SPI core clock gets configured with the requested frequency
+	 * or the closest frequency the clock tree can provide. For that
+	 * reason the requested frequency is stored in cur_speed_hz and
+	 * referred to on subsequent transfers instead of calling
+	 * clk_get_rate().
+ */
+ mas->cur_speed_hz = clk_hz;
+
clk_sel = idx & CLK_SEL_MSK;
m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
- spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
- writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
- writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
- writel(cpha, se->base + SE_SPI_CPHA);
- writel(cpol, se->base + SE_SPI_CPOL);
- writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
writel(clk_sel, se->base + SE_GENI_CLK_SEL);
writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
+
+	/* Set BW quota for the CPU, as the driver supports FIFO mode only. */
+ se->icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(mas->cur_speed_hz);
+ ret = geni_icc_set_bw(se);
+ if (ret)
+ return ret;
+
return 0;
}
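
The early return when clk_hz matches cur_speed_hz means both the clock reprogramming and the interconnect vote are skipped on repeated transfers at the same rate. A hedged sketch of the bandwidth-vote half using the generic interconnect API (the driver itself goes through its geni_icc_set_bw() wrapper; the path handle here would come from something like devm_of_icc_get() at probe):

	#include <linux/interconnect.h>

	/* Sketch: vote CPU-to-controller bandwidth proportional to the SPI
	 * clock, since in FIFO mode the CPU moves every byte itself. */
	static int scale_bus_vote(struct icc_path *path, unsigned long sclk_hz)
	{
		return icc_set_bw(path, Bps_to_icc(sclk_hz), 0);
	}
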
+static int setup_fifo_params(struct spi_device *spi_slv,
+ struct spi_master *spi)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct geni_se *se = &mas->se;
+ u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0;
+ u32 demux_sel;
+
+ if (mas->last_mode != spi_slv->mode) {
+ if (spi_slv->mode & SPI_LOOP)
+ loopback_cfg = LOOPBACK_ENABLE;
+
+ if (spi_slv->mode & SPI_CPOL)
+ cpol = CPOL;
+
+ if (spi_slv->mode & SPI_CPHA)
+ cpha = CPHA;
+
+ if (spi_slv->mode & SPI_CS_HIGH)
+ demux_output_inv = BIT(spi_slv->chip_select);
+
+ demux_sel = spi_slv->chip_select;
+ mas->cur_bits_per_word = spi_slv->bits_per_word;
+
+ spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
+ writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
+ writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
+ writel(cpha, se->base + SE_SPI_CPHA);
+ writel(cpol, se->base + SE_SPI_CPOL);
+ writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
+
+ mas->last_mode = spi_slv->mode;
+ }
+
+ return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
+}
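
Splitting setup_fifo_params() this way gives two independent caches: last_mode guards the mode registers, which rarely change per device, while cur_speed_hz guards the clock, which may change per transfer. A sketch of the mode-side cache, under the assumption that only the listed registers depend on the SPI mode:

	static void config_mode_cached(struct spi_geni_master *mas,
				       struct spi_device *slv)
	{
		if (mas->last_mode == slv->mode)	/* registers already match */
			return;

		/* ...write SE_SPI_LOOPBACK/CPOL/CPHA/DEMUX_* for slv->mode... */
		mas->last_mode = slv->mode;
	}
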
+
static int spi_geni_prepare_message(struct spi_master *spi,
struct spi_message *spi_msg)
{
int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
- struct geni_se *se = &mas->se;
- geni_se_select_mode(se, GENI_SE_FIFO);
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -283,7 +308,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
* Hardware programming guide suggests to configure
* RX FIFO RFR level to fifo_depth-2.
*/
- geni_se_init(se, 0x0, mas->tx_fifo_depth - 2);
+ geni_se_init(se, mas->tx_fifo_depth / 2, mas->tx_fifo_depth - 2);
/* Transmit an entire FIFO worth of data per IRQ */
mas->tx_wm = 1;
ver = geni_se_get_qup_hw_version(se);
@@ -295,6 +320,8 @@ static int spi_geni_init(struct spi_geni_master *mas)
else
mas->oversampling = 1;
+ geni_se_select_mode(se, GENI_SE_FIFO);
+
pm_runtime_put(mas->dev);
return 0;
}
@@ -306,6 +333,22 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
u32 m_cmd = 0;
u32 spi_tx_cfg, len;
struct geni_se *se = &mas->se;
+ int ret;
+
+ /*
+ * Ensure that our interrupt handler isn't still running from some
+ * prior command before we start messing with the hardware behind
+ * its back. We don't need to _keep_ the lock here since we're only
+	 * worried about racing with our interrupt handler. The SPI core
+ * already handles making sure that we're not trying to do two
+ * transfers at once or setting a chip select and doing a transfer
+ * concurrently.
+ *
+	 * NOTE: we actually _can't_ hold the lock here because we might
+	 * call clk_set_rate(), which needs to be able to sleep.
+ */
+ spin_lock_irq(&mas->lock);
+ spin_unlock_irq(&mas->lock);
spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
if (xfer->bits_per_word != mas->cur_bits_per_word) {
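
The back-to-back lock/unlock above is an "empty critical section" used purely as a barrier: it waits out any interrupt handler already running on another CPU without holding the lock across the possibly-sleeping clock call that follows. In isolation the pattern looks like:

	static void sync_with_isr(spinlock_t *lock)
	{
		spin_lock_irq(lock);	/* blocks until a running ISR drops the lock */
		spin_unlock_irq(lock);	/* we never needed to hold it, only to wait */
	}
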
@@ -314,38 +357,12 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
}
/* Speed and bits per word can be overridden per transfer */
- if (xfer->speed_hz != mas->cur_speed_hz) {
- int ret;
- u32 clk_sel, m_clk_cfg;
- unsigned int idx, div;
-
- ret = get_spi_clk_cfg(xfer->speed_hz, mas, &idx, &div);
- if (ret) {
- dev_err(mas->dev, "Err setting clks:%d\n", ret);
- return;
- }
- /*
- * SPI core clock gets configured with the requested frequency
- * or the frequency closer to the requested frequency.
- * For that reason requested frequency is stored in the
- * cur_speed_hz and referred in the consecutive transfer instead
- * of calling clk_get_rate() API.
- */
- mas->cur_speed_hz = xfer->speed_hz;
- clk_sel = idx & CLK_SEL_MSK;
- m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
- writel(clk_sel, se->base + SE_GENI_CLK_SEL);
- writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
- }
+ ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
+ if (ret)
+ return;
mas->tx_rem_bytes = 0;
mas->rx_rem_bytes = 0;
- if (xfer->tx_buf && xfer->rx_buf)
- m_cmd = SPI_FULL_DUPLEX;
- else if (xfer->tx_buf)
- m_cmd = SPI_TX_ONLY;
- else if (xfer->rx_buf)
- m_cmd = SPI_RX_ONLY;
spi_tx_cfg &= ~CS_TOGGLE;
@@ -356,17 +373,24 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
len &= TRANS_LEN_MSK;
mas->cur_xfer = xfer;
- if (m_cmd & SPI_TX_ONLY) {
+ if (xfer->tx_buf) {
+ m_cmd |= SPI_TX_ONLY;
mas->tx_rem_bytes = xfer->len;
writel(len, se->base + SE_SPI_TX_TRANS_LEN);
}
- if (m_cmd & SPI_RX_ONLY) {
+ if (xfer->rx_buf) {
+ m_cmd |= SPI_RX_ONLY;
writel(len, se->base + SE_SPI_RX_TRANS_LEN);
mas->rx_rem_bytes = xfer->len;
}
writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
- mas->cur_mcmd = CMD_XFER;
+
+ /*
+	 * Take the lock right before we start the transfer since our
+ * interrupt could come in at any time now.
+ */
+ spin_lock_irq(&mas->lock);
geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
/*
@@ -376,6 +400,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
*/
if (m_cmd & SPI_TX_ONLY)
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
+ spin_unlock_irq(&mas->lock);
}
static int spi_geni_transfer_one(struct spi_master *spi,
@@ -477,13 +502,17 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
struct spi_geni_master *mas = spi_master_get_devdata(spi);
struct geni_se *se = &mas->se;
u32 m_irq;
- unsigned long flags;
- if (mas->cur_mcmd == CMD_NONE)
+ m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+ if (!m_irq)
return IRQ_NONE;
- spin_lock_irqsave(&mas->lock, flags);
- m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+ if (m_irq & (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |
+ M_RX_FIFO_RD_ERR_EN | M_RX_FIFO_WR_ERR_EN |
+ M_TX_FIFO_RD_ERR_EN | M_TX_FIFO_WR_ERR_EN))
+ dev_warn(mas->dev, "Unexpected IRQ err status %#010x\n", m_irq);
+
+ spin_lock(&mas->lock);
if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
geni_spi_handle_rx(mas);
@@ -492,39 +521,57 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
geni_spi_handle_tx(mas);
if (m_irq & M_CMD_DONE_EN) {
- if (mas->cur_mcmd == CMD_XFER)
+ if (mas->cur_xfer) {
spi_finalize_current_transfer(spi);
- else if (mas->cur_mcmd == CMD_CS)
- complete(&mas->xfer_done);
- mas->cur_mcmd = CMD_NONE;
- /*
- * If this happens, then a CMD_DONE came before all the Tx
- * buffer bytes were sent out. This is unusual, log this
- * condition and disable the WM interrupt to prevent the
- * system from stalling due an interrupt storm.
- * If this happens when all Rx bytes haven't been received, log
- * the condition.
- * The only known time this can happen is if bits_per_word != 8
- * and some registers that expect xfer lengths in num spi_words
- * weren't written correctly.
- */
- if (mas->tx_rem_bytes) {
- writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
- dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
- mas->tx_rem_bytes, mas->cur_bits_per_word);
+ mas->cur_xfer = NULL;
+ /*
+ * If this happens, then a CMD_DONE came before all the
+ * Tx buffer bytes were sent out. This is unusual, log
+ * this condition and disable the WM interrupt to
+			 * prevent the system from stalling due to an interrupt
+ * storm.
+ *
+ * If this happens when all Rx bytes haven't been
+ * received, log the condition. The only known time
+ * this can happen is if bits_per_word != 8 and some
+ * registers that expect xfer lengths in num spi_words
+ * weren't written correctly.
+ */
+ if (mas->tx_rem_bytes) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
+ mas->tx_rem_bytes, mas->cur_bits_per_word);
+ }
+ if (mas->rx_rem_bytes)
+ dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
+ mas->rx_rem_bytes, mas->cur_bits_per_word);
+ } else {
+ complete(&mas->cs_done);
}
- if (mas->rx_rem_bytes)
- dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
- mas->rx_rem_bytes, mas->cur_bits_per_word);
}
- if ((m_irq & M_CMD_CANCEL_EN) || (m_irq & M_CMD_ABORT_EN)) {
- mas->cur_mcmd = CMD_NONE;
- complete(&mas->xfer_done);
- }
+ if (m_irq & M_CMD_CANCEL_EN)
+ complete(&mas->cancel_done);
+ if (m_irq & M_CMD_ABORT_EN)
+ complete(&mas->abort_done);
+ /*
+	 * It's safe (and a good idea) to Ack all of our interrupts at the
+ * end of the function. Specifically:
+ * - M_CMD_DONE_EN / M_RX_FIFO_LAST_EN: Edge triggered interrupts and
+ * clearing Acks. Clearing at the end relies on nobody else having
+ * started a new transfer yet or else we could be clearing _their_
+ * done bit, but everyone grabs the spinlock before starting a new
+ * transfer.
+ * - M_RX_FIFO_WATERMARK_EN / M_TX_FIFO_WATERMARK_EN: These appear
+ * to be "latched level" interrupts so it's important to clear them
+ * _after_ you've handled the condition and always safe to do so
+ * since they'll re-assert if they're still happening.
+ */
writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);
- spin_unlock_irqrestore(&mas->lock, flags);
+
+ spin_unlock(&mas->lock);
+
return IRQ_HANDLED;
}
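
The reworked ISR reads the status once, handles every asserted condition, and acks the whole mask only at the end, which the comment above justifies per interrupt type. A hedged sketch of that shape, with my_ctlr, IRQ_STATUS and IRQ_CLEAR as hypothetical names:

	static irqreturn_t fifo_isr(int irq, void *data)
	{
		struct my_ctlr *ctlr = data;		/* hypothetical driver state */
		u32 status = readl(ctlr->base + IRQ_STATUS);

		if (!status)
			return IRQ_NONE;		/* not ours (shared line) */

		spin_lock(&ctlr->lock);
		/* ...service RX/TX watermarks, complete transfers... */
		writel(status, ctlr->base + IRQ_CLEAR);	/* ack only what we saw */
		spin_unlock(&ctlr->lock);

		return IRQ_HANDLED;
	}
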
@@ -561,6 +608,17 @@ static int spi_geni_probe(struct platform_device *pdev)
mas->se.wrapper = dev_get_drvdata(dev->parent);
mas->se.base = base;
mas->se.clk = clk;
+ mas->se.opp_table = dev_pm_opp_set_clkname(&pdev->dev, "se");
+ if (IS_ERR(mas->se.opp_table))
+ return PTR_ERR(mas->se.opp_table);
+ /* OPP table is optional */
+ ret = dev_pm_opp_of_add_table(&pdev->dev);
+ if (!ret) {
+ mas->se.has_opp_table = true;
+ } else if (ret != -ENODEV) {
+ dev_err(&pdev->dev, "invalid OPP table in device tree\n");
+ return ret;
+ }
spi->bus_num = -1;
spi->dev.of_node = dev->of_node;
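
Treating -ENODEV from dev_pm_opp_of_add_table() as "no table, carry on" while failing on anything else is the standard way to make an OPP table optional. A small sketch of the same probe-time pattern:

	#include <linux/pm_opp.h>

	static int add_optional_opp_table(struct device *dev, bool *has_table)
	{
		int ret = dev_pm_opp_of_add_table(dev);

		if (!ret)
			*has_table = true;	/* table found and parsed */
		else if (ret != -ENODEV)
			return ret;		/* table present but invalid */

		return 0;			/* -ENODEV: no table, that's fine */
	}
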
@@ -574,10 +632,25 @@ static int spi_geni_probe(struct platform_device *pdev)
spi->handle_err = handle_fifo_timeout;
spi->set_cs = spi_geni_set_cs;
- init_completion(&mas->xfer_done);
+ init_completion(&mas->cs_done);
+ init_completion(&mas->cancel_done);
+ init_completion(&mas->abort_done);
spin_lock_init(&mas->lock);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
pm_runtime_enable(dev);
+ ret = geni_icc_get(&mas->se, NULL);
+ if (ret)
+ goto spi_geni_probe_runtime_disable;
+ /* Set the bus quota to a reasonable value for register access */
+ mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
+ mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
+
+ ret = geni_icc_set_bw(&mas->se);
+ if (ret)
+ goto spi_geni_probe_runtime_disable;
+
ret = spi_geni_init(mas);
if (ret)
goto spi_geni_probe_runtime_disable;
@@ -596,6 +669,9 @@ spi_geni_probe_free_irq:
spi_geni_probe_runtime_disable:
pm_runtime_disable(dev);
spi_master_put(spi);
+ if (mas->se.has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(mas->se.opp_table);
return ret;
}
@@ -609,6 +685,9 @@ static int spi_geni_remove(struct platform_device *pdev)
free_irq(mas->irq, spi);
pm_runtime_disable(&pdev->dev);
+ if (mas->se.has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(mas->se.opp_table);
return 0;
}
@@ -616,16 +695,33 @@ static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
struct spi_master *spi = dev_get_drvdata(dev);
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret;
- return geni_se_resources_off(&mas->se);
+ /* Drop the performance state vote */
+ dev_pm_opp_set_rate(dev, 0);
+
+ ret = geni_se_resources_off(&mas->se);
+ if (ret)
+ return ret;
+
+ return geni_icc_disable(&mas->se);
}
static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
struct spi_master *spi = dev_get_drvdata(dev);
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret;
+
+ ret = geni_icc_enable(&mas->se);
+ if (ret)
+ return ret;
+
+ ret = geni_se_resources_on(&mas->se);
+ if (ret)
+ return ret;
- return geni_se_resources_on(&mas->se);
+ return dev_pm_opp_set_rate(mas->dev, mas->cur_sclk_hz);
}
static int __maybe_unused spi_geni_suspend(struct device *dev)
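
Note the ordering in the two runtime-PM callbacks: suspend drops the OPP vote before turning resources off, and resume brings the interconnect and clocks back up before re-asserting the last rate, so the device never runs without its bus and performance votes. A sketch of the resume side with that ordering made explicit (my_ctlr and its fields are hypothetical):

	static int runtime_resume_sketch(struct my_ctlr *ctlr)
	{
		int ret;

		ret = icc_enable(ctlr->icc_path);	/* bus votes first */
		if (ret)
			return ret;

		ret = clk_prepare_enable(ctlr->clk);	/* then the core clock */
		if (ret) {
			icc_disable(ctlr->icc_path);
			return ret;
		}

		/* finally restore the last performance-state/clock vote */
		return dev_pm_opp_set_rate(ctlr->dev, ctlr->cur_sclk_hz);
	}
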
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 8543f5ed1099..b068537375d6 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -102,10 +101,6 @@ struct img_spfi {
bool rx_dma_busy;
};
-struct img_spfi_device_data {
- bool gpio_requested;
-};
-
static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
{
return readl(spfi->regs + reg);
@@ -442,54 +437,6 @@ static int img_spfi_unprepare(struct spi_master *master,
return 0;
}
-static int img_spfi_setup(struct spi_device *spi)
-{
- int ret = -EINVAL;
- struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);
-
- if (!spfi_data) {
- spfi_data = kzalloc(sizeof(*spfi_data), GFP_KERNEL);
- if (!spfi_data)
- return -ENOMEM;
- spfi_data->gpio_requested = false;
- spi_set_ctldata(spi, spfi_data);
- }
- if (!spfi_data->gpio_requested) {
- ret = gpio_request_one(spi->cs_gpio,
- (spi->mode & SPI_CS_HIGH) ?
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
- dev_name(&spi->dev));
- if (ret)
- dev_err(&spi->dev, "can't request chipselect gpio %d\n",
- spi->cs_gpio);
- else
- spfi_data->gpio_requested = true;
- } else {
- if (gpio_is_valid(spi->cs_gpio)) {
- int mode = ((spi->mode & SPI_CS_HIGH) ?
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH);
-
- ret = gpio_direction_output(spi->cs_gpio, mode);
- if (ret)
- dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n",
- spi->cs_gpio, ret);
- }
- }
- return ret;
-}
-
-static void img_spfi_cleanup(struct spi_device *spi)
-{
- struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);
-
- if (spfi_data) {
- if (spfi_data->gpio_requested)
- gpio_free(spi->cs_gpio);
- kfree(spfi_data);
- spi_set_ctldata(spi, NULL);
- }
-}
-
static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *xfer)
{
@@ -659,12 +606,11 @@ static int img_spfi_probe(struct platform_device *pdev)
master->max_speed_hz = max_speed_hz;
}
- master->setup = img_spfi_setup;
- master->cleanup = img_spfi_cleanup;
master->transfer_one = img_spfi_transfer_one;
master->prepare_message = img_spfi_prepare;
master->unprepare_message = img_spfi_unprepare;
master->handle_err = img_spfi_handle_err;
+ master->use_gpio_descriptors = true;
spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
if (IS_ERR(spfi->tx_ch)) {
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index b7a85e3fe1c1..38a5f1304cec 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -8,23 +8,23 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/property.h>
#include <linux/platform_data/dma-imx.h>
-#include <linux/platform_data/spi-imx.h>
#define DRIVER_NAME "spi_imx"
@@ -32,6 +32,8 @@ static bool use_dma = true;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
+#define MXC_RPM_TIMEOUT 2000 /* 2000ms */
+
#define MXC_CSPIRXDATA 0x00
#define MXC_CSPITXDATA 0x04
#define MXC_CSPICTRL 0x08
@@ -224,7 +226,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- if (!use_dma)
+ if (!use_dma || master->fallback)
return false;
if (!master->dma_rx)
@@ -723,7 +725,7 @@ static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
reg |= MX31_CSPICTRL_POL;
if (spi->mode & SPI_CS_HIGH)
reg |= MX31_CSPICTRL_SSPOL;
- if (!gpio_is_valid(spi->cs_gpio))
+ if (!spi->cs_gpiod)
reg |= (spi->chip_select) <<
(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
MX31_CSPICTRL_CS_SHIFT);
@@ -824,7 +826,7 @@ static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
reg |= MX21_CSPICTRL_POL;
if (spi->mode & SPI_CS_HIGH)
reg |= MX21_CSPICTRL_SSPOL;
- if (!gpio_is_valid(spi->cs_gpio))
+ if (!spi->cs_gpiod)
reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;
writel(reg, spi_imx->base + MXC_CSPICTRL);
@@ -1056,20 +1058,6 @@ static const struct of_device_id spi_imx_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
-static void spi_imx_chipselect(struct spi_device *spi, int is_active)
-{
- int active = is_active != BITBANG_CS_INACTIVE;
- int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);
-
- if (spi->mode & SPI_NO_CS)
- return;
-
- if (!gpio_is_valid(spi->cs_gpio))
- return;
-
- gpio_set_value(spi->cs_gpio, dev_is_lowactive ^ active);
-}
-
static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
{
u32 ctrl;
@@ -1364,11 +1352,12 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
ret = spi_imx_dma_configure(master);
if (ret)
- return ret;
+ goto dma_failure_no_start;
if (!spi_imx->devtype_data->setup_wml) {
dev_err(spi_imx->dev, "No setup_wml()?\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto dma_failure_no_start;
}
spi_imx->devtype_data->setup_wml(spi_imx);
@@ -1379,8 +1368,10 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
rx->sgl, rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc_rx)
- return -EINVAL;
+ if (!desc_rx) {
+ ret = -EINVAL;
+ goto dma_failure_no_start;
+ }
desc_rx->callback = spi_imx_dma_rx_callback;
desc_rx->callback_param = (void *)spi_imx;
@@ -1425,6 +1416,10 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
}
return transfer->len;
+/* fallback to pio */
+dma_failure_no_start:
+ transfer->error |= SPI_TRANS_FAIL_NO_START;
+ return ret;
}
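
Setting SPI_TRANS_FAIL_NO_START is only legal when the DMA transfer never actually started; the SPI core then sets the controller's fallback flag and requeues the message, so can_dma() (which now checks master->fallback) steers the retry down the PIO path. A sketch of the can_dma() side of the contract, with FIFO_THRESHOLD as a hypothetical size heuristic:

	static bool can_dma_sketch(struct spi_controller *ctlr,
				   struct spi_device *spi, struct spi_transfer *t)
	{
		if (ctlr->fallback)		/* a DMA attempt failed to start */
			return false;

		return t->len > FIFO_THRESHOLD;	/* hypothetical: DMA big transfers */
	}
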
static int spi_imx_pio_transfer(struct spi_device *spi,
@@ -1507,7 +1502,6 @@ static int spi_imx_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
- int ret;
/* flush rxfifo before transfer */
while (spi_imx->devtype_data->rx_available(spi_imx))
@@ -1516,21 +1510,8 @@ static int spi_imx_transfer(struct spi_device *spi,
if (spi_imx->slave_mode)
return spi_imx_pio_transfer_slave(spi, transfer);
- /*
- * fallback PIO mode if dma setup error happen, for example sdma
- * firmware may not be updated as ERR009165 required.
- */
- if (spi_imx->usedma) {
- ret = spi_imx_dma_transfer(spi_imx, transfer);
- if (ret != -EINVAL)
- return ret;
-
- spi_imx->devtype_data->disable_dma(spi_imx);
-
- spi_imx->usedma = false;
- spi_imx->dynamic_burst = spi_imx->devtype_data->dynamic_burst;
- dev_dbg(&spi->dev, "Fallback to PIO mode\n");
- }
+ if (spi_imx->usedma)
+ return spi_imx_dma_transfer(spi_imx, transfer);
return spi_imx_pio_transfer(spi, transfer);
}
@@ -1540,15 +1521,6 @@ static int spi_imx_setup(struct spi_device *spi)
dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
spi->mode, spi->bits_per_word, spi->max_speed_hz);
- if (spi->mode & SPI_NO_CS)
- return 0;
-
- if (gpio_is_valid(spi->cs_gpio))
- gpio_direction_output(spi->cs_gpio,
- spi->mode & SPI_CS_HIGH ? 0 : 1);
-
- spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);
-
return 0;
}
@@ -1562,20 +1534,16 @@ spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
int ret;
- ret = clk_enable(spi_imx->clk_per);
- if (ret)
- return ret;
-
- ret = clk_enable(spi_imx->clk_ipg);
- if (ret) {
- clk_disable(spi_imx->clk_per);
+ ret = pm_runtime_get_sync(spi_imx->dev);
+ if (ret < 0) {
+ dev_err(spi_imx->dev, "failed to enable clock\n");
return ret;
}
ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
if (ret) {
- clk_disable(spi_imx->clk_ipg);
- clk_disable(spi_imx->clk_per);
+ pm_runtime_mark_last_busy(spi_imx->dev);
+ pm_runtime_put_autosuspend(spi_imx->dev);
}
return ret;
@@ -1586,8 +1554,8 @@ spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- clk_disable(spi_imx->clk_ipg);
- clk_disable(spi_imx->clk_per);
+ pm_runtime_mark_last_busy(spi_imx->dev);
+ pm_runtime_put_autosuspend(spi_imx->dev);
return 0;
}
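
prepare_message()/unprepare_message() now bracket each message with a runtime-PM reference instead of raw clock calls; the autosuspend put keeps the clocks on between back-to-back messages. A sketch of the pairing, including the put_noidle needed to balance a failed get_sync:

	static int msg_prepare_sketch(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* balance the failed get */
			return ret;
		}
		return 0;
	}

	static void msg_unprepare_sketch(struct device *dev)
	{
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);	/* may suspend after the delay */
	}
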
@@ -1606,20 +1574,14 @@ static int spi_imx_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id =
of_match_device(spi_imx_dt_ids, &pdev->dev);
- struct spi_imx_master *mxc_platform_info =
- dev_get_platdata(&pdev->dev);
struct spi_master *master;
struct spi_imx_data *spi_imx;
struct resource *res;
- int i, ret, irq, spi_drctl;
+ int ret, irq, spi_drctl;
const struct spi_imx_devtype_data *devtype_data = of_id ? of_id->data :
(struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
bool slave_mode;
-
- if (!np && !mxc_platform_info) {
- dev_err(&pdev->dev, "can't get the platform data\n");
- return -EINVAL;
- }
+ u32 val;
slave_mode = devtype_data->has_slavemode &&
of_property_read_bool(np, "spi-slave");
@@ -1642,6 +1604,7 @@ static int spi_imx_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->bus_num = np ? -1 : pdev->id;
+ master->use_gpio_descriptors = true;
spi_imx = spi_master_get_devdata(master);
spi_imx->bitbang.master = master;
@@ -1650,28 +1613,17 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data = devtype_data;
- /* Get number of chip selects, either platform data or OF */
- if (mxc_platform_info) {
- master->num_chipselect = mxc_platform_info->num_chipselect;
- if (mxc_platform_info->chipselect) {
- master->cs_gpios = devm_kcalloc(&master->dev,
- master->num_chipselect, sizeof(int),
- GFP_KERNEL);
- if (!master->cs_gpios)
- return -ENOMEM;
-
- for (i = 0; i < master->num_chipselect; i++)
- master->cs_gpios[i] = mxc_platform_info->chipselect[i];
- }
- } else {
- u32 num_cs;
-
- if (!of_property_read_u32(np, "num-cs", &num_cs))
- master->num_chipselect = num_cs;
- /* If not preset, default value of 1 is used */
- }
+ /*
+	 * Get the number of chip selects from device properties. This can
+	 * come from the device tree or from board files; if it is not
+	 * defined, a default of 3 chip selects is used, as all the legacy
+	 * board files have <= 3 chip selects.
+ */
+ if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
+ master->num_chipselect = val;
+ else
+ master->num_chipselect = 3;
- spi_imx->bitbang.chipselect = spi_imx_chipselect;
spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
spi_imx->bitbang.master->setup = spi_imx_setup;
@@ -1722,13 +1674,15 @@ static int spi_imx_probe(struct platform_device *pdev)
goto out_master_put;
}
- ret = clk_prepare_enable(spi_imx->clk_per);
- if (ret)
- goto out_master_put;
+ pm_runtime_enable(spi_imx->dev);
+ pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(spi_imx->dev);
- ret = clk_prepare_enable(spi_imx->clk_ipg);
- if (ret)
- goto out_put_per;
+ ret = pm_runtime_get_sync(spi_imx->dev);
+ if (ret < 0) {
+ dev_err(spi_imx->dev, "failed to enable clock\n");
+ goto out_runtime_pm_put;
+ }
spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
/*
@@ -1738,7 +1692,7 @@ static int spi_imx_probe(struct platform_device *pdev)
if (spi_imx->devtype_data->has_dmamode) {
ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
if (ret == -EPROBE_DEFER)
- goto out_clk_put;
+ goto out_runtime_pm_put;
if (ret < 0)
dev_err(&pdev->dev, "dma setup error %d, use pio\n",
@@ -1753,38 +1707,20 @@ static int spi_imx_probe(struct platform_device *pdev)
ret = spi_bitbang_start(&spi_imx->bitbang);
if (ret) {
dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
- goto out_clk_put;
- }
-
- /* Request GPIO CS lines, if any */
- if (!spi_imx->slave_mode && master->cs_gpios) {
- for (i = 0; i < master->num_chipselect; i++) {
- if (!gpio_is_valid(master->cs_gpios[i]))
- continue;
-
- ret = devm_gpio_request(&pdev->dev,
- master->cs_gpios[i],
- DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
- master->cs_gpios[i]);
- goto out_spi_bitbang;
- }
- }
+ goto out_runtime_pm_put;
}
dev_info(&pdev->dev, "probed\n");
- clk_disable(spi_imx->clk_ipg);
- clk_disable(spi_imx->clk_per);
+ pm_runtime_mark_last_busy(spi_imx->dev);
+ pm_runtime_put_autosuspend(spi_imx->dev);
+
return ret;
-out_spi_bitbang:
- spi_bitbang_stop(&spi_imx->bitbang);
-out_clk_put:
- clk_disable_unprepare(spi_imx->clk_ipg);
-out_put_per:
- clk_disable_unprepare(spi_imx->clk_per);
+out_runtime_pm_put:
+ pm_runtime_dont_use_autosuspend(spi_imx->dev);
+ pm_runtime_put_sync(spi_imx->dev);
+ pm_runtime_disable(spi_imx->dev);
out_master_put:
spi_master_put(master);
@@ -1799,30 +1735,82 @@ static int spi_imx_remove(struct platform_device *pdev)
spi_bitbang_stop(&spi_imx->bitbang);
- ret = clk_enable(spi_imx->clk_per);
+ ret = pm_runtime_get_sync(spi_imx->dev);
+ if (ret < 0) {
+ dev_err(spi_imx->dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ writel(0, spi_imx->base + MXC_CSPICTRL);
+
+ pm_runtime_dont_use_autosuspend(spi_imx->dev);
+ pm_runtime_put_sync(spi_imx->dev);
+ pm_runtime_disable(spi_imx->dev);
+
+ spi_imx_sdma_exit(spi_imx);
+ spi_master_put(master);
+
+ return 0;
+}
+
+static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_imx_data *spi_imx;
+ int ret;
+
+ spi_imx = spi_master_get_devdata(master);
+
+ ret = clk_prepare_enable(spi_imx->clk_per);
if (ret)
return ret;
- ret = clk_enable(spi_imx->clk_ipg);
+ ret = clk_prepare_enable(spi_imx->clk_ipg);
if (ret) {
- clk_disable(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_per);
return ret;
}
- writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_disable_unprepare(spi_imx->clk_ipg);
+ return 0;
+}
+
+static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_imx_data *spi_imx;
+
+ spi_imx = spi_master_get_devdata(master);
+
clk_disable_unprepare(spi_imx->clk_per);
- spi_imx_sdma_exit(spi_imx);
- spi_master_put(master);
+ clk_disable_unprepare(spi_imx->clk_ipg);
+
+ return 0;
+}
+
+static int __maybe_unused spi_imx_suspend(struct device *dev)
+{
+ pinctrl_pm_select_sleep_state(dev);
+ return 0;
+}
+static int __maybe_unused spi_imx_resume(struct device *dev)
+{
+ pinctrl_pm_select_default_state(dev);
return 0;
}
+static const struct dev_pm_ops imx_spi_pm = {
+ SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
+ spi_imx_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
+};
+
static struct platform_driver spi_imx_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = spi_imx_dt_ids,
- },
+ .pm = &imx_spi_pm,
+ },
.id_table = spi_imx_devtype,
.probe = spi_imx_probe,
.remove = spi_imx_remove,
diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
index 1fd7ee53d451..dccef1dcea32 100644
--- a/drivers/spi/spi-lantiq-ssc.c
+++ b/drivers/spi/spi-lantiq-ssc.c
@@ -15,7 +15,6 @@
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -50,8 +49,6 @@
#define LTQ_SPI_RXCNT 0x84
#define LTQ_SPI_DMACON 0xec
#define LTQ_SPI_IRNEN 0xf4
-#define LTQ_SPI_IRNICR 0xf8
-#define LTQ_SPI_IRNCR 0xfc
#define LTQ_SPI_CLC_SMC_S 16 /* Clock divider for sleep mode */
#define LTQ_SPI_CLC_SMC_M (0xFF << LTQ_SPI_CLC_SMC_S)
@@ -61,9 +58,7 @@
#define LTQ_SPI_CLC_DISR BIT(0) /* Disable request bit */
#define LTQ_SPI_ID_TXFS_S 24 /* Implemented TX FIFO size */
-#define LTQ_SPI_ID_TXFS_M (0x3F << LTQ_SPI_ID_TXFS_S)
#define LTQ_SPI_ID_RXFS_S 16 /* Implemented RX FIFO size */
-#define LTQ_SPI_ID_RXFS_M (0x3F << LTQ_SPI_ID_RXFS_S)
#define LTQ_SPI_ID_MOD_S 8 /* Module ID */
#define LTQ_SPI_ID_MOD_M (0xff << LTQ_SPI_ID_MOD_S)
#define LTQ_SPI_ID_CFG_S 5 /* DMA interface support */
@@ -126,19 +121,15 @@
LTQ_SPI_WHBSTATE_CLRTUE)
#define LTQ_SPI_RXFCON_RXFITL_S 8 /* FIFO interrupt trigger level */
-#define LTQ_SPI_RXFCON_RXFITL_M (0x3F << LTQ_SPI_RXFCON_RXFITL_S)
#define LTQ_SPI_RXFCON_RXFLU BIT(1) /* FIFO flush */
#define LTQ_SPI_RXFCON_RXFEN BIT(0) /* FIFO enable */
#define LTQ_SPI_TXFCON_TXFITL_S 8 /* FIFO interrupt trigger level */
-#define LTQ_SPI_TXFCON_TXFITL_M (0x3F << LTQ_SPI_TXFCON_TXFITL_S)
#define LTQ_SPI_TXFCON_TXFLU BIT(1) /* FIFO flush */
#define LTQ_SPI_TXFCON_TXFEN BIT(0) /* FIFO enable */
#define LTQ_SPI_FSTAT_RXFFL_S 0
-#define LTQ_SPI_FSTAT_RXFFL_M (0x3f << LTQ_SPI_FSTAT_RXFFL_S)
#define LTQ_SPI_FSTAT_TXFFL_S 8
-#define LTQ_SPI_FSTAT_TXFFL_M (0x3f << LTQ_SPI_FSTAT_TXFFL_S)
#define LTQ_SPI_GPOCON_ISCSBN_S 8
#define LTQ_SPI_GPOCON_INVOUTN_S 0
@@ -158,9 +149,16 @@
#define LTQ_SPI_IRNEN_T_XRX BIT(0) /* Receive end interrupt request */
#define LTQ_SPI_IRNEN_ALL 0x1F
+struct lantiq_ssc_spi;
+
struct lantiq_ssc_hwcfg {
- unsigned int irnen_r;
- unsigned int irnen_t;
+ int (*cfg_irq)(struct platform_device *pdev, struct lantiq_ssc_spi *spi);
+ unsigned int irnen_r;
+ unsigned int irnen_t;
+ unsigned int irncr;
+ unsigned int irnicr;
+ bool irq_ack;
+ u32 fifo_size_mask;
};
struct lantiq_ssc_spi {
@@ -184,6 +182,7 @@ struct lantiq_ssc_spi {
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
unsigned int base_cs;
+ unsigned int fdx_tx_level;
};
static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
@@ -209,16 +208,18 @@ static void lantiq_ssc_maskl(const struct lantiq_ssc_spi *spi, u32 clr,
static unsigned int tx_fifo_level(const struct lantiq_ssc_spi *spi)
{
+ const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
- return (fstat & LTQ_SPI_FSTAT_TXFFL_M) >> LTQ_SPI_FSTAT_TXFFL_S;
+ return (fstat >> LTQ_SPI_FSTAT_TXFFL_S) & hwcfg->fifo_size_mask;
}
static unsigned int rx_fifo_level(const struct lantiq_ssc_spi *spi)
{
+ const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
- return fstat & LTQ_SPI_FSTAT_RXFFL_M;
+ return (fstat >> LTQ_SPI_FSTAT_RXFFL_S) & hwcfg->fifo_size_mask;
}
static unsigned int tx_fifo_free(const struct lantiq_ssc_spi *spi)
@@ -391,7 +392,7 @@ static int lantiq_ssc_setup(struct spi_device *spidev)
u32 gpocon;
/* GPIOs are used for CS */
- if (gpio_is_valid(spidev->cs_gpio))
+ if (spidev->cs_gpiod)
return 0;
dev_dbg(spi->dev, "using internal chipselect %u\n", cs);
@@ -481,6 +482,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi)
u32 data;
unsigned int tx_free = tx_fifo_free(spi);
+ spi->fdx_tx_level = 0;
while (spi->tx_todo && tx_free) {
switch (spi->bits_per_word) {
case 2 ... 8:
@@ -509,6 +511,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi)
lantiq_ssc_writel(spi, data, LTQ_SPI_TB);
tx_free--;
+ spi->fdx_tx_level++;
}
}
@@ -520,6 +523,13 @@ static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
u32 data;
unsigned int rx_fill = rx_fifo_level(spi);
+ /*
+	 * Wait until all the expected data has been shifted in.
+ * Otherwise, rx overrun may occur.
+ */
+ while (rx_fill != spi->fdx_tx_level)
+ rx_fill = rx_fifo_level(spi);
+
while (rx_fill) {
data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
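
The new fdx_tx_level counter keeps full-duplex RX in lock-step with TX: the read side waits until exactly as many words have arrived as the last FIFO write pushed out, which is what prevents the RX overrun mentioned in the comment. As a standalone sketch:

	static void rx_drain_sketch(struct lantiq_ssc_spi *spi)
	{
		unsigned int fill = rx_fifo_level(spi);

		/* Busy-wait until every word we clocked out has echoed back. */
		while (fill != spi->fdx_tx_level)
			fill = rx_fifo_level(spi);

		while (fill--)
			(void)lantiq_ssc_readl(spi, LTQ_SPI_RB);	/* consume */
	}
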
@@ -613,6 +623,13 @@ static void rx_request(struct lantiq_ssc_spi *spi)
static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
{
struct lantiq_ssc_spi *spi = data;
+ const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
+ u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
+ unsigned long flags;
+
+ spin_lock_irqsave(&spi->lock, flags);
+ if (hwcfg->irq_ack)
+ lantiq_ssc_writel(spi, val, hwcfg->irncr);
if (spi->tx) {
if (spi->rx && spi->rx_todo)
@@ -635,10 +652,12 @@ static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
}
}
+ spin_unlock_irqrestore(&spi->lock, flags);
return IRQ_HANDLED;
completed:
queue_work(spi->wq, &spi->work);
+ spin_unlock_irqrestore(&spi->lock, flags);
return IRQ_HANDLED;
}
@@ -646,11 +665,18 @@ completed:
static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
{
struct lantiq_ssc_spi *spi = data;
+ const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
+ u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
+ unsigned long flags;
if (!(stat & LTQ_SPI_STAT_ERRORS))
return IRQ_NONE;
+ spin_lock_irqsave(&spi->lock, flags);
+ if (hwcfg->irq_ack)
+ lantiq_ssc_writel(spi, val, hwcfg->irncr);
+
if (stat & LTQ_SPI_STAT_RUE)
dev_err(spi->dev, "receive underflow error\n");
if (stat & LTQ_SPI_STAT_TUE)
@@ -671,6 +697,25 @@ static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
if (spi->master->cur_msg)
spi->master->cur_msg->status = -EIO;
queue_work(spi->wq, &spi->work);
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t intel_lgm_ssc_isr(int irq, void *data)
+{
+ struct lantiq_ssc_spi *spi = data;
+ const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
+ u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
+
+ if (!(val & LTQ_SPI_IRNEN_ALL))
+ return IRQ_NONE;
+
+ if (val & LTQ_SPI_IRNEN_E)
+ return lantiq_ssc_err_interrupt(irq, data);
+
+ if ((val & hwcfg->irnen_t) || (val & hwcfg->irnen_r))
+ return lantiq_ssc_xmit_interrupt(irq, data);
return IRQ_HANDLED;
}
@@ -775,20 +820,84 @@ static int lantiq_ssc_transfer_one(struct spi_master *master,
return transfer_start(spi, spidev, t);
}
+static int intel_lgm_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
+{
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ return devm_request_irq(&pdev->dev, irq, intel_lgm_ssc_isr, 0, "spi", spi);
+}
+
+static int lantiq_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
+{
+ int irq, err;
+
+ irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
+ 0, LTQ_SPI_RX_IRQ_NAME, spi);
+ if (err)
+ return err;
+
+ irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
+ 0, LTQ_SPI_TX_IRQ_NAME, spi);
+
+ if (err)
+ return err;
+
+ irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_err_interrupt,
+ 0, LTQ_SPI_ERR_IRQ_NAME, spi);
+ return err;
+}
+
static const struct lantiq_ssc_hwcfg lantiq_ssc_xway = {
- .irnen_r = LTQ_SPI_IRNEN_R_XWAY,
- .irnen_t = LTQ_SPI_IRNEN_T_XWAY,
+ .cfg_irq = lantiq_cfg_irq,
+ .irnen_r = LTQ_SPI_IRNEN_R_XWAY,
+ .irnen_t = LTQ_SPI_IRNEN_T_XWAY,
+ .irnicr = 0xF8,
+ .irncr = 0xFC,
+ .fifo_size_mask = GENMASK(5, 0),
+ .irq_ack = false,
};
static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx = {
- .irnen_r = LTQ_SPI_IRNEN_R_XRX,
- .irnen_t = LTQ_SPI_IRNEN_T_XRX,
+ .cfg_irq = lantiq_cfg_irq,
+ .irnen_r = LTQ_SPI_IRNEN_R_XRX,
+ .irnen_t = LTQ_SPI_IRNEN_T_XRX,
+ .irnicr = 0xF8,
+ .irncr = 0xFC,
+ .fifo_size_mask = GENMASK(5, 0),
+ .irq_ack = false,
+};
+
+static const struct lantiq_ssc_hwcfg intel_ssc_lgm = {
+ .cfg_irq = intel_lgm_cfg_irq,
+ .irnen_r = LTQ_SPI_IRNEN_R_XRX,
+ .irnen_t = LTQ_SPI_IRNEN_T_XRX,
+ .irnicr = 0xFC,
+ .irncr = 0xF8,
+ .fifo_size_mask = GENMASK(7, 0),
+ .irq_ack = true,
};
static const struct of_device_id lantiq_ssc_match[] = {
{ .compatible = "lantiq,ase-spi", .data = &lantiq_ssc_xway, },
{ .compatible = "lantiq,falcon-spi", .data = &lantiq_ssc_xrx, },
{ .compatible = "lantiq,xrx100-spi", .data = &lantiq_ssc_xrx, },
+ { .compatible = "intel,lgm-spi", .data = &intel_ssc_lgm, },
{},
};
MODULE_DEVICE_TABLE(of, lantiq_ssc_match);
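
Hanging a per-SoC config off the of_device_id .data pointer, including a cfg_irq() callback, keeps probe() free of compatible-string branches: LGM registers one shared ISR while the xway/xrx parts request three named IRQs, but probe() just calls the hook. Roughly:

	static int irq_setup_sketch(struct platform_device *pdev,
				    struct lantiq_ssc_spi *spi)
	{
		const struct lantiq_ssc_hwcfg *hwcfg =
			of_device_get_match_data(&pdev->dev);

		if (!hwcfg)
			return -EINVAL;

		spi->hwcfg = hwcfg;
		/* one shared IRQ on LGM, three named IRQs on xway/xrx */
		return hwcfg->cfg_irq(pdev, spi);
	}
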
@@ -800,9 +909,9 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
struct lantiq_ssc_spi *spi;
const struct lantiq_ssc_hwcfg *hwcfg;
const struct of_device_id *match;
- int err, rx_irq, tx_irq, err_irq;
u32 id, supports_dma, revision;
unsigned int num_cs;
+ int err;
match = of_match_device(lantiq_ssc_match, dev);
if (!match) {
@@ -811,18 +920,6 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
}
hwcfg = match->data;
- rx_irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
- if (rx_irq < 0)
- return -ENXIO;
-
- tx_irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
- if (tx_irq < 0)
- return -ENXIO;
-
- err_irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
- if (err_irq < 0)
- return -ENXIO;
-
master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
if (!master)
return -ENOMEM;
@@ -838,18 +935,7 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
goto err_master_put;
}
- err = devm_request_irq(dev, rx_irq, lantiq_ssc_xmit_interrupt,
- 0, LTQ_SPI_RX_IRQ_NAME, spi);
- if (err)
- goto err_master_put;
-
- err = devm_request_irq(dev, tx_irq, lantiq_ssc_xmit_interrupt,
- 0, LTQ_SPI_TX_IRQ_NAME, spi);
- if (err)
- goto err_master_put;
-
- err = devm_request_irq(dev, err_irq, lantiq_ssc_err_interrupt,
- 0, LTQ_SPI_ERR_IRQ_NAME, spi);
+ err = hwcfg->cfg_irq(pdev, spi);
if (err)
goto err_master_put;
@@ -888,6 +974,7 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
master->num_chipselect = num_cs;
+ master->use_gpio_descriptors = true;
master->setup = lantiq_ssc_setup;
master->set_cs = lantiq_ssc_set_cs;
master->handle_err = lantiq_ssc_handle_err;
@@ -907,8 +994,8 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
INIT_WORK(&spi->work, lantiq_ssc_bussy_work);
id = lantiq_ssc_readl(spi, LTQ_SPI_ID);
- spi->tx_fifo_size = (id & LTQ_SPI_ID_TXFS_M) >> LTQ_SPI_ID_TXFS_S;
- spi->rx_fifo_size = (id & LTQ_SPI_ID_RXFS_M) >> LTQ_SPI_ID_RXFS_S;
+ spi->tx_fifo_size = (id >> LTQ_SPI_ID_TXFS_S) & hwcfg->fifo_size_mask;
+ spi->rx_fifo_size = (id >> LTQ_SPI_ID_RXFS_S) & hwcfg->fifo_size_mask;
supports_dma = (id & LTQ_SPI_ID_CFG_M) >> LTQ_SPI_ID_CFG_S;
revision = id & LTQ_SPI_ID_REV_M;
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index b6d79cd156fb..9522d1b5786d 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -885,10 +885,10 @@ static int spi_test_run_iter(struct spi_device *spi,
/**
* spi_test_execute_msg - default implementation to run a test
*
- * spi: @spi_device on which to run the @spi_message
- * test: the test to execute, which already contains @msg
- * tx: the tx buffer allocated for the test sequence
- * rx: the rx buffer allocated for the test sequence
+ * @spi: @spi_device on which to run the @spi_message
+ * @test: the test to execute, which already contains @msg
+ * @tx: the tx buffer allocated for the test sequence
+ * @rx: the rx buffer allocated for the test sequence
*
* Returns: error code of spi_sync as well as basic error checking
*/
@@ -957,10 +957,10 @@ EXPORT_SYMBOL_GPL(spi_test_execute_msg);
* including all the relevant iterations on:
* length and buffer alignment
*
- * spi: the spi_device to send the messages to
- * test: the test which we need to execute
- * tx: the tx buffer allocated for the test sequence
- * rx: the rx buffer allocated for the test sequence
+ * @spi: the spi_device to send the messages to
+ * @test: the test which we need to execute
+ * @tx: the tx buffer allocated for the test sequence
+ * @rx: the rx buffer allocated for the test sequence
*
* Returns: status code of spi_sync or other failures
*/
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 9a86cc27fcc0..ef53290b7d24 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -156,6 +156,12 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
op->data.dir == SPI_MEM_DATA_OUT))
return false;
+ if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
+ return false;
+
+ if (op->cmd.nbytes != 1)
+ return false;
+
return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
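
With the default check now rejecting DTR phases and multi-byte opcodes, controller drivers that can't do either simply delegate to it and layer their own constraints on top. A hedged sketch of such a supports_op() (the 4-byte address limit is a made-up controller restriction):

	static bool my_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
	{
		/* Core filter: buswidths, direction, no DTR, 1-byte opcode. */
		if (!spi_mem_default_supports_op(mem, op))
			return false;

		return op->addr.nbytes <= 4;	/* hypothetical controller limit */
	}
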
@@ -170,7 +176,7 @@ static bool spi_mem_buswidth_is_valid(u8 buswidth)
static int spi_mem_check_op(const struct spi_mem_op *op)
{
- if (!op->cmd.buswidth)
+ if (!op->cmd.buswidth || !op->cmd.nbytes)
return -EINVAL;
if ((op->addr.nbytes && !op->addr.buswidth) ||
@@ -306,8 +312,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
return ret;
}
- tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
- op->dummy.nbytes;
+ tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
/*
* Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
@@ -322,7 +327,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
tmpbuf[0] = op->cmd.opcode;
xfers[xferpos].tx_buf = tmpbuf;
- xfers[xferpos].len = sizeof(op->cmd.opcode);
+ xfers[xferpos].len = op->cmd.nbytes;
xfers[xferpos].tx_nbits = op->cmd.buswidth;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
@@ -424,8 +429,7 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
return ctlr->mem_ops->adjust_op_size(mem, op);
if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
- len = sizeof(op->cmd.opcode) + op->addr.nbytes +
- op->dummy.nbytes;
+ len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
if (len > spi_max_transfer_size(mem->spi))
return -EINVAL;
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 77f7d0e0e46a..ecba6b4a5d85 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -362,8 +362,6 @@ static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc,
static void meson_spicc_reset_fifo(struct meson_spicc_device *spicc)
{
- u32 data;
-
if (spicc->data->has_oen)
writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO,
SPICC_ENH_MAIN_CLK_AO,
@@ -373,7 +371,7 @@ static void meson_spicc_reset_fifo(struct meson_spicc_device *spicc)
spicc->base + SPICC_TESTREG);
while (meson_spicc_rxready(spicc))
- data = readl_relaxed(spicc->base + SPICC_RXDATA);
+ readl_relaxed(spicc->base + SPICC_RXDATA);
if (spicc->data->has_oen)
writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO, 0,
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index c7b039980291..8eca6f24cb79 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -70,7 +70,7 @@
* @master: the SPI master
* @regmap: regmap for device registers
* @clk: input clock of the built-in baud rate generator
- * @device: the device structure
+ * @dev: the device structure
*/
struct meson_spifc {
struct spi_master *master;
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index a556795caeef..5d643051bf3d 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -171,6 +171,9 @@ static const struct of_device_id mtk_spi_of_match[] = {
{ .compatible = "mediatek,mt8183-spi",
.data = (void *)&mt8183_compat,
},
+ { .compatible = "mediatek,mt8192-spi",
+ .data = (void *)&mt6765_compat,
+ },
{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index 7bc302b50396..b08d8e9a8ee9 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -195,7 +195,7 @@ static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
}
}
- len = MTK_NOR_PRG_MAX_SIZE - sizeof(op->cmd.opcode) - op->addr.nbytes -
+ len = MTK_NOR_PRG_MAX_SIZE - op->cmd.nbytes - op->addr.nbytes -
op->dummy.nbytes;
if (op->data.nbytes > len)
op->data.nbytes = len;
@@ -211,6 +211,12 @@ static bool mtk_nor_supports_op(struct spi_mem *mem,
if (op->cmd.buswidth != 1)
return false;
+ /* DTR ops not supported. */
+ if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
+ return false;
+ if (op->cmd.nbytes != 1)
+ return false;
+
if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op))
return true;
@@ -219,7 +225,7 @@ static bool mtk_nor_supports_op(struct spi_mem *mem,
(op->dummy.buswidth == 0) &&
(op->data.buswidth == 1);
}
- len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+ len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
if ((len > MTK_NOR_PRG_MAX_SIZE) ||
((op->data.nbytes) && (len == MTK_NOR_PRG_MAX_SIZE)))
return false;
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
index 69491f3a515d..8c630acb0110 100644
--- a/drivers/spi/spi-mxic.c
+++ b/drivers/spi/spi-mxic.c
@@ -356,6 +356,7 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
int nio = 1, i, ret;
u32 ss_ctrl;
u8 addr[8];
+ u8 opcode = op->cmd.opcode;
ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
if (ret)
@@ -393,7 +394,7 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
mxic->regs + HC_CFG);
- ret = mxic_spi_data_xfer(mxic, &op->cmd.opcode, NULL, 1);
+ ret = mxic_spi_data_xfer(mxic, &opcode, NULL, 1);
if (ret)
goto out;
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index d25ee32862e0..9468e71f03ad 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Nuvoton Technology corporation.
+#include <linux/bits.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -177,7 +178,6 @@ enum {
#define MAP_SIZE_16MB 0x1000000
#define MAP_SIZE_8MB 0x800000
-#define NUM_BITS_IN_BYTE 8
#define FIU_DRD_MAX_DUMMY_NUMBER 3
#define NPCM_MAX_CHIP_NUM 4
#define CHUNK_SIZE 16
@@ -252,8 +252,8 @@ static void npcm_fiu_set_drd(struct npcm_fiu_spi *fiu,
fiu->drd_op.addr.buswidth = op->addr.buswidth;
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
NPCM_FIU_DRD_CFG_DBW,
- ((op->dummy.nbytes * ilog2(op->addr.buswidth))
- / NUM_BITS_IN_BYTE) << NPCM_FIU_DRD_DBW_SHIFT);
+ ((op->dummy.nbytes * ilog2(op->addr.buswidth)) / BITS_PER_BYTE)
+ << NPCM_FIU_DRD_DBW_SHIFT);
fiu->drd_op.dummy.nbytes = op->dummy.nbytes;
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
NPCM_FIU_DRD_CFG_RDCMD, op->cmd.opcode);
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index 87cd0233c60b..56d10c4511db 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -10,8 +10,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/reset.h>
#include <asm/unaligned.h>
@@ -344,16 +342,9 @@ static int npcm_pspi_probe(struct platform_device *pdev)
struct npcm_pspi *priv;
struct spi_master *master;
unsigned long clk_hz;
- struct device_node *np = pdev->dev.of_node;
- int num_cs, i;
- int csgpio;
int irq;
int ret;
- num_cs = of_gpio_named_count(np, "cs-gpios");
- if (num_cs < 0)
- return num_cs;
-
master = spi_alloc_master(&pdev->dev, sizeof(*priv));
if (!master)
return -ENOMEM;
@@ -418,24 +409,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
npcm_pspi_prepare_transfer_hardware;
master->unprepare_transfer_hardware =
npcm_pspi_unprepare_transfer_hardware;
- master->num_chipselect = num_cs;
-
- for (i = 0; i < num_cs; i++) {
- csgpio = of_get_named_gpio(np, "cs-gpios", i);
- if (csgpio < 0) {
- dev_err(&pdev->dev, "failed to get csgpio#%u\n", i);
- goto out_disable_clk;
- }
- dev_dbg(&pdev->dev, "csgpio#%u = %d\n", i, csgpio);
- ret = devm_gpio_request_one(&pdev->dev, csgpio,
- GPIOF_OUT_INIT_HIGH, DRIVER_NAME);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to configure csgpio#%u %d\n"
- , i, csgpio);
- goto out_disable_clk;
- }
- }
+ master->use_gpio_descriptors = true;
/* set to default clock rate */
npcm_pspi_set_baudrate(priv, NPCM_PSPI_DEFAULT_CLK);
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index 9df7c5979c29..f3843f0ff260 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -2,7 +2,7 @@
/*
* OpenCores tiny SPI master driver
*
- * http://opencores.org/project,tiny_spi
+ * https://opencores.org/project,tiny_spi
*
* Copyright (C) 2011 Thomas Chou <thomas@wytron.com.tw>
*
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 5c704ba6d8ea..36a4922a134a 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -19,7 +19,6 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index ce8dbdbce312..71402f71ddd8 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -443,7 +443,7 @@ static void uwire_cleanup(struct spi_device *spi)
static void uwire_off(struct uwire_spi *uwire)
{
uwire_write_reg(UWIRE_SR3, 0);
- clk_disable(uwire->ck);
+ clk_disable_unprepare(uwire->ck);
spi_master_put(uwire->bitbang.master);
}
@@ -475,7 +475,7 @@ static int uwire_probe(struct platform_device *pdev)
spi_master_put(master);
return status;
}
- clk_enable(uwire->ck);
+ clk_prepare_enable(uwire->ck);
if (cpu_is_omap7xx())
uwire_idx_shift = 1;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index e9e256718ef4..1c9478e6e5d9 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -27,7 +27,6 @@
#include <linux/iopoll.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
@@ -1043,16 +1042,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
spi->controller_state = cs;
/* Link this to context save list */
list_add_tail(&cs->node, &ctx->cs);
-
- if (gpio_is_valid(spi->cs_gpio)) {
- ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
- if (ret) {
- dev_err(&spi->dev, "failed to request gpio\n");
- return ret;
- }
- gpio_direction_output(spi->cs_gpio,
- !(spi->mode & SPI_CS_HIGH));
- }
}
ret = pm_runtime_get_sync(mcspi->dev);
@@ -1080,9 +1069,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
kfree(cs);
}
-
- if (gpio_is_valid(spi->cs_gpio))
- gpio_free(spi->cs_gpio);
}
static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
@@ -1152,7 +1138,7 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
omap2_mcspi_set_enable(spi, 0);
- if (gpio_is_valid(spi->cs_gpio))
+ if (spi->cs_gpiod)
omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH);
if (par_override ||
@@ -1241,7 +1227,7 @@ out:
omap2_mcspi_set_enable(spi, 0);
- if (gpio_is_valid(spi->cs_gpio))
+ if (spi->cs_gpiod)
omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));
if (mcspi->fifo_depth > 0 && t)
@@ -1431,6 +1417,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->dev.of_node = node;
master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
+ master->use_gpio_descriptors = true;
platform_set_drvdata(pdev, master);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 43f73db22f21..b57b8b3cc26e 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -708,7 +708,7 @@ static int orion_spi_probe(struct platform_device *pdev)
/*
* Only map one page for direct access. This is enough for the
* simple TX transfer which only writes to the first word.
- * This needs to get extended for the direct SPI-NOR / SPI-NAND
+ * This needs to get extended for the direct SPI NOR / SPI NAND
* support, once this gets implemented.
*/
dir_acc = &spi->child[cs].direct_access;
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 66028ebbc336..d1776fea287e 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -298,7 +298,7 @@ enum ssp_reading {
READING_U32
};
-/**
+/*
* The type of writing going on on this chip
*/
enum ssp_writing {
@@ -317,6 +317,7 @@ enum ssp_writing {
* @extended_cr: 32 bit wide control register 0 with extra
* features and extra features in CR1 as found in the ST variants
* @pl023: supports a subset of the ST extensions called "PL023"
+ * @loopback: supports loopback mode
* @internal_cs_ctrl: supports chip select control register
*/
struct vendor_data {
@@ -353,11 +354,14 @@ struct vendor_data {
* @read: the type of read currently going on
* @write: the type of write currently going on
* @exp_fifo_level: expected FIFO level
+ * @rx_lev_trig: receive FIFO watermark level which triggers IRQ
+ * @tx_lev_trig: transmit FIFO watermark level which triggers IRQ
* @dma_rx_channel: optional channel for RX DMA
* @dma_tx_channel: optional channel for TX DMA
* @sgt_rx: scattertable for the RX transfer
* @sgt_tx: scattertable for the TX transfer
* @dummypage: a dummy page used for driving data on the bus with DMA
+ * @dma_running: indicates whether DMA is in operation
* @cur_cs: current chip select (gpio)
* @chipselects: list of chipselects (gpios)
*/
@@ -662,7 +666,7 @@ static void load_ssp_default_config(struct pl022 *pl022)
writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
-/**
+/*
* This will write to TX and read from RX according to the parameters
* set in pl022.
*/
@@ -1237,6 +1241,8 @@ static inline void pl022_dma_remove(struct pl022 *pl022)
/**
* pl022_interrupt_handler - Interrupt handler for SSP controller
+ * @irq: IRQ number
+ * @dev_id: Local device data
*
* This function handles interrupts generated for an interrupt based transfer.
* If a receive overrun (ROR) interrupt is there then we disable SSP, flag the
@@ -1334,7 +1340,7 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/**
+/*
* This sets up the pointers to memory for the next message to
* send out on the SPI bus.
*/
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 0ea2d9a369d9..d8ee363fb714 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -28,11 +28,9 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
@@ -127,8 +125,6 @@ struct ppc4xx_spi {
const unsigned char *tx;
unsigned char *rx;
- int *gpios;
-
struct spi_ppc4xx_regs __iomem *regs; /* pointer to the registers */
struct spi_master *master;
struct device *dev;
@@ -260,27 +256,6 @@ static int spi_ppc4xx_setup(struct spi_device *spi)
return 0;
}
-static void spi_ppc4xx_chipsel(struct spi_device *spi, int value)
-{
- struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master);
- unsigned int cs = spi->chip_select;
- unsigned int cspol;
-
- /*
- * If there are no chip selects at all, or if this is the special
- * case of a non-existent (dummy) chip select, do nothing.
- */
-
- if (!hw->master->num_chipselect || hw->gpios[cs] == -EEXIST)
- return;
-
- cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
- if (value == BITBANG_CS_INACTIVE)
- cspol = !cspol;
-
- gpio_set_value(hw->gpios[cs], cspol);
-}
-
static irqreturn_t spi_ppc4xx_int(int irq, void *dev_id)
{
struct ppc4xx_spi *hw;
@@ -359,19 +334,6 @@ static void spi_ppc4xx_enable(struct ppc4xx_spi *hw)
dcri_clrset(SDR0, SDR0_PFC1, 0x80000000 >> 14, 0);
}
-static void free_gpios(struct ppc4xx_spi *hw)
-{
- if (hw->master->num_chipselect) {
- int i;
- for (i = 0; i < hw->master->num_chipselect; i++)
- if (gpio_is_valid(hw->gpios[i]))
- gpio_free(hw->gpios[i]);
-
- kfree(hw->gpios);
- hw->gpios = NULL;
- }
-}
-
/*
* platform_device layer stuff...
*/
@@ -385,7 +347,6 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
struct device *dev = &op->dev;
struct device_node *opbnp;
int ret;
- int num_gpios;
const unsigned int *clk;
master = spi_alloc_master(dev, sizeof *hw);
@@ -399,74 +360,32 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
init_completion(&hw->done);
- /*
- * A count of zero implies a single SPI device without any chip-select.
- * Note that of_gpio_count counts all gpios assigned to this spi master.
- * This includes both "null" gpio's and real ones.
- */
- num_gpios = of_gpio_count(np);
- if (num_gpios > 0) {
- int i;
-
- hw->gpios = kcalloc(num_gpios, sizeof(*hw->gpios), GFP_KERNEL);
- if (!hw->gpios) {
- ret = -ENOMEM;
- goto free_master;
- }
-
- for (i = 0; i < num_gpios; i++) {
- int gpio;
- enum of_gpio_flags flags;
-
- gpio = of_get_gpio_flags(np, i, &flags);
- hw->gpios[i] = gpio;
-
- if (gpio_is_valid(gpio)) {
- /* Real CS - set the initial state. */
- ret = gpio_request(gpio, np->name);
- if (ret < 0) {
- dev_err(dev,
- "can't request gpio #%d: %d\n",
- i, ret);
- goto free_gpios;
- }
-
- gpio_direction_output(gpio,
- !!(flags & OF_GPIO_ACTIVE_LOW));
- } else if (gpio == -EEXIST) {
- ; /* No CS, but that's OK. */
- } else {
- dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
- ret = -EINVAL;
- goto free_gpios;
- }
- }
- }
-
/* Setup the state for the bitbang driver */
bbp = &hw->bitbang;
bbp->master = hw->master;
bbp->setup_transfer = spi_ppc4xx_setupxfer;
- bbp->chipselect = spi_ppc4xx_chipsel;
bbp->txrx_bufs = spi_ppc4xx_txrx;
bbp->use_dma = 0;
bbp->master->setup = spi_ppc4xx_setup;
bbp->master->cleanup = spi_ppc4xx_cleanup;
bbp->master->bits_per_word_mask = SPI_BPW_MASK(8);
+ bbp->master->use_gpio_descriptors = true;
+ /*
+ * The SPI core will count the number of GPIO descriptors to figure
+ * out the number of chip selects available on the platform.
+ */
+ bbp->master->num_chipselect = 0;
/* the spi->mode bits understood by this driver: */
bbp->master->mode_bits =
SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST;
- /* this many pins in all GPIO controllers */
- bbp->master->num_chipselect = num_gpios > 0 ? num_gpios : 0;
-
/* Get the clock for the OPB */
opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb");
if (opbnp == NULL) {
dev_err(dev, "OPB: cannot find node\n");
ret = -ENODEV;
- goto free_gpios;
+ goto free_master;
}
/* Get the clock (Hz) for the OPB */
clk = of_get_property(opbnp, "clock-frequency", NULL);
@@ -474,7 +393,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
dev_err(dev, "OPB: no clock-frequency property set\n");
of_node_put(opbnp);
ret = -ENODEV;
- goto free_gpios;
+ goto free_master;
}
hw->opb_freq = *clk;
hw->opb_freq >>= 2;
@@ -483,7 +402,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
dev_err(dev, "error while parsing device node resource\n");
- goto free_gpios;
+ goto free_master;
}
hw->mapbase = resource.start;
hw->mapsize = resource_size(&resource);
@@ -492,7 +411,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) {
dev_err(dev, "too small to map registers\n");
ret = -EINVAL;
- goto free_gpios;
+ goto free_master;
}
/* Request IRQ */
@@ -501,7 +420,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
0, "spi_ppc4xx_of", (void *)hw);
if (ret) {
dev_err(dev, "unable to allocate interrupt\n");
- goto free_gpios;
+ goto free_master;
}
if (!request_mem_region(hw->mapbase, hw->mapsize, DRIVER_NAME)) {
@@ -538,8 +457,6 @@ map_io_error:
release_mem_region(hw->mapbase, hw->mapsize);
request_mem_error:
free_irq(hw->irqnum, hw);
-free_gpios:
- free_gpios(hw);
free_master:
spi_master_put(master);
@@ -556,7 +473,6 @@ static int spi_ppc4xx_of_remove(struct platform_device *op)
release_mem_region(hw->mapbase, hw->mapsize);
free_irq(hw->irqnum, hw);
iounmap(hw->regs);
- free_gpios(hw);
spi_master_put(master);
return 0;
}
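
A minimal sketch of the chip-select pattern the spi-ppc4xx hunks above adopt, using hypothetical names (demo_spi_probe is not from this patch): once use_gpio_descriptors is set, the SPI core parses "cs-gpios" from the device tree, honours the polarity flags, and toggles the lines itself, so the driver can drop its chipselect callback and all gpio bookkeeping.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int demo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	/* transfer hooks omitted for brevity */
	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	master->dev.of_node = pdev->dev.of_node;
	/* Core parses "cs-gpios" and drives each line with the right polarity */
	master->use_gpio_descriptors = true;
	/* Leave at 0: the core derives the count from the GPIO descriptors */
	master->num_chipselect = 0;

	return devm_spi_register_master(&pdev->dev, master);
}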
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 0040362b7162..814268405ab0 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1432,6 +1432,7 @@ static void cleanup(struct spi_device *spi)
kfree(chip);
}
+#ifdef CONFIG_ACPI
static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
{ "INT33C0", LPSS_LPT_SSP },
{ "INT33C1", LPSS_LPT_SSP },
@@ -1442,6 +1443,7 @@ static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
{ },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+#endif
/*
* PCI IDs of compound devices that integrate both host controller and private
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index 3c4f83bf7084..b8857a97f40a 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -2,12 +2,14 @@
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
+#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_opp.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
@@ -139,7 +141,11 @@ struct qcom_qspi {
struct device *dev;
struct clk_bulk_data *clks;
struct qspi_xfer xfer;
- /* Lock to protect xfer and IRQ accessed registers */
+ struct icc_path *icc_path_cpu_to_qspi;
+ struct opp_table *opp_table;
+ bool has_opp_table;
+ unsigned long last_speed;
+ /* Lock to protect data accessed by IRQs */
spinlock_t lock;
};
@@ -221,6 +227,38 @@ static void qcom_qspi_handle_err(struct spi_master *master,
spin_unlock_irqrestore(&ctrl->lock, flags);
}
+static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
+{
+ int ret;
+ unsigned int avg_bw_cpu;
+
+ if (speed_hz == ctrl->last_speed)
+ return 0;
+
+ /* In regular operation (SBL_EN=1) core must be 4x transfer clock */
+ ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Set the BW quota for the CPU, as the driver supports FIFO mode only.
+ * We don't have an explicit peak requirement, so keep it equal to avg_bw.
+ */
+ avg_bw_cpu = Bps_to_icc(speed_hz);
+ ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
+ if (ret) {
+ dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ctrl->last_speed = speed_hz;
+
+ return 0;
+}
+
static int qcom_qspi_transfer_one(struct spi_master *master,
struct spi_device *slv,
struct spi_transfer *xfer)
@@ -234,12 +272,9 @@ static int qcom_qspi_transfer_one(struct spi_master *master,
if (xfer->speed_hz)
speed_hz = xfer->speed_hz;
- /* In regular operation (SBL_EN=1) core must be 4x transfer clock */
- ret = clk_set_rate(ctrl->clks[QSPI_CLK_CORE].clk, speed_hz * 4);
- if (ret) {
- dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
+ ret = qcom_qspi_set_speed(ctrl, speed_hz);
+ if (ret)
return ret;
- }
spin_lock_irqsave(&ctrl->lock, flags);
@@ -458,6 +493,29 @@ static int qcom_qspi_probe(struct platform_device *pdev)
if (ret)
goto exit_probe_master_put;
+ ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
+ if (IS_ERR(ctrl->icc_path_cpu_to_qspi)) {
+ ret = PTR_ERR(ctrl->icc_path_cpu_to_qspi);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get cpu path: %d\n", ret);
+ goto exit_probe_master_put;
+ }
+ /* Set BW vote for register access */
+ ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
+ Bps_to_icc(1000));
+ if (ret) {
+ dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
+ __func__, ret);
+ goto exit_probe_master_put;
+ }
+
+ ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
+ if (ret) {
+ dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
+ __func__, ret);
+ goto exit_probe_master_put;
+ }
+
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto exit_probe_master_put;
@@ -481,6 +539,22 @@ static int qcom_qspi_probe(struct platform_device *pdev)
master->handle_err = qcom_qspi_handle_err;
master->auto_runtime_pm = true;
+ ctrl->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core");
+ if (IS_ERR(ctrl->opp_table)) {
+ ret = PTR_ERR(ctrl->opp_table);
+ goto exit_probe_master_put;
+ }
+ /* OPP table is optional */
+ ret = dev_pm_opp_of_add_table(&pdev->dev);
+ if (!ret) {
+ ctrl->has_opp_table = true;
+ } else if (ret != -ENODEV) {
+ dev_err(&pdev->dev, "invalid OPP table in device tree\n");
+ goto exit_probe_master_put;
+ }
+
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 250);
pm_runtime_enable(dev);
ret = spi_register_master(master);
@@ -488,6 +562,9 @@ static int qcom_qspi_probe(struct platform_device *pdev)
return 0;
pm_runtime_disable(dev);
+ if (ctrl->has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(ctrl->opp_table);
exit_probe_master_put:
spi_master_put(master);
@@ -498,11 +575,15 @@ exit_probe_master_put:
static int qcom_qspi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
+ struct qcom_qspi *ctrl = spi_master_get_devdata(master);
/* Unregister _before_ disabling pm_runtime() so we stop transfers */
spi_unregister_master(master);
pm_runtime_disable(&pdev->dev);
+ if (ctrl->has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(ctrl->opp_table);
return 0;
}
@@ -511,9 +592,19 @@ static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ int ret;
+ /* Drop the performance state vote */
+ dev_pm_opp_set_rate(dev, 0);
clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);
+ ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
+ if (ret) {
+ dev_err_ratelimited(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
return 0;
}
@@ -521,8 +612,20 @@ static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ int ret;
+
+ ret = icc_enable(ctrl->icc_path_cpu_to_qspi);
+ if (ret) {
+ dev_err_ratelimited(ctrl->dev, "%s: ICC enable failed for cpu: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
+ if (ret)
+ return ret;
- return clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
+ return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
}
static int __maybe_unused qcom_qspi_suspend(struct device *dev)
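
The qcom-qspi hunks above route clock scaling through the OPP framework and add an interconnect bandwidth vote that scales with the transfer clock. A condensed, hedged sketch of that speed-change path (names and parameters here are illustrative, not the driver's):

#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>

static int demo_qspi_set_speed(struct device *dev, struct icc_path *path,
			       unsigned long *last_speed,
			       unsigned long speed_hz)
{
	u32 avg_bw;
	int ret;

	if (speed_hz == *last_speed)
		return 0;	/* nothing changed; skip the OPP/ICC calls */

	/* Core clock runs at 4x the transfer clock in this mode */
	ret = dev_pm_opp_set_rate(dev, speed_hz * 4);
	if (ret)
		return ret;

	/* FIFO mode: the CPU does the copies, so vote CPU<->QSPI bandwidth */
	avg_bw = Bps_to_icc(speed_hz);
	ret = icc_set_bw(path, avg_bw, avg_bw);
	if (ret)
		return ret;

	*last_speed = speed_hz;
	return 0;
}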
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 9b8a5e1233c0..75a8a9428ff8 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -39,8 +39,9 @@
#define ROCKCHIP_SPI_RISR 0x0034
#define ROCKCHIP_SPI_ICR 0x0038
#define ROCKCHIP_SPI_DMACR 0x003c
-#define ROCKCHIP_SPI_DMATDLR 0x0040
-#define ROCKCHIP_SPI_DMARDLR 0x0044
+#define ROCKCHIP_SPI_DMATDLR 0x0040
+#define ROCKCHIP_SPI_DMARDLR 0x0044
+#define ROCKCHIP_SPI_VERSION 0x0048
#define ROCKCHIP_SPI_TXDR 0x0400
#define ROCKCHIP_SPI_RXDR 0x0800
@@ -156,6 +157,8 @@
#define ROCKCHIP_SPI_MAX_TRANLEN 0xffff
#define ROCKCHIP_SPI_MAX_CS_NUM 2
+#define ROCKCHIP_SPI_VER2_TYPE1 0x05EC0002
+#define ROCKCHIP_SPI_VER2_TYPE2 0x00110002
struct rockchip_spi {
struct device *dev;
@@ -206,17 +209,17 @@ static inline void wait_for_idle(struct rockchip_spi *rs)
static u32 get_fifo_len(struct rockchip_spi *rs)
{
- u32 fifo;
+ u32 ver;
- for (fifo = 2; fifo < 32; fifo++) {
- writel_relaxed(fifo, rs->regs + ROCKCHIP_SPI_TXFTLR);
- if (fifo != readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFTLR))
- break;
- }
-
- writel_relaxed(0, rs->regs + ROCKCHIP_SPI_TXFTLR);
+ ver = readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION);
- return (fifo == 31) ? 0 : fifo;
+ switch (ver) {
+ case ROCKCHIP_SPI_VER2_TYPE1:
+ case ROCKCHIP_SPI_VER2_TYPE2:
+ return 64;
+ default:
+ return 32;
+ }
}
static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
@@ -288,7 +291,7 @@ static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
{
u32 words = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
- u32 rx_left = rs->rx_left - words;
+ u32 rx_left = (rs->rx_left > words) ? rs->rx_left - words : 0;
/* the hardware doesn't allow us to change fifo threshold
* level while spi is enabled, so instead make sure to leave
@@ -384,6 +387,19 @@ static void rockchip_spi_dma_txcb(void *data)
spi_finalize_current_transfer(ctlr);
}
+static u32 rockchip_spi_calc_burst_size(u32 data_len)
+{
+ u32 i;
+
+ /* burst size: 1, 2, 4, 8 */
+ for (i = 1; i < 8; i <<= 1) {
+ if (data_len & i)
+ break;
+ }
+
+ return i;
+}
+
static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
struct spi_controller *ctlr, struct spi_transfer *xfer)
{
@@ -397,7 +413,8 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
.direction = DMA_DEV_TO_MEM,
.src_addr = rs->dma_addr_rx,
.src_addr_width = rs->n_bytes,
- .src_maxburst = 1,
+ .src_maxburst = rockchip_spi_calc_burst_size(xfer->len /
+ rs->n_bytes),
};
dmaengine_slave_config(ctlr->dma_rx, &rxconf);
@@ -525,7 +542,8 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
writel_relaxed(rs->fifo_len / 2, rs->regs + ROCKCHIP_SPI_DMATDLR);
- writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
+ writel_relaxed(rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes) - 1,
+ rs->regs + ROCKCHIP_SPI_DMARDLR);
writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
/* the hardware only supports an even clock divisor, so
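
The burst-size helper added in the spi-rockchip hunks above picks the largest power of two (capped at 8) that evenly divides the FIFO word count — i.e. the lowest set bit of data_len. A quick standalone check of that rule:

#include <assert.h>
#include <stdint.h>

static uint32_t calc_burst_size(uint32_t data_len)
{
	uint32_t i;

	/* burst size: 1, 2, 4, 8 - stop at the lowest set bit */
	for (i = 1; i < 8; i <<= 1)
		if (data_len & i)
			break;
	return i;
}

int main(void)
{
	assert(calc_burst_size(7)  == 1);	/* odd length: single beats */
	assert(calc_burst_size(12) == 4);	/* 12 = 4 * 3 */
	assert(calc_burst_size(32) == 8);	/* capped at 8 */
	return 0;
}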
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
new file mode 100644
index 000000000000..ed3e548227f4
--- /dev/null
+++ b/drivers/spi/spi-rpc-if.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// RPC-IF SPI/QSPI/Octa driver
+//
+// Copyright (C) 2018 ~ 2019 Renesas Solutions Corp.
+// Copyright (C) 2019 Macronix International Co., Ltd.
+// Copyright (C) 2019 - 2020 Cogent Embedded, Inc.
+//
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#include <memory/renesas-rpc-if.h>
+
+#include <asm/unaligned.h>
+
+static void rpcif_spi_mem_prepare(struct spi_device *spi_dev,
+ const struct spi_mem_op *spi_op,
+ u64 *offs, size_t *len)
+{
+ struct rpcif *rpc = spi_controller_get_devdata(spi_dev->controller);
+ struct rpcif_op rpc_op = { };
+
+ rpc_op.cmd.opcode = spi_op->cmd.opcode;
+ rpc_op.cmd.buswidth = spi_op->cmd.buswidth;
+
+ if (spi_op->addr.nbytes) {
+ rpc_op.addr.buswidth = spi_op->addr.buswidth;
+ rpc_op.addr.nbytes = spi_op->addr.nbytes;
+ rpc_op.addr.val = spi_op->addr.val;
+ }
+
+ if (spi_op->dummy.nbytes) {
+ rpc_op.dummy.buswidth = spi_op->dummy.buswidth;
+ rpc_op.dummy.ncycles = spi_op->dummy.nbytes * 8 /
+ spi_op->dummy.buswidth;
+ }
+
+ if (spi_op->data.nbytes || (offs && len)) {
+ rpc_op.data.buswidth = spi_op->data.buswidth;
+ rpc_op.data.nbytes = spi_op->data.nbytes;
+ switch (spi_op->data.dir) {
+ case SPI_MEM_DATA_IN:
+ rpc_op.data.dir = RPCIF_DATA_IN;
+ rpc_op.data.buf.in = spi_op->data.buf.in;
+ break;
+ case SPI_MEM_DATA_OUT:
+ rpc_op.data.dir = RPCIF_DATA_OUT;
+ rpc_op.data.buf.out = spi_op->data.buf.out;
+ break;
+ case SPI_MEM_NO_DATA:
+ rpc_op.data.dir = RPCIF_NO_DATA;
+ break;
+ }
+ } else {
+ rpc_op.data.dir = RPCIF_NO_DATA;
+ }
+
+ rpcif_prepare(rpc, &rpc_op, offs, len);
+}
+
+static bool rpcif_spi_mem_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
+ op->dummy.buswidth > 4 || op->cmd.buswidth > 4 ||
+ op->addr.nbytes > 4)
+ return false;
+
+ return true;
+}
+
+static ssize_t rpcif_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct rpcif *rpc =
+ spi_controller_get_devdata(desc->mem->spi->controller);
+
+ if (offs + desc->info.offset + len > U32_MAX)
+ return -EINVAL;
+
+ rpcif_spi_mem_prepare(desc->mem->spi, &desc->info.op_tmpl, &offs, &len);
+
+ return rpcif_dirmap_read(rpc, offs, len, buf);
+}
+
+static int rpcif_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct rpcif *rpc =
+ spi_controller_get_devdata(desc->mem->spi->controller);
+
+ if (desc->info.offset + desc->info.length > U32_MAX)
+ return -ENOTSUPP;
+
+ if (!rpcif_spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
+ return -ENOTSUPP;
+
+ if (!rpc->dirmap && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
+ return -ENOTSUPP;
+
+ if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+static int rpcif_spi_mem_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct rpcif *rpc =
+ spi_controller_get_devdata(mem->spi->controller);
+
+ rpcif_spi_mem_prepare(mem->spi, op, NULL, NULL);
+
+ return rpcif_manual_xfer(rpc);
+}
+
+static const struct spi_controller_mem_ops rpcif_spi_mem_ops = {
+ .supports_op = rpcif_spi_mem_supports_op,
+ .exec_op = rpcif_spi_mem_exec_op,
+ .dirmap_create = rpcif_spi_mem_dirmap_create,
+ .dirmap_read = rpcif_spi_mem_dirmap_read,
+};
+
+static int rpcif_spi_probe(struct platform_device *pdev)
+{
+ struct device *parent = pdev->dev.parent;
+ struct spi_controller *ctlr;
+ struct rpcif *rpc;
+ int error;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*rpc));
+ if (!ctlr)
+ return -ENOMEM;
+
+ rpc = spi_controller_get_devdata(ctlr);
+ rpcif_sw_init(rpc, parent);
+
+ platform_set_drvdata(pdev, ctlr);
+
+ ctlr->dev.of_node = parent->of_node;
+
+ rpcif_enable_rpm(rpc);
+
+ ctlr->num_chipselect = 1;
+ ctlr->mem_ops = &rpcif_spi_mem_ops;
+
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_QUAD | SPI_RX_QUAD;
+ ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
+
+ rpcif_hw_init(rpc, false);
+
+ error = spi_register_controller(ctlr);
+ if (error) {
+ dev_err(&pdev->dev, "spi_register_controller failed\n");
+ goto err_put_ctlr;
+ }
+ return 0;
+
+err_put_ctlr:
+ rpcif_disable_rpm(rpc);
+ spi_controller_put(ctlr);
+
+ return error;
+}
+
+static int rpcif_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct rpcif *rpc = spi_controller_get_devdata(ctlr);
+
+ spi_unregister_controller(ctlr);
+ rpcif_disable_rpm(rpc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rpcif_spi_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+
+ return spi_controller_suspend(ctlr);
+}
+
+static int rpcif_spi_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+
+ return spi_controller_resume(ctlr);
+}
+
+static SIMPLE_DEV_PM_OPS(rpcif_spi_pm_ops, rpcif_spi_suspend, rpcif_spi_resume);
+#define DEV_PM_OPS (&rpcif_spi_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif
+
+static struct platform_driver rpcif_spi_driver = {
+ .probe = rpcif_spi_probe,
+ .remove = rpcif_spi_remove,
+ .driver = {
+ .name = "rpc-if-spi",
+ .pm = DEV_PM_OPS,
+ },
+};
+module_platform_driver(rpcif_spi_driver);
+
+MODULE_DESCRIPTION("Renesas RPC-IF SPI driver");
+MODULE_LICENSE("GPL v2");
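
For orientation, here is a spi-mem operation that the new RPC-IF driver's supports_op() above would accept — a common quad-output flash read. The opcode and geometry are a generic flash example, not values taken from this driver:

#include <linux/spi/spi-mem.h>

static int demo_read(struct spi_mem *mem, void *buf, size_t len, u32 addr)
{
	/* 0x6b = quad-output fast read: 1-wire cmd/addr, 4-wire data,
	 * one dummy byte - all within the driver's buswidth <= 4 and
	 * addr.nbytes <= 4 limits checked in supports_op().
	 */
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
			   SPI_MEM_OP_ADDR(3, addr, 1),
			   SPI_MEM_OP_DUMMY(1, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 4));

	return spi_mem_exec_op(mem, &op);
}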
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index cf67ea60dc0e..924b24441789 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -130,9 +130,11 @@ struct s3c64xx_spi_dma_data {
* @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
* @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
* @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
+ * @quirks: Bitmask of known quirks
* @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
* @clk_from_cmu: True, if the controller does not include a clock mux and
* prescaler unit.
+ * @clk_ioclk: True if the i/o clock is present on this device
*
* The Samsung s3c64xx SPI controller are used on various Samsung SoC's but
* differ in some aspects such as the size of the fifo and spi bus clock
@@ -154,6 +156,7 @@ struct s3c64xx_spi_port_config {
* @clk: Pointer to the spi clock.
* @src_clk: Pointer to the clock used to generate SPI signals.
* @ioclk: Pointer to the i/o clock between master and slave
+ * @pdev: Pointer to the device's platform_device structure
* @master: Pointer to the SPI Protocol master.
* @cntrlr_info: Platform specific data for the controller this driver manages.
* @lock: Controller specific lock.
@@ -166,7 +169,11 @@ struct s3c64xx_spi_port_config {
* @xfer_completion: To indicate completion of xfer task.
* @cur_mode: Stores the active configuration of the controller.
* @cur_bpw: Stores the active bits per word settings.
- * @cur_speed: Stores the active xfer clock speed.
+ * @cur_speed: Current clock speed
+ * @rx_dma: Local receive DMA data (e.g. chan and direction)
+ * @tx_dma: Local transmit DMA data (e.g. chan and direction)
+ * @port_conf: Local SPI port configuration data
+ * @port_id: Port identification number
*/
struct s3c64xx_spi_driver_data {
void __iomem *regs;
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index cbfac6596fad..1fdfc6e6691d 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -198,7 +198,7 @@ static void sun4i_spi_set_cs(struct spi_device *spi, bool enable)
static size_t sun4i_spi_max_transfer_size(struct spi_device *spi)
{
- return SUN4I_FIFO_DEPTH - 1;
+ return SUN4I_MAX_XFER_SIZE - 1;
}
static int sun4i_spi_transfer_one(struct spi_master *master,
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index fa11cc0e809b..19238e1b76b4 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -7,6 +7,7 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -58,10 +59,8 @@
#define SUN6I_FIFO_CTL_TF_RST BIT(31)
#define SUN6I_FIFO_STA_REG 0x1c
-#define SUN6I_FIFO_STA_RF_CNT_MASK 0x7f
-#define SUN6I_FIFO_STA_RF_CNT_BITS 0
-#define SUN6I_FIFO_STA_TF_CNT_MASK 0x7f
-#define SUN6I_FIFO_STA_TF_CNT_BITS 16
+#define SUN6I_FIFO_STA_RF_CNT_MASK GENMASK(7, 0)
+#define SUN6I_FIFO_STA_TF_CNT_MASK GENMASK(23, 16)
#define SUN6I_CLK_CTL_REG 0x24
#define SUN6I_CLK_CTL_CDR2_MASK 0xff
@@ -73,13 +72,10 @@
#define SUN6I_MAX_XFER_SIZE 0xffffff
#define SUN6I_BURST_CNT_REG 0x30
-#define SUN6I_BURST_CNT(cnt) ((cnt) & SUN6I_MAX_XFER_SIZE)
#define SUN6I_XMIT_CNT_REG 0x34
-#define SUN6I_XMIT_CNT(cnt) ((cnt) & SUN6I_MAX_XFER_SIZE)
#define SUN6I_BURST_CTL_CNT_REG 0x38
-#define SUN6I_BURST_CTL_CNT_STC(cnt) ((cnt) & SUN6I_MAX_XFER_SIZE)
#define SUN6I_TXDATA_REG 0x200
#define SUN6I_RXDATA_REG 0x300
@@ -109,21 +105,18 @@ static inline void sun6i_spi_write(struct sun6i_spi *sspi, u32 reg, u32 value)
writel(value, sspi->base_addr + reg);
}
-static inline u32 sun6i_spi_get_tx_fifo_count(struct sun6i_spi *sspi)
+static inline u32 sun6i_spi_get_rx_fifo_count(struct sun6i_spi *sspi)
{
u32 reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);
- reg >>= SUN6I_FIFO_STA_TF_CNT_BITS;
-
- return reg & SUN6I_FIFO_STA_TF_CNT_MASK;
+ return FIELD_GET(SUN6I_FIFO_STA_RF_CNT_MASK, reg);
}
-static inline void sun6i_spi_enable_interrupt(struct sun6i_spi *sspi, u32 mask)
+static inline u32 sun6i_spi_get_tx_fifo_count(struct sun6i_spi *sspi)
{
- u32 reg = sun6i_spi_read(sspi, SUN6I_INT_CTL_REG);
+ u32 reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);
- reg |= mask;
- sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, reg);
+ return FIELD_GET(SUN6I_FIFO_STA_TF_CNT_MASK, reg);
}
static inline void sun6i_spi_disable_interrupt(struct sun6i_spi *sspi, u32 mask)
@@ -134,18 +127,13 @@ static inline void sun6i_spi_disable_interrupt(struct sun6i_spi *sspi, u32 mask)
sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, reg);
}
-static inline void sun6i_spi_drain_fifo(struct sun6i_spi *sspi, int len)
+static inline void sun6i_spi_drain_fifo(struct sun6i_spi *sspi)
{
- u32 reg, cnt;
+ u32 len;
u8 byte;
/* See how much data is available */
- reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);
- reg &= SUN6I_FIFO_STA_RF_CNT_MASK;
- cnt = reg >> SUN6I_FIFO_STA_RF_CNT_BITS;
-
- if (len > cnt)
- len = cnt;
+ len = sun6i_spi_get_rx_fifo_count(sspi);
while (len--) {
byte = readb(sspi->base_addr + SUN6I_RXDATA_REG);
@@ -154,15 +142,16 @@ static inline void sun6i_spi_drain_fifo(struct sun6i_spi *sspi, int len)
}
}
-static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi, int len)
+static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi)
{
u32 cnt;
+ int len;
u8 byte;
/* See how much data we can fit */
cnt = sspi->fifo_depth - sun6i_spi_get_tx_fifo_count(sspi);
- len = min3(len, (int)cnt, sspi->len);
+ len = min((int)cnt, sspi->len);
while (len--) {
byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
@@ -201,7 +190,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout;
unsigned int start, end, tx_time;
unsigned int trig_level;
- unsigned int tx_len = 0;
+ unsigned int tx_len = 0, rx_len = 0;
int ret = 0;
u32 reg;
@@ -256,10 +245,12 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
* If it's a TX only transfer, we don't want to fill the RX
* FIFO with bogus data
*/
- if (sspi->rx_buf)
+ if (sspi->rx_buf) {
reg &= ~SUN6I_TFR_CTL_DHB;
- else
+ rx_len = tfr->len;
+ } else {
reg |= SUN6I_TFR_CTL_DHB;
+ }
/* We want to control the chip select manually */
reg |= SUN6I_TFR_CTL_CS_MANUAL;
@@ -291,9 +282,11 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
div_cdr2 = DIV_ROUND_UP(div_cdr1, 2);
if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS;
+ tfr->effective_speed_hz = mclk_rate / (2 * div_cdr2);
} else {
div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1));
reg = SUN6I_CLK_CTL_CDR1(div);
+ tfr->effective_speed_hz = mclk_rate / (1 << div);
}
sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
@@ -303,20 +296,22 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
tx_len = tfr->len;
/* Setup the counters */
- sun6i_spi_write(sspi, SUN6I_BURST_CNT_REG, SUN6I_BURST_CNT(tfr->len));
- sun6i_spi_write(sspi, SUN6I_XMIT_CNT_REG, SUN6I_XMIT_CNT(tx_len));
- sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG,
- SUN6I_BURST_CTL_CNT_STC(tx_len));
+ sun6i_spi_write(sspi, SUN6I_BURST_CNT_REG, tfr->len);
+ sun6i_spi_write(sspi, SUN6I_XMIT_CNT_REG, tx_len);
+ sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG, tx_len);
/* Fill the TX FIFO */
- sun6i_spi_fill_fifo(sspi, sspi->fifo_depth);
+ sun6i_spi_fill_fifo(sspi);
/* Enable the interrupts */
- sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, SUN6I_INT_CTL_TC);
- sun6i_spi_enable_interrupt(sspi, SUN6I_INT_CTL_TC |
- SUN6I_INT_CTL_RF_RDY);
+ reg = SUN6I_INT_CTL_TC;
+
+ if (rx_len > sspi->fifo_depth)
+ reg |= SUN6I_INT_CTL_RF_RDY;
if (tx_len > sspi->fifo_depth)
- sun6i_spi_enable_interrupt(sspi, SUN6I_INT_CTL_TF_ERQ);
+ reg |= SUN6I_INT_CTL_TF_ERQ;
+
+ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, reg);
/* Start the transfer */
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
@@ -333,10 +328,8 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
dev_name(&spi->dev), tfr->len, tfr->speed_hz,
jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
- goto out;
}
-out:
sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);
return ret;
@@ -350,14 +343,14 @@ static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
/* Transfer complete */
if (status & SUN6I_INT_CTL_TC) {
sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
- sun6i_spi_drain_fifo(sspi, sspi->fifo_depth);
+ sun6i_spi_drain_fifo(sspi);
complete(&sspi->done);
return IRQ_HANDLED;
}
/* Receive FIFO 3/4 full */
if (status & SUN6I_INT_CTL_RF_RDY) {
- sun6i_spi_drain_fifo(sspi, SUN6I_FIFO_DEPTH);
+ sun6i_spi_drain_fifo(sspi);
/* Only clear the interrupt _after_ draining the FIFO */
sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_RF_RDY);
return IRQ_HANDLED;
@@ -365,7 +358,7 @@ static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
/* Transmit FIFO 3/4 empty */
if (status & SUN6I_INT_CTL_TF_ERQ) {
- sun6i_spi_fill_fifo(sspi, SUN6I_FIFO_DEPTH);
+ sun6i_spi_fill_fifo(sspi);
if (!sspi->len)
/* nothing left to transmit */
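
The sun6i hunks above swap open-coded shift/mask pairs for GENMASK() plus FIELD_GET(), so the field position is encoded once in the mask instead of in a separate *_BITS shift constant. The equivalence in one self-contained helper (the mask mirrors the patch; the helper name is illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_STA_TF_CNT_MASK	GENMASK(23, 16)

static inline u32 demo_tx_fifo_count(u32 fifo_sta_reg)
{
	/* equivalent to: (fifo_sta_reg >> 16) & 0xff */
	return FIELD_GET(DEMO_STA_TF_CNT_MASK, fifo_sta_reg);
}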
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 366a3e5cca6b..3c41649698a5 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -2,7 +2,7 @@
/*
* TI QSPI driver
*
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
* Author: Sourav Poddar <sourav.poddar@ti.com>
*/
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index d7ea6af74743..6df2aeff2843 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -122,6 +122,7 @@ struct pch_spi_dma_ctrl {
/**
* struct pch_spi_data - Holds the SPI channel specific details
* @io_remap_addr: The remapped PCI base address
+ * @io_base_addr: Physical PCI base address of the device
* @master: Pointer to the SPI master structure
* @work: Reference to work queue handler
* @wait: Wait queue for waking up upon receiving an
@@ -138,8 +139,8 @@ struct pch_spi_dma_ctrl {
* transfer
* @rx_index: Receive data count; for bookkeeping during
* transfer
- * @tx_buff: Buffer for data to be transmitted
- * @rx_index: Buffer for Received data
+ * @pkt_tx_buff: Buffer for data to be transmitted
+ * @pkt_rx_buff: Buffer for received data
* @n_curnt_chip: The chip number that this SPI driver currently
* operates on
* @current_chip: Reference to the current chip that this SPI
@@ -151,7 +152,10 @@ struct pch_spi_dma_ctrl {
* @board_dat: Reference to the SPI device data structure
* @plat_dev: platform_device structure
* @ch: SPI channel number
+ * @dma: Local DMA information
+ * @use_dma: True if DMA is to be used
* @irq_reg_sts: Status of IRQ registration
+ * @save_total_len: Total transfer length, saved while a transfer is in progress
*/
struct pch_spi_data {
void __iomem *io_remap_addr;
@@ -1631,64 +1635,37 @@ static void pch_spi_remove(struct pci_dev *pdev)
kfree(pd_dev_save);
}
-#ifdef CONFIG_PM
-static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pch_spi_suspend(struct device *dev)
{
- int retval;
- struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
+ struct pch_pd_dev_save *pd_dev_save = dev_get_drvdata(dev);
- dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+ dev_dbg(dev, "%s ENTRY\n", __func__);
pd_dev_save->board_dat->suspend_sts = true;
- /* save config space */
- retval = pci_save_state(pdev);
- if (retval == 0) {
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- } else {
- dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
- }
-
- return retval;
+ return 0;
}
-static int pch_spi_resume(struct pci_dev *pdev)
+static int __maybe_unused pch_spi_resume(struct device *dev)
{
- int retval;
- struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
- dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+ struct pch_pd_dev_save *pd_dev_save = dev_get_drvdata(dev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
+ dev_dbg(dev, "%s ENTRY\n", __func__);
- retval = pci_enable_device(pdev);
- if (retval < 0) {
- dev_err(&pdev->dev,
- "%s pci_enable_device failed\n", __func__);
- } else {
- pci_enable_wake(pdev, PCI_D3hot, 0);
-
- /* set suspend status to false */
- pd_dev_save->board_dat->suspend_sts = false;
- }
+ /* set suspend status to false */
+ pd_dev_save->board_dat->suspend_sts = false;
- return retval;
+ return 0;
}
-#else
-#define pch_spi_suspend NULL
-#define pch_spi_resume NULL
-#endif
+static SIMPLE_DEV_PM_OPS(pch_spi_pm_ops, pch_spi_suspend, pch_spi_resume);
static struct pci_driver pch_spi_pcidev_driver = {
.name = "pch_spi",
.id_table = pch_spi_pcidev_id,
.probe = pch_spi_probe,
.remove = pch_spi_remove,
- .suspend = pch_spi_suspend,
- .resume = pch_spi_resume,
+ .driver.pm = &pch_spi_pm_ops,
};
static int __init pch_spi_init(void)
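
A sketch of the generic dev_pm_ops pattern the spi-topcliff-pch hunks above migrate to, with illustrative names: once .driver.pm is set, the PCI core saves/restores config space and manages power states itself, so the callbacks keep only device-specific bookkeeping.

#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused demo_suspend(struct device *dev)
{
	/* device-specific quiesce only; no pci_save_state() needed */
	return 0;
}

static int __maybe_unused demo_resume(struct device *dev)
{
	/* PCI core has already restored config space and power state */
	return 0;
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_driver = {
	.name		= "demo",
	.driver.pm	= &demo_pm_ops,
	/* probe/remove/id_table omitted for brevity */
};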
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 17641157354d..5d8a5ee62fa2 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -119,6 +119,7 @@
/**
* struct zynq_qspi - Defines qspi driver instance
+ * @dev: Pointer to this device's information
* @regs: Virtual address of the QSPI controller registers
* @refclk: Pointer to the peripheral clock
* @pclk: Pointer to the APB clock
@@ -316,7 +317,7 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
/**
* zynq_qspi_config_op - Configure QSPI controller for specified transfer
* @xqspi: Pointer to the zynq_qspi structure
- * @qspi: Pointer to the spi_device structure
+ * @spi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
@@ -527,20 +528,21 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
int err = 0, i;
u8 *tmpbuf;
+ u8 opcode = op->cmd.opcode;
dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ opcode, op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth);
zynq_qspi_chipselect(mem->spi, true);
zynq_qspi_config_op(xqspi, mem->spi);
- if (op->cmd.opcode) {
+ if (op->cmd.nbytes) {
reinit_completion(&xqspi->data_completion);
- xqspi->txbuf = (u8 *)&op->cmd.opcode;
+ xqspi->txbuf = &opcode;
xqspi->rxbuf = NULL;
- xqspi->tx_bytes = sizeof(op->cmd.opcode);
- xqspi->rx_bytes = sizeof(op->cmd.opcode);
+ xqspi->tx_bytes = op->cmd.nbytes;
+ xqspi->rx_bytes = op->cmd.nbytes;
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
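
Why the local opcode copy above helps, sketched with a hypothetical demo_fifo_tx() standing in for the driver's FIFO path: spi-mem opcodes can be wider than one byte, so pointing the TX buffer at &op->cmd.opcode and sending sizeof() bytes could put padding on the wire; copying the value and sending op->cmd.nbytes keeps the wire format right.

#include <linux/spi/spi-mem.h>
#include <linux/types.h>

static int demo_fifo_tx(const u8 *buf, unsigned int len)
{
	/* stand-in for the driver's FIFO write + IRQ wait */
	return 0;
}

static int demo_send_opcode(const struct spi_mem_op *op)
{
	u8 opcode = op->cmd.opcode;	/* take the wire byte by value */

	/* A stack local is safe: the transfer completes synchronously
	 * before this function returns.
	 */
	return demo_fifo_tx(&opcode, op->cmd.nbytes);
}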
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 811c97a7c858..e17a20125255 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -197,8 +197,8 @@ static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset,
/**
* zynqmp_gqspi_selectslave: For selection of slave device
* @instanceptr: Pointer to the zynqmp_qspi structure
- * @flashcs: For chip select
- * @flashbus: To check which bus is selected- upper or lower
+ * @slavecs: For chip select
+ * @slavebus: To check which bus is selected- upper or lower
*/
static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
u8 slavecs, u8 slavebus)
@@ -892,7 +892,7 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
/**
* zynqmp_qspi_suspend: Suspend method for the QSPI driver
- * @_dev: Address of the platform_device structure
+ * @dev: Address of the platform_device structure
*
* This function stops the QSPI driver queue and disables the QSPI controller
*
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8158e281f354..0b260484b4f5 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -778,6 +778,17 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
{
bool enable1 = enable;
+ /*
+ * Avoid calling into the driver (or doing delays) if the chip select
+ * isn't actually changing from the last time this was called.
+ */
+ if ((spi->controller->last_cs_enable == enable) &&
+ (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
+ return;
+
+ spi->controller->last_cs_enable = enable;
+ spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
+
if (!spi->controller->set_cs_timing) {
if (enable1)
spi_delay_exec(&spi->controller->cs_setup, NULL);
@@ -982,6 +993,8 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
+ ctlr->cur_msg_mapped = false;
+
return 0;
}
#else /* !CONFIG_HAS_DMA */
@@ -1234,8 +1247,17 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
if (xfer->tx_buf || xfer->rx_buf) {
reinit_completion(&ctlr->xfer_completion);
+fallback_pio:
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
+ if (ctlr->cur_msg_mapped &&
+ (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+ __spi_unmap_msg(ctlr, msg);
+ ctlr->fallback = true;
+ xfer->error &= ~SPI_TRANS_FAIL_NO_START;
+ goto fallback_pio;
+ }
+
SPI_STATISTICS_INCREMENT_FIELD(statm,
errors);
SPI_STATISTICS_INCREMENT_FIELD(stats,
@@ -1314,6 +1336,14 @@ void spi_finalize_current_transfer(struct spi_controller *ctlr)
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
+static void spi_idle_runtime_pm(struct spi_controller *ctlr)
+{
+ if (ctlr->auto_runtime_pm) {
+ pm_runtime_mark_last_busy(ctlr->dev.parent);
+ pm_runtime_put_autosuspend(ctlr->dev.parent);
+ }
+}
+
/**
* __spi_pump_messages - function which processes spi message queue
* @ctlr: controller to process queue for
@@ -1346,7 +1376,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
/* If another context is idling the device then defer */
if (ctlr->idling) {
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
@@ -1358,10 +1388,17 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
return;
}
- /* Only do teardown in the thread */
+ /* Defer any non-atomic teardown to the thread */
if (!in_kthread) {
- kthread_queue_work(&ctlr->kworker,
- &ctlr->pump_messages);
+ if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
+ !ctlr->unprepare_transfer_hardware) {
+ spi_idle_runtime_pm(ctlr);
+ ctlr->busy = false;
+ trace_spi_controller_idle(ctlr);
+ } else {
+ kthread_queue_work(ctlr->kworker,
+ &ctlr->pump_messages);
+ }
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
@@ -1378,10 +1415,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
ctlr->unprepare_transfer_hardware(ctlr))
dev_err(&ctlr->dev,
"failed to unprepare transfer hardware\n");
- if (ctlr->auto_runtime_pm) {
- pm_runtime_mark_last_busy(ctlr->dev.parent);
- pm_runtime_put_autosuspend(ctlr->dev.parent);
- }
+ spi_idle_runtime_pm(ctlr);
trace_spi_controller_idle(ctlr);
spin_lock_irqsave(&ctlr->queue_lock, flags);
@@ -1596,7 +1630,7 @@ static void spi_set_thread_rt(struct spi_controller *ctlr)
dev_info(&ctlr->dev,
"will run message pump with realtime priority\n");
- sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
+ sched_setscheduler(ctlr->kworker->task, SCHED_FIFO, &param);
}
static int spi_init_queue(struct spi_controller *ctlr)
@@ -1604,13 +1638,12 @@ static int spi_init_queue(struct spi_controller *ctlr)
ctlr->running = false;
ctlr->busy = false;
- kthread_init_worker(&ctlr->kworker);
- ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
- "%s", dev_name(&ctlr->dev));
- if (IS_ERR(ctlr->kworker_task)) {
- dev_err(&ctlr->dev, "failed to create message pump task\n");
- return PTR_ERR(ctlr->kworker_task);
+ ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+ if (IS_ERR(ctlr->kworker)) {
+ dev_err(&ctlr->dev, "failed to create message pump kworker\n");
+ return PTR_ERR(ctlr->kworker);
}
+
kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
/*
@@ -1693,7 +1726,8 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
spin_lock_irqsave(&ctlr->queue_lock, flags);
ctlr->cur_msg = NULL;
ctlr->cur_msg_prepared = false;
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ ctlr->fallback = false;
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
trace_spi_message_done(mesg);
@@ -1719,7 +1753,7 @@ static int spi_start_queue(struct spi_controller *ctlr)
ctlr->cur_msg = NULL;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
return 0;
}
@@ -1775,8 +1809,7 @@ static int spi_destroy_queue(struct spi_controller *ctlr)
return ret;
}
- kthread_flush_worker(&ctlr->kworker);
- kthread_stop(ctlr->kworker_task);
+ kthread_destroy_worker(ctlr->kworker);
return 0;
}
@@ -1799,7 +1832,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
list_add_tail(&msg->queue, &ctlr->queue);
if (!ctlr->busy && need_pump)
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return 0;
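
The spi.c hunks above move the message pump from an open-coded kthread_init_worker()/kthread_run() pair to the managed kthread_worker API: kthread_create_worker() returns a ready-to-use worker, and kthread_destroy_worker() flushes and stops it in one call. A self-contained sketch (names are illustrative):

#include <linux/err.h>
#include <linux/kthread.h>

struct demo_ctx {
	struct kthread_worker *kworker;
	struct kthread_work work;
};

static void demo_work_fn(struct kthread_work *work)
{
	/* the message pump body would run here */
}

static int demo_init(struct demo_ctx *ctx, const char *name)
{
	ctx->kworker = kthread_create_worker(0, "%s", name);
	if (IS_ERR(ctx->kworker))
		return PTR_ERR(ctx->kworker);

	kthread_init_work(&ctx->work, demo_work_fn);
	kthread_queue_work(ctx->kworker, &ctx->work);
	return 0;
}

static void demo_exit(struct demo_ctx *ctx)
{
	kthread_destroy_worker(ctx->kworker);	/* flushes, then stops */
}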
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 59e07675ef86..455e99c4958e 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -224,6 +224,11 @@ static int spidev_message(struct spidev_data *spidev,
for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
n;
n--, k_tmp++, u_tmp++) {
+ /* Ensure that the following allocations from rx_buf/tx_buf also meet
+ * DMA alignment requirements.
+ */
+ unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN);
+
k_tmp->len = u_tmp->len;
total += k_tmp->len;
@@ -239,17 +244,17 @@ static int spidev_message(struct spidev_data *spidev,
if (u_tmp->rx_buf) {
/* this transfer needs space in RX bounce buffer */
- rx_total += k_tmp->len;
+ rx_total += len_aligned;
if (rx_total > bufsiz) {
status = -EMSGSIZE;
goto done;
}
k_tmp->rx_buf = rx_buf;
- rx_buf += k_tmp->len;
+ rx_buf += len_aligned;
}
if (u_tmp->tx_buf) {
/* this transfer needs space in TX bounce buffer */
- tx_total += k_tmp->len;
+ tx_total += len_aligned;
if (tx_total > bufsiz) {
status = -EMSGSIZE;
goto done;
@@ -259,7 +264,7 @@ static int spidev_message(struct spidev_data *spidev,
(uintptr_t) u_tmp->tx_buf,
u_tmp->len))
goto done;
- tx_buf += k_tmp->len;
+ tx_buf += len_aligned;
}
k_tmp->cs_change = !!u_tmp->cs_change;
@@ -293,16 +298,16 @@ static int spidev_message(struct spidev_data *spidev,
goto done;
/* copy any rx data out of bounce buffer */
- rx_buf = spidev->rx_buffer;
- for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
+ for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
+ n;
+ n--, k_tmp++, u_tmp++) {
if (u_tmp->rx_buf) {
if (copy_to_user((u8 __user *)
- (uintptr_t) u_tmp->rx_buf, rx_buf,
+ (uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
u_tmp->len)) {
status = -EFAULT;
goto done;
}
- rx_buf += u_tmp->len;
}
}
status = total;
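
The spidev change above rounds each transfer's slice of the shared bounce buffer up to ARCH_KMALLOC_MINALIGN, so the next slice also starts on a DMA-safe boundary. The layout rule in isolation (the helper name is illustrative):

#include <linux/kernel.h>
#include <linux/slab.h>

/* Lay out per-transfer slices in one bounce buffer; offsets[] receives
 * each transfer's start offset, and the total size consumed is returned.
 */
static size_t demo_layout(const unsigned int *lens, size_t n,
			  size_t *offsets)
{
	size_t i, total = 0;

	for (i = 0; i < n; i++) {
		offsets[i] = total;
		total += ALIGN(lens[i], ARCH_KMALLOC_MINALIGN);
	}
	return total;
}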
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 3861cb659cb9..6c647ba4ba0b 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -119,7 +119,7 @@ void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
static enum ssb_clksrc chipco_pctl_get_slowclksrc(struct ssb_chipcommon *cc)
{
struct ssb_bus *bus = cc->dev->bus;
- u32 uninitialized_var(tmp);
+ u32 tmp;
if (cc->dev->id.revision < 6) {
if (bus->bustype == SSB_BUSTYPE_SSB ||
@@ -149,7 +149,7 @@ static enum ssb_clksrc chipco_pctl_get_slowclksrc(struct ssb_chipcommon *cc)
/* Get maximum or minimum (depending on get_max flag) slowclock frequency. */
static int chipco_pctl_clockfreqlimit(struct ssb_chipcommon *cc, int get_max)
{
- int uninitialized_var(limit);
+ int limit;
enum ssb_clksrc clocksrc;
int divisor = 1;
u32 tmp;
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 8e0f27e61652..509c8012d20b 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -46,7 +46,7 @@ MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a powe
static DEFINE_SPINLOCK(dim_lock);
static void dim2_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET(dim2_tasklet, dim2_tasklet_fn, 0);
+static DECLARE_TASKLET_OLD(dim2_tasklet, dim2_tasklet_fn);
/**
* struct hdm_channel - private structure to keep channel specific data
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index ab7dd8216006..9c71ad5af7b9 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -41,7 +41,7 @@
#endif
static void cvm_oct_tx_do_cleanup(unsigned long arg);
-static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
+static DECLARE_TASKLET_OLD(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup);
/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
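
The DECLARE_TASKLET_OLD() conversions above keep legacy unsigned-long-callback tasklets compiling while the data argument is phased out; the macro simply passes 0 for it. In isolation:

#include <linux/interrupt.h>

static void demo_tasklet_fn(unsigned long data)
{
	/* 'data' is always 0 with DECLARE_TASKLET_OLD() */
}

static DECLARE_TASKLET_OLD(demo_tasklet, demo_tasklet_fn);

/* from an interrupt handler: tasklet_schedule(&demo_tasklet); */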
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 99698b8a3a74..b373b1b08b6d 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -17,6 +17,7 @@
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/workqueue.h>
#include "optee_private.h"
#include "optee_smc.h"
#include "shm_pool.h"
@@ -218,6 +219,11 @@ static void optee_get_version(struct tee_device *teedev,
*vers = v;
}
+static void optee_bus_scan(struct work_struct *work)
+{
+ WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
+}
+
static int optee_open(struct tee_context *ctx)
{
struct optee_context_data *ctxdata;
@@ -241,8 +247,18 @@ static int optee_open(struct tee_context *ctx)
kfree(ctxdata);
return -EBUSY;
}
- }
+ if (!optee->scan_bus_done) {
+ INIT_WORK(&optee->scan_bus_work, optee_bus_scan);
+ optee->scan_bus_wq = create_workqueue("optee_bus_scan");
+ if (!optee->scan_bus_wq) {
+ kfree(ctxdata);
+ return -ECHILD;
+ }
+ queue_work(optee->scan_bus_wq, &optee->scan_bus_work);
+ optee->scan_bus_done = true;
+ }
+ }
mutex_init(&ctxdata->mutex);
INIT_LIST_HEAD(&ctxdata->sess_list);
@@ -296,8 +312,13 @@ static void optee_release(struct tee_context *ctx)
ctx->data = NULL;
- if (teedev == optee->supp_teedev)
+ if (teedev == optee->supp_teedev) {
+ if (optee->scan_bus_wq) {
+ destroy_workqueue(optee->scan_bus_wq);
+ optee->scan_bus_wq = NULL;
+ }
optee_supp_release(&optee->supp);
+ }
}
static const struct tee_driver_ops optee_ops = {
@@ -675,7 +696,7 @@ static int optee_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, optee);
- rc = optee_enumerate_devices();
+ rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
if (rc) {
optee_remove(pdev);
return rc;
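
A sketch of the deferred bus-scan pattern the optee hunks above introduce (names are illustrative): enumeration via the supplicant can only proceed after this open() path completes, so the scan is pushed onto a one-shot workqueue instead of running inline.

#include <linux/errno.h>
#include <linux/workqueue.h>

static void demo_scan(struct work_struct *work)
{
	/* enumerate and register devices here */
}

static struct workqueue_struct *demo_wq;
static DECLARE_WORK(demo_work, demo_scan);

static int demo_defer_scan(void)
{
	demo_wq = create_workqueue("demo_scan");
	if (!demo_wq)
		return -ENOMEM;

	queue_work(demo_wq, &demo_work);	/* runs after we return */
	return 0;
}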
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index e3a148521ec1..7a897d51969f 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -11,18 +11,6 @@
#include <linux/uuid.h>
#include "optee_private.h"
-/*
- * Get device UUIDs
- *
- * [out] memref[0] Array of device UUIDs
- *
- * Return codes:
- * TEE_SUCCESS - Invoke command success
- * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
- * TEE_ERROR_SHORT_BUFFER - Output buffer size less than required
- */
-#define PTA_CMD_GET_DEVICES 0x0
-
static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
{
if (ver->impl_id == TEE_IMPL_ID_OPTEE)
@@ -32,7 +20,8 @@ static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
}
static int get_devices(struct tee_context *ctx, u32 session,
- struct tee_shm *device_shm, u32 *shm_size)
+ struct tee_shm *device_shm, u32 *shm_size,
+ u32 func)
{
int ret = 0;
struct tee_ioctl_invoke_arg inv_arg;
@@ -41,8 +30,7 @@ static int get_devices(struct tee_context *ctx, u32 session,
memset(&inv_arg, 0, sizeof(inv_arg));
memset(&param, 0, sizeof(param));
- /* Invoke PTA_CMD_GET_DEVICES function */
- inv_arg.func = PTA_CMD_GET_DEVICES;
+ inv_arg.func = func;
inv_arg.session = session;
inv_arg.num_params = 4;
@@ -65,7 +53,7 @@ static int get_devices(struct tee_context *ctx, u32 session,
return 0;
}
-static int optee_register_device(const uuid_t *device_uuid, u32 device_id)
+static int optee_register_device(const uuid_t *device_uuid)
{
struct tee_client_device *optee_device = NULL;
int rc;
@@ -75,7 +63,10 @@ static int optee_register_device(const uuid_t *device_uuid, u32 device_id)
return -ENOMEM;
optee_device->dev.bus = &tee_bus_type;
- dev_set_name(&optee_device->dev, "optee-clnt%u", device_id);
+ if (dev_set_name(&optee_device->dev, "optee-ta-%pUb", device_uuid)) {
+ kfree(optee_device);
+ return -ENOMEM;
+ }
uuid_copy(&optee_device->id.uuid, device_uuid);
rc = device_register(&optee_device->dev);
@@ -87,7 +78,7 @@ static int optee_register_device(const uuid_t *device_uuid, u32 device_id)
return rc;
}
-int optee_enumerate_devices(void)
+static int __optee_enumerate_devices(u32 func)
{
const uuid_t pta_uuid =
UUID_INIT(0x7011a688, 0xddde, 0x4053,
@@ -118,7 +109,7 @@ int optee_enumerate_devices(void)
goto out_ctx;
}
- rc = get_devices(ctx, sess_arg.session, NULL, &shm_size);
+ rc = get_devices(ctx, sess_arg.session, NULL, &shm_size, func);
if (rc < 0 || !shm_size)
goto out_sess;
@@ -130,7 +121,7 @@ int optee_enumerate_devices(void)
goto out_sess;
}
- rc = get_devices(ctx, sess_arg.session, device_shm, &shm_size);
+ rc = get_devices(ctx, sess_arg.session, device_shm, &shm_size, func);
if (rc < 0)
goto out_shm;
@@ -144,7 +135,7 @@ int optee_enumerate_devices(void)
num_devices = shm_size / sizeof(uuid_t);
for (idx = 0; idx < num_devices; idx++) {
- rc = optee_register_device(&device_uuid[idx], idx);
+ rc = optee_register_device(&device_uuid[idx]);
if (rc)
goto out_shm;
}
@@ -158,3 +149,8 @@ out_ctx:
return rc;
}
+
+int optee_enumerate_devices(u32 func)
+{
+ return __optee_enumerate_devices(func);
+}
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index d9c5037b4e03..8b71839a357e 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -78,6 +78,9 @@ struct optee_supp {
* @memremaped_shm: virtual address of memory in shared memory pool
* @sec_caps: secure world capabilities defined by
* OPTEE_SMC_SEC_CAP_* in optee_smc.h
+ * @scan_bus_done: flag indicating whether device registration was already done
+ * @scan_bus_wq: workqueue to scan the optee bus and register optee drivers
+ * @scan_bus_work: work struct used to scan the optee bus and register drivers
*/
struct optee {
struct tee_device *supp_teedev;
@@ -89,6 +92,9 @@ struct optee {
struct tee_shm_pool *pool;
void *memremaped_shm;
u32 sec_caps;
+ bool scan_bus_done;
+ struct workqueue_struct *scan_bus_wq;
+ struct work_struct scan_bus_work;
};
struct optee_session {
@@ -173,7 +179,9 @@ void optee_free_pages_list(void *array, size_t num_entries);
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
size_t page_offset);
-int optee_enumerate_devices(void);
+#define PTA_CMD_GET_DEVICES 0x0
+#define PTA_CMD_GET_DEVICES_SUPP 0x1
+int optee_enumerate_devices(u32 func);
/*
* Small helpers
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 6c0e1b053126..6cf23a54e853 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -333,18 +333,18 @@ static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev,
return false;
policy = cpufreq_cdev->policy;
- if (!cpumask_equal(policy->related_cpus, to_cpumask(em->cpus))) {
+ if (!cpumask_equal(policy->related_cpus, em_span_cpus(em))) {
pr_err("The span of pd %*pbl is misaligned with cpufreq policy %*pbl\n",
- cpumask_pr_args(to_cpumask(em->cpus)),
+ cpumask_pr_args(em_span_cpus(em)),
cpumask_pr_args(policy->related_cpus));
return false;
}
nr_levels = cpufreq_cdev->max_level + 1;
- if (em->nr_cap_states != nr_levels) {
- pr_err("The number of cap states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n",
- cpumask_pr_args(to_cpumask(em->cpus)),
- em->nr_cap_states, nr_levels);
+ if (em_pd_nr_perf_states(em) != nr_levels) {
+ pr_err("The number of performance states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n",
+ cpumask_pr_args(em_span_cpus(em)),
+ em_pd_nr_perf_states(em), nr_levels);
return false;
}
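
The cpufreq_cooling hunks above switch to the energy-model accessors em_span_cpus() and em_pd_nr_perf_states() instead of poking struct em_perf_domain internals directly. The two sanity checks condensed into one illustrative helper:

#include <linux/cpumask.h>
#include <linux/energy_model.h>

static bool demo_em_matches(struct em_perf_domain *em,
			    const struct cpumask *policy_cpus,
			    int nr_levels)
{
	/* the perf domain must span the policy's CPUs and expose one
	 * performance state per cooling level
	 */
	return cpumask_equal(policy_cpus, em_span_cpus(em)) &&
	       em_pd_nr_perf_states(em) == nr_levels;
}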
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index a6aabfd6e2da..097266342e5e 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -3643,7 +3643,7 @@ static int cy_pci_probe(struct pci_dev *pdev,
struct cyclades_card *card;
void __iomem *addr0 = NULL, *addr2 = NULL;
char *card_name = NULL;
- u32 uninitialized_var(mailbox);
+ u32 mailbox;
unsigned int device_id, nchan = 0, card_no, i, j;
unsigned char plx_ver;
int retval, irq;
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 5ef08905fe05..2a0e51a20e34 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -603,7 +603,7 @@ static void xen_hvm_early_write(uint32_t vtermno, const char *str, int len) { }
#endif
#ifdef CONFIG_EARLY_PRINTK
-static int __init xenboot_setup_console(struct console *console, char *string)
+static int __init xenboot_console_setup(struct console *console, char *string)
{
static struct xencons_info xenboot;
@@ -647,7 +647,7 @@ static void xenboot_write_console(struct console *console, const char *string,
struct console xenboot_console = {
.name = "xenboot",
.write = xenboot_write_console,
- .setup = xenboot_setup_console,
+ .setup = xenboot_console_setup,
.flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
.index = -1,
};
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index 66f95f758be0..e8c58f9bd263 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -1128,7 +1128,7 @@ static int __init hvsi_console_setup(struct console *console, char *options)
int ret;
if (console->index < 0 || console->index >= hvsi_count)
- return -1;
+ return -EINVAL;
hp = &hvsi_ports[console->index];
/* give the FSP a chance to change the baud rate when we re-open */
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index fc38f96475bf..3b2f9fb01aa0 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -1514,7 +1514,7 @@ static unsigned int card_count;
static int isicom_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- unsigned int uninitialized_var(signature), index;
+ unsigned int signature, index;
int retval = -EPERM;
struct isi_board *board = NULL;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 457c0bf8cbf8..07b7b6b05b8b 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
@@ -102,11 +103,19 @@
#define DEFAULT_IO_MACRO_IO2_IO3_MASK GENMASK(15, 4)
#define IO_MACRO_IO2_IO3_SWAP 0x4640
-#ifdef CONFIG_CONSOLE_POLL
-#define CONSOLE_RX_BYTES_PW 1
-#else
-#define CONSOLE_RX_BYTES_PW 4
-#endif
+/* We always configure 4 bytes per FIFO word */
+#define BYTES_PER_FIFO_WORD 4
+
+struct qcom_geni_private_data {
+ /* NOTE: earlycon port will have NULL here */
+ struct uart_driver *drv;
+
+ u32 poll_cached_bytes;
+ unsigned int poll_cached_bytes_cnt;
+
+ u32 write_cached_bytes;
+ unsigned int write_cached_bytes_cnt;
+};
struct qcom_geni_serial_port {
struct uart_port uport;
@@ -118,8 +127,6 @@ struct qcom_geni_serial_port {
bool setup;
int (*handle_rx)(struct uart_port *uport, u32 bytes, bool drop);
unsigned int baud;
- unsigned int tx_bytes_pw;
- unsigned int rx_bytes_pw;
void *rx_fifo;
u32 loopback;
bool brk;
@@ -128,6 +135,8 @@ struct qcom_geni_serial_port {
int wakeup_irq;
bool rx_tx_swap;
bool cts_rts_swap;
+
+ struct qcom_geni_private_data private_data;
};
static const struct uart_ops qcom_geni_console_pops;
@@ -263,8 +272,9 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
unsigned int baud;
unsigned int fifo_bits;
unsigned long timeout_us = 20000;
+ struct qcom_geni_private_data *private_data = uport->private_data;
- if (uport->private_data) {
+ if (private_data->drv) {
port = to_dev_port(uport, uport);
baud = port->baud;
if (!baud)
@@ -330,23 +340,42 @@ static void qcom_geni_serial_abort_rx(struct uart_port *uport)
}
#ifdef CONFIG_CONSOLE_POLL
+
static int qcom_geni_serial_get_char(struct uart_port *uport)
{
- u32 rx_fifo;
+ struct qcom_geni_private_data *private_data = uport->private_data;
u32 status;
+ u32 word_cnt;
+ int ret;
+
+ if (!private_data->poll_cached_bytes_cnt) {
+ status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
+ writel(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+
+ status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
+ writel(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+
+ status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ word_cnt = status & RX_FIFO_WC_MSK;
+ if (!word_cnt)
+ return NO_POLL_CHAR;
- status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
- writel(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ if (word_cnt == 1 && (status & RX_LAST))
+ private_data->poll_cached_bytes_cnt =
+ (status & RX_LAST_BYTE_VALID_MSK) >>
+ RX_LAST_BYTE_VALID_SHFT;
+ else
+ private_data->poll_cached_bytes_cnt = 4;
- status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
- writel(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ private_data->poll_cached_bytes =
+ readl(uport->membase + SE_GENI_RX_FIFOn);
+ }
- status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
- if (!(status & RX_FIFO_WC_MSK))
- return NO_POLL_CHAR;
+ private_data->poll_cached_bytes_cnt--;
+ ret = private_data->poll_cached_bytes & 0xff;
+ private_data->poll_cached_bytes >>= 8;
- rx_fifo = readl(uport->membase + SE_GENI_RX_FIFOn);
- return rx_fifo & 0xff;
+ return ret;
}
static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
@@ -365,13 +394,25 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
static void qcom_geni_serial_wr_char(struct uart_port *uport, int ch)
{
- writel(ch, uport->membase + SE_GENI_TX_FIFOn);
+ struct qcom_geni_private_data *private_data = uport->private_data;
+
+ private_data->write_cached_bytes =
+ (private_data->write_cached_bytes >> 8) | (ch << 24);
+ private_data->write_cached_bytes_cnt++;
+
+ if (private_data->write_cached_bytes_cnt == BYTES_PER_FIFO_WORD) {
+ writel(private_data->write_cached_bytes,
+ uport->membase + SE_GENI_TX_FIFOn);
+ private_data->write_cached_bytes_cnt = 0;
+ }
}
static void
__qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
unsigned int count)
{
+ struct qcom_geni_private_data *private_data = uport->private_data;
+
int i;
u32 bytes_to_send = count;
@@ -406,6 +447,15 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
SE_GENI_M_IRQ_CLEAR);
i += chars_to_write;
}
+
+ if (private_data->write_cached_bytes_cnt) {
+ private_data->write_cached_bytes >>= BITS_PER_BYTE *
+ (BYTES_PER_FIFO_WORD - private_data->write_cached_bytes_cnt);
+ writel(private_data->write_cached_bytes,
+ uport->membase + SE_GENI_TX_FIFOn);
+ private_data->write_cached_bytes_cnt = 0;
+ }
+
qcom_geni_serial_poll_tx_done(uport);
}
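
The write side mirrors this: qcom_geni_serial_wr_char() accumulates characters into a staging word and flushes it every four bytes, and the console-write tail shifts any partial word down so the queued bytes land in the low-order lanes the hardware consumes first. A small sketch of the packing arithmetic (hypothetical names):

    #include <stdint.h>

    #define BYTES_PER_WORD 4
    #define BITS_PER_BYTE  8

    /* Queue 'ch' as the newest (most significant) byte of the staging word. */
    static void pack_byte(uint32_t *word, unsigned int *cnt, int ch)
    {
        *word = (*word >> 8) | ((uint32_t)ch << 24);
        (*cnt)++;
    }

    /* Right-align a partial word before the final FIFO write. */
    static uint32_t flush_partial(uint32_t word, unsigned int cnt)
    {
        return word >> (BITS_PER_BYTE * (BYTES_PER_WORD - cnt));
    }

With two bytes queued (cnt == 2) they occupy bits 31:16 of the staging word; the flush shifts by 8 * (4 - 2) = 16, moving them to bits 15:0 with the oldest byte in bits 7:0, which is exactly the shift applied above.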
@@ -478,7 +528,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
tport = &uport->state->port;
for (i = 0; i < bytes; ) {
int c;
- int chunk = min_t(int, bytes - i, port->rx_bytes_pw);
+ int chunk = min_t(int, bytes - i, BYTES_PER_FIFO_WORD);
ioread32_rep(uport->membase + SE_GENI_RX_FIFOn, buf, 1);
i += chunk;
@@ -658,11 +708,11 @@ static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop)
if (!word_cnt)
return;
- total_bytes = port->rx_bytes_pw * (word_cnt - 1);
+ total_bytes = BYTES_PER_FIFO_WORD * (word_cnt - 1);
if (last_word_partial && last_word_byte_cnt)
total_bytes += last_word_byte_cnt;
else
- total_bytes += port->rx_bytes_pw;
+ total_bytes += BYTES_PER_FIFO_WORD;
port->handle_rx(uport, total_bytes, drop);
}
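
Worked example for the arithmetic above: with RX_FIFO_WC reporting 3 words and a partial last word holding 2 valid bytes, total_bytes = 4 * (3 - 1) + 2 = 10, so handle_rx() drains two full words plus one partial word.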
@@ -695,7 +745,7 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
}
avail = port->tx_fifo_depth - (status & TX_FIFO_WC);
- avail *= port->tx_bytes_pw;
+ avail *= BYTES_PER_FIFO_WORD;
tail = xmit->tail;
chunk = min(avail, pending);
@@ -719,7 +769,7 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
int c;
memset(buf, 0, ARRAY_SIZE(buf));
- tx_bytes = min_t(size_t, remaining, port->tx_bytes_pw);
+ tx_bytes = min_t(size_t, remaining, BYTES_PER_FIFO_WORD);
for (c = 0; c < tx_bytes ; c++) {
buf[c] = xmit->buf[tail++];
@@ -836,14 +886,6 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
u32 proto;
u32 pin_swap;
- if (uart_console(uport)) {
- port->tx_bytes_pw = 1;
- port->rx_bytes_pw = CONSOLE_RX_BYTES_PW;
- } else {
- port->tx_bytes_pw = 4;
- port->rx_bytes_pw = 4;
- }
-
proto = geni_se_read_proto(&port->se);
if (proto != GENI_SE_UART) {
dev_err(uport->dev, "Invalid FW loaded, proto: %d\n", proto);
@@ -875,10 +917,8 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
*/
if (uart_console(uport))
qcom_geni_serial_poll_tx_done(uport);
- geni_se_config_packing(&port->se, BITS_PER_BYTE, port->tx_bytes_pw,
- false, true, false);
- geni_se_config_packing(&port->se, BITS_PER_BYTE, port->rx_bytes_pw,
- false, false, true);
+ geni_se_config_packing(&port->se, BITS_PER_BYTE, BYTES_PER_FIFO_WORD,
+ false, true, true);
geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
geni_se_select_mode(&port->se, GENI_SE_FIFO);
port->setup = true;
@@ -945,6 +985,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
unsigned long clk_rate;
u32 ver, sampling_rate;
+ unsigned int avg_bw_core;
qcom_geni_serial_stop_rx(uport);
/* baud rate */
@@ -962,10 +1003,20 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
goto out_restart_rx;
uport->uartclk = clk_rate;
- clk_set_rate(port->se.clk, clk_rate);
+ dev_pm_opp_set_rate(uport->dev, clk_rate);
ser_clk_cfg = SER_CLK_EN;
ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
+ /*
+ * Bump up the BW vote on the CPU and CORE paths, as the driver
+ * supports FIFO mode only.
+ */
+ avg_bw_core = (baud > 115200) ? Bps_to_icc(CORE_2X_50_MHZ)
+ : GENI_DEFAULT_BW;
+ port->se.icc_paths[GENI_TO_CORE].avg_bw = avg_bw_core;
+ port->se.icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(baud);
+ geni_icc_set_bw(&port->se);
+
/* parity */
tx_trans_cfg = readl(uport->membase + SE_UART_TX_TRANS_CFG);
tx_parity_cfg = readl(uport->membase + SE_UART_TX_PARITY_CFG);
@@ -1121,6 +1172,14 @@ static inline void qcom_geni_serial_enable_early_read(struct geni_se *se,
struct console *con) { }
#endif
+static int qcom_geni_serial_earlycon_exit(struct console *con)
+{
+ geni_remove_earlycon_icc_vote();
+ return 0;
+}
+
+static struct qcom_geni_private_data earlycon_private_data;
+
static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
const char *opt)
{
@@ -1136,6 +1195,8 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
if (!uport->membase)
return -EINVAL;
+ uport->private_data = &earlycon_private_data;
+
memset(&se, 0, sizeof(se));
se.base = uport->membase;
if (geni_se_read_proto(&se) != GENI_SE_UART)
@@ -1153,7 +1214,8 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
*/
qcom_geni_serial_poll_tx_done(uport);
qcom_geni_serial_abort_rx(uport);
- geni_se_config_packing(&se, BITS_PER_BYTE, 1, false, true, false);
+ geni_se_config_packing(&se, BITS_PER_BYTE, BYTES_PER_FIFO_WORD,
+ false, true, true);
geni_se_init(&se, DEF_FIFO_DEPTH_WORDS / 2, DEF_FIFO_DEPTH_WORDS - 2);
geni_se_select_mode(&se, GENI_SE_FIFO);
@@ -1166,6 +1228,7 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
dev->con->write = qcom_geni_serial_earlycon_write;
+ dev->con->exit = qcom_geni_serial_earlycon_exit;
dev->con->setup = NULL;
qcom_geni_serial_enable_early_read(&se, dev->con);
@@ -1228,11 +1291,14 @@ static void qcom_geni_serial_pm(struct uart_port *uport,
if (old_state == UART_PM_STATE_UNDEFINED)
old_state = UART_PM_STATE_OFF;
- if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF)
+ if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF) {
+ geni_icc_enable(&port->se);
geni_se_resources_on(&port->se);
- else if (new_state == UART_PM_STATE_OFF &&
- old_state == UART_PM_STATE_ON)
+ } else if (new_state == UART_PM_STATE_OFF &&
+ old_state == UART_PM_STATE_ON) {
geni_se_resources_off(&port->se);
+ geni_icc_disable(&port->se);
+ }
}
static const struct uart_ops qcom_geni_console_pops = {
@@ -1330,6 +1396,17 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ ret = geni_icc_get(&port->se, NULL);
+ if (ret)
+ return ret;
+ port->se.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
+ port->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
+
+ /* Set BW for register access */
+ ret = geni_icc_set_bw(&port->se);
+ if (ret)
+ return ret;
+
port->name = devm_kasprintf(uport->dev, GFP_KERNEL,
"qcom_geni_serial_%s%d",
uart_console(uport) ? "console" : "uart", uport->line);
@@ -1351,13 +1428,26 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
if (of_property_read_bool(pdev->dev.of_node, "cts-rts-swap"))
port->cts_rts_swap = true;
- uport->private_data = drv;
+ port->se.opp_table = dev_pm_opp_set_clkname(&pdev->dev, "se");
+ if (IS_ERR(port->se.opp_table))
+ return PTR_ERR(port->se.opp_table);
+ /* OPP table is optional */
+ ret = dev_pm_opp_of_add_table(&pdev->dev);
+ if (!ret) {
+ port->se.has_opp_table = true;
+ } else if (ret != -ENODEV) {
+ dev_err(&pdev->dev, "invalid OPP table in device tree\n");
+ return ret;
+ }
+
+ port->private_data.drv = drv;
+ uport->private_data = &port->private_data;
platform_set_drvdata(pdev, port);
port->handle_rx = console ? handle_rx_console : handle_rx_uart;
ret = uart_add_one_port(drv, uport);
if (ret)
- return ret;
+ goto err;
irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
@@ -1365,7 +1455,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
if (ret) {
dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
uart_remove_one_port(drv, uport);
- return ret;
+ goto err;
}
/*
@@ -1382,18 +1472,26 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
if (ret) {
device_init_wakeup(&pdev->dev, false);
uart_remove_one_port(drv, uport);
- return ret;
+ goto err;
}
}
return 0;
+err:
+ if (port->se.has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(port->se.opp_table);
+ return ret;
}
static int qcom_geni_serial_remove(struct platform_device *pdev)
{
struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
- struct uart_driver *drv = port->uport.private_data;
+ struct uart_driver *drv = port->private_data.drv;
+ if (port->se.has_opp_table)
+ dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_put_clkname(port->se.opp_table);
dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
uart_remove_one_port(drv, &port->uport);
@@ -1405,16 +1503,32 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
{
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
+ struct qcom_geni_private_data *private_data = uport->private_data;
- return uart_suspend_port(uport->private_data, uport);
+ /*
+ * This is done so we can hit the lowest possible state in suspend
+ * even with no_console_suspend.
+ */
+ if (uart_console(uport)) {
+ geni_icc_set_tag(&port->se, 0x3);
+ geni_icc_set_bw(&port->se);
+ }
+ return uart_suspend_port(private_data->drv, uport);
}
static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
{
+ int ret;
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
+ struct qcom_geni_private_data *private_data = uport->private_data;
- return uart_resume_port(uport->private_data, uport);
+ ret = uart_resume_port(private_data->drv, uport);
+ if (uart_console(uport)) {
+ geni_icc_set_tag(&port->se, 0x7);
+ geni_icc_set_bw(&port->se);
+ }
+ return ret;
}
static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 1eb703c980e0..bab551f46963 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -886,7 +886,7 @@ static int sunsab_console_setup(struct console *con, char *options)
* though...
*/
if (up->port.type != PORT_SUNSAB)
- return -1;
+ return -EINVAL;
printk("Console: ttyS%d (SAB82532)\n",
(sunsab_reg.minor - 64) + con->index);
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 103ab8c556e7..7ea06bbc6197 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1221,7 +1221,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
int baud, brg;
if (up->port.type != PORT_SUNZILOG)
- return -1;
+ return -EINVAL;
printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n",
(sunzilog_reg.minor - 64) + con->index, con->index);
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 7c95afa905a0..a8e39b2cdd55 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -403,7 +403,6 @@ static const struct sysrq_key_op sysrq_moom_op = {
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
-#ifdef CONFIG_BLOCK
static void sysrq_handle_thaw(int key)
{
emergency_thaw_all();
@@ -414,7 +413,6 @@ static const struct sysrq_key_op sysrq_thaw_op = {
.action_msg = "Emergency Thaw of all frozen filesystems",
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
-#endif
static void sysrq_handle_kill(int key)
{
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 568b2171f335..f80199984ee0 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1236,7 +1236,7 @@ static void kbd_bh(unsigned long dummy)
}
}
-DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh, 0);
+DECLARE_TASKLET_DISABLED_OLD(keyboard_tasklet, kbd_bh);
#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_ALPHA) ||\
defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) ||\
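
DECLARE_TASKLET() and DECLARE_TASKLET_DISABLED() lost their data argument in the tasklet API rework; callbacks that never used the cookie are moved to the _OLD compatibility macros, which keep the unsigned long prototype but stop threading a value through. A sketch of the conversion, assuming the v5.8-era macros:

    #include <linux/interrupt.h>

    static void demo_bh(unsigned long unused)  /* cookie is ignored */
    {
        /* deferred bottom-half work */
    }

    /* Before: explicit data argument, here a meaningless 0. */
    /* DECLARE_TASKLET_DISABLED(demo_tasklet, demo_bh, 0); */

    /* After: the _OLD variant for legacy callbacks with no real data. */
    DECLARE_TASKLET_DISABLED_OLD(demo_tasklet, demo_bh);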
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 6e27fe4fcca3..ec7f66f4555a 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -199,6 +200,24 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
goto bad1;
uioinfo->irq = ret;
}
+
+ if (uioinfo->irq) {
+ struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);
+
+ /*
+ * If a level interrupt, don't do lazy disable. Otherwise the
+ * irq will fire again, since clearing of the actual cause, on
+ * device level, is done in userspace.
+ * irqd_is_level_type() isn't used since it isn't valid until
+ * the irq is configured.
+ */
+ if (irq_data &&
+ irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
+ dev_dbg(&pdev->dev, "disable lazy unmask\n");
+ irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
+ }
+ }
+
uiomem = &uioinfo->mem[0];
for (i = 0; i < pdev->num_resources; ++i) {
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index b60173bc93ce..63258b6accc4 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -20,6 +20,7 @@
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -171,6 +172,23 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
}
}
+ if (uioinfo->irq) {
+ struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);
+
+ /*
+ * If a level interrupt, don't do lazy disable. Otherwise the
+ * irq will fire again, since clearing of the actual cause, on
+ * device level, is done in userspace.
+ * irqd_is_level_type() isn't used since it isn't valid until
+ * the irq is configured.
+ */
+ if (irq_data &&
+ irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
+ dev_dbg(&pdev->dev, "disable lazy unmask\n");
+ irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
+ }
+ }
+
uiomem = &uioinfo->mem[0];
for (i = 0; i < pdev->num_resources; ++i) {
@@ -187,8 +205,10 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
}
uiomem->memtype = UIO_MEM_PHYS;
- uiomem->addr = r->start;
- uiomem->size = resource_size(r);
+ uiomem->addr = r->start & PAGE_MASK;
+ uiomem->offs = r->start & ~PAGE_MASK;
+ uiomem->size = (uiomem->offs + resource_size(r)
+ + PAGE_SIZE - 1) & PAGE_MASK;
uiomem->name = r->name;
++uiomem;
}
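
mmap() of UIO regions is page-granular, so the region is widened to page boundaries: addr rounds the start down, offs records where the registers really begin within that first page, and size rounds the end up. A worked sketch of the arithmetic, assuming 4 KiB pages and a hypothetical register block:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        /* Hypothetical resource: 0x100 bytes of registers at 0x10000440. */
        unsigned long start = 0x10000440UL, len = 0x100UL;

        unsigned long addr = start & PAGE_MASK;   /* 0x10000000 */
        unsigned long offs = start & ~PAGE_MASK;  /* 0x440 */
        unsigned long size = (offs + len + PAGE_SIZE - 1) & PAGE_MASK;
                                                  /* 0x1000 */
        printf("addr=%#lx offs=%#lx size=%#lx\n", addr, offs, size);
        return 0;
    }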
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 465d0b7c6522..4a112670cc6c 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1926,7 +1926,7 @@ static const struct usb_ep_ops lpc32xx_ep_ops = {
};
/* Send a ZLP on a non-0 IN EP */
-void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
+static void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
/* Clear EP status */
udc_clearep_getsts(udc, ep->hwep_num);
@@ -1940,7 +1940,7 @@ void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
* This function will only be called when a delayed ZLP needs to be sent out
* after a DMA transfer has filled both buffers.
*/
-void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
+static void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
u32 epstatus;
struct lpc32xx_request *req;
diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c
index 3fcded31405a..e76f1a50b0fc 100644
--- a/drivers/usb/gadget/udc/snps_udc_core.c
+++ b/drivers/usb/gadget/udc/snps_udc_core.c
@@ -96,9 +96,7 @@ static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);
/* tasklet for usb disconnect */
-static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
- (unsigned long) &udc);
-
+static DECLARE_TASKLET_OLD(disconnect_tasklet, udc_tasklet_disconnect);
/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
@@ -1661,7 +1659,7 @@ static void usb_disconnect(struct udc *dev)
/* Tasklet for disconnect to be outside of interrupt context */
static void udc_tasklet_disconnect(unsigned long par)
{
- struct udc *dev = (struct udc *)(*((struct udc **) par));
+ struct udc *dev = udc;
u32 tmp;
DBG(dev, "Tasklet disconnect\n");
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index 3235d5307403..5c423f240a1f 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -677,7 +677,7 @@ static void process_done_list(unsigned long data)
enable_irq(fhci_to_hcd(fhci)->irq);
}
-DECLARE_TASKLET(fhci_tasklet, process_done_list, 0);
+DECLARE_TASKLET_OLD(fhci_tasklet, process_done_list);
/* transfer completed callback */
u32 fhci_transfer_confirm_callback(struct fhci_hcd *fhci)
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index b4d6d9bb3239..c545b27ea568 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1146,7 +1146,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
struct musb_hw_ep *hw_ep = NULL;
u32 rx, tx;
int i, index;
- unsigned long uninitialized_var(flags);
+ unsigned long flags;
cppi = container_of(musb->dma_controller, struct cppi, controller);
if (cppi->irq)
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index ba955d65eb0e..c8a988d2cfdd 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -554,8 +554,8 @@ static int sddr55_reset(struct us_data *us)
static unsigned long sddr55_get_capacity(struct us_data *us) {
- unsigned char uninitialized_var(manufacturerID);
- unsigned char uninitialized_var(deviceID);
+ unsigned char manufacturerID;
+ unsigned char deviceID;
int result;
struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra;
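
These hunks belong to the tree-wide removal of uninitialized_var(). The macro was nothing but a self-assignment that silenced the compiler's maybe-uninitialized warning, so dropping it re-enables real diagnostics; for reference, the old definition was essentially:

    /* Former definition (removed from <linux/compiler*.h>): */
    #define uninitialized_var(x) x = x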
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index 3e1ceb8e9f2b..d93a69b12f81 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -11,6 +11,7 @@ if VDPA
config VDPA_SIM
tristate "vDPA device simulator"
depends on RUNTIME_TESTING_MENU && HAS_DMA
+ select DMA_OPS
select VHOST_RING
default n
help
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e992decfec53..eea902b83afe 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -862,7 +862,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
struct tun_msg_ctl ctl;
size_t len, total_len = 0;
int err;
- struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
+ struct vhost_net_ubuf_ref *ubufs;
bool zcopy_used;
int sent_pkts = 0;
@@ -1042,7 +1042,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
/* len is always initialized before use since we are always called with
* datalen > 0.
*/
- u32 uninitialized_var(len);
+ u32 len;
while (datalen > 0 && headcount < quota) {
if (unlikely(seg >= UIO_MAXIOV)) {
@@ -1099,7 +1099,7 @@ static void handle_rx(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_virtqueue *vq = &nvq->vq;
- unsigned uninitialized_var(in), log;
+ unsigned in, log;
struct vhost_log *vq_log;
struct msghdr msg = {
.msg_name = NULL,
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 6fb4d7ecfa19..b22adf03f584 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1215,7 +1215,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
continue;
}
- switch (v_req.type) {
+ switch (vhost32_to_cpu(vq, v_req.type)) {
case VIRTIO_SCSI_T_TMF:
vc.req = &v_req.tmf;
vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
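
v_req.type arrives in virtio (guest/wire) byte order, so switching on it raw misbehaves when guest and host endianness differ; vhost32_to_cpu() converts according to the per-virtqueue endianness. The idiom, sketched against the surrounding code:

    /* Sketch: convert virtio-endian fields before interpreting them. */
    u32 type = vhost32_to_cpu(vq, v_req.type);  /* wire -> CPU order */

    switch (type) {
    case VIRTIO_SCSI_T_TMF:
        /* ... */
        break;
    }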
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d7b8df3edffc..74d135ee7e26 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2092,11 +2092,6 @@ static int get_indirect(struct vhost_virtqueue *vq,
return ret;
}
iov_iter_init(&from, READ, vq->indirect, ret, len);
-
- /* We will use the result as an address to read from, so most
- * architectures only need a compiler barrier here. */
- read_barrier_depends();
-
count = len / sizeof desc;
/* Buffers are chained via a 16 bit next field, so
* we can have at most 2^16 of these. */
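
The barrier can go because address-dependency ordering was folded into READ_ONCE() itself (Alpha being the only architecture needing more than a compiler barrier there), so a load through a pointer obtained with READ_ONCE() is already ordered against its dependent reads. A hedged sketch of the idiom, with a hypothetical pointer field:

    struct vring_desc *d = READ_ONCE(indirect_ptr);  /* hypothetical field */

    /* Dependent load: ordered after the pointer read on every arch,
     * with no separate read_barrier_depends() needed. */
    u32 len = d->len;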
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 0f559aeaf469..32a2698914c3 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2198,17 +2198,6 @@ config FB_BROADSHEET
and could also have been called by other names when coupled with
a bridge adapter.
-config FB_PUV3_UNIGFX
- tristate "PKUnity v3 Unigfx framebuffer support"
- depends on FB && UNICORE32 && ARCH_PUV3
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- help
- Choose this option if you want to use the Unigfx device as a
- framebuffer device. Without the support of PCI & AGP.
-
config FB_HYPERV
tristate "Microsoft Hyper-V Synthetic Video support"
depends on FB && HYPERV
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index aa6352798cf4..a0705b99e643 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -116,7 +116,6 @@ obj-y += omap2/
obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
obj-$(CONFIG_FB_CARMINE) += carminefb.o
obj-$(CONFIG_FB_MB862XX) += mb862xx/
-obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
obj-$(CONFIG_FB_HYPERV) += hyperv_fb.o
obj-$(CONFIG_FB_OPENCORES) += ocfb.o
obj-$(CONFIG_FB_SM712) += sm712fb.o
diff --git a/drivers/video/fbdev/fb-puv3.c b/drivers/video/fbdev/fb-puv3.c
deleted file mode 100644
index 030e85c11a78..000000000000
--- a/drivers/video/fbdev/fb-puv3.c
+++ /dev/null
@@ -1,836 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Frame Buffer Driver for PKUnity-v3 Unigfx
- * Code specific to PKUnity SoC and UniCore ISA
- *
- * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
- * Copyright (C) 2001-2010 Guan Xuetao
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/mm.h>
-
-#include <linux/sizes.h>
-#include <mach/hardware.h>
-
-/* Platform_data reserved for unifb registers. */
-#define UNIFB_REGS_NUM 10
-/* RAM reserved for the frame buffer. */
-#define UNIFB_MEMSIZE (SZ_4M) /* 4 MB for 1024*768*32b */
-
-/*
- * cause UNIGFX don not have EDID
- * all the modes are organized as follow
- */
-static const struct fb_videomode unifb_modes[] = {
- /* 0 640x480-60 VESA */
- { "640x480@60", 60, 640, 480, 25175000, 48, 16, 34, 10, 96, 1,
- 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
- /* 1 640x480-75 VESA */
- { "640x480@75", 75, 640, 480, 31500000, 120, 16, 18, 1, 64, 1,
- 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
- /* 2 800x600-60 VESA */
- { "800x600@60", 60, 800, 600, 40000000, 88, 40, 26, 1, 128, 1,
- 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
- /* 3 800x600-75 VESA */
- { "800x600@75", 75, 800, 600, 49500000, 160, 16, 23, 1, 80, 1,
- 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
- /* 4 1024x768-60 VESA */
- { "1024x768@60", 60, 1024, 768, 65000000, 160, 24, 34, 3, 136, 1,
- 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
- /* 5 1024x768-75 VESA */
- { "1024x768@75", 75, 1024, 768, 78750000, 176, 16, 30, 1, 96, 1,
- 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
- /* 6 1280x960-60 VESA */
- { "1280x960@60", 60, 1280, 960, 108000000, 312, 96, 38, 1, 112, 1,
- 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
- /* 7 1440x900-60 VESA */
- { "1440x900@60", 60, 1440, 900, 106500000, 232, 80, 30, 3, 152, 1,
- 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
- /* 8 FIXME 9 1024x600-60 VESA UNTESTED */
- { "1024x600@60", 60, 1024, 600, 50650000, 160, 24, 26, 1, 136, 1,
- 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
- /* 9 FIXME 10 1024x600-75 VESA UNTESTED */
- { "1024x600@75", 75, 1024, 600, 61500000, 176, 16, 23, 1, 96, 1,
- 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
- /* 10 FIXME 11 1366x768-60 VESA UNTESTED */
- { "1366x768@60", 60, 1366, 768, 85500000, 256, 58, 18, 1, 112, 3,
- 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
-};
-
-static const struct fb_var_screeninfo unifb_default = {
- .xres = 640,
- .yres = 480,
- .xres_virtual = 640,
- .yres_virtual = 480,
- .bits_per_pixel = 16,
- .red = { 11, 5, 0 },
- .green = { 5, 6, 0 },
- .blue = { 0, 5, 0 },
- .activate = FB_ACTIVATE_NOW,
- .height = -1,
- .width = -1,
- .pixclock = 25175000,
- .left_margin = 48,
- .right_margin = 16,
- .upper_margin = 33,
- .lower_margin = 10,
- .hsync_len = 96,
- .vsync_len = 2,
- .vmode = FB_VMODE_NONINTERLACED,
-};
-
-static struct fb_fix_screeninfo unifb_fix = {
- .id = "UNIGFX FB",
- .type = FB_TYPE_PACKED_PIXELS,
- .visual = FB_VISUAL_TRUECOLOR,
- .xpanstep = 1,
- .ypanstep = 1,
- .ywrapstep = 1,
- .accel = FB_ACCEL_NONE,
-};
-
-static void unifb_sync(struct fb_info *info)
-{
- /* TODO: may, this can be replaced by interrupt */
- int cnt;
-
- for (cnt = 0; cnt < 0x10000000; cnt++) {
- if (readl(UGE_COMMAND) & 0x1000000)
- return;
- }
-
- if (cnt > 0x8000000)
- dev_warn(info->device, "Warning: UniGFX GE time out ...\n");
-}
-
-static void unifb_prim_fillrect(struct fb_info *info,
- const struct fb_fillrect *region)
-{
- int awidth = region->width;
- int aheight = region->height;
- int m_iBpp = info->var.bits_per_pixel;
- int screen_width = info->var.xres;
- int src_sel = 1; /* from fg_color */
- int pat_sel = 1;
- int src_x0 = 0;
- int dst_x0 = region->dx;
- int src_y0 = 0;
- int dst_y0 = region->dy;
- int rop_alpha_sel = 0;
- int rop_alpha_code = 0xCC;
- int x_dir = 1;
- int y_dir = 1;
- int alpha_r = 0;
- int alpha_sel = 0;
- int dst_pitch = screen_width * (m_iBpp / 8);
- int dst_offset = dst_y0 * dst_pitch + dst_x0 * (m_iBpp / 8);
- int src_pitch = screen_width * (m_iBpp / 8);
- int src_offset = src_y0 * src_pitch + src_x0 * (m_iBpp / 8);
- unsigned int command = 0;
- int clip_region = 0;
- int clip_en = 0;
- int tp_en = 0;
- int fg_color = 0;
- int bottom = info->var.yres - 1;
- int right = info->var.xres - 1;
- int top = 0;
-
- bottom = (bottom << 16) | right;
- command = (rop_alpha_sel << 26) | (pat_sel << 18) | (src_sel << 16)
- | (x_dir << 20) | (y_dir << 21) | (command << 24)
- | (clip_region << 23) | (clip_en << 22) | (tp_en << 27);
- src_pitch = (dst_pitch << 16) | src_pitch;
- awidth = awidth | (aheight << 16);
- alpha_r = ((rop_alpha_code & 0xff) << 8) | (alpha_r & 0xff)
- | (alpha_sel << 16);
- src_x0 = (src_x0 & 0x1fff) | ((src_y0 & 0x1fff) << 16);
- dst_x0 = (dst_x0 & 0x1fff) | ((dst_y0 & 0x1fff) << 16);
- fg_color = region->color;
-
- unifb_sync(info);
-
- writel(((u32 *)(info->pseudo_palette))[fg_color], UGE_FCOLOR);
- writel(0, UGE_BCOLOR);
- writel(src_pitch, UGE_PITCH);
- writel(src_offset, UGE_SRCSTART);
- writel(dst_offset, UGE_DSTSTART);
- writel(awidth, UGE_WIDHEIGHT);
- writel(top, UGE_CLIP0);
- writel(bottom, UGE_CLIP1);
- writel(alpha_r, UGE_ROPALPHA);
- writel(src_x0, UGE_SRCXY);
- writel(dst_x0, UGE_DSTXY);
- writel(command, UGE_COMMAND);
-}
-
-static void unifb_fillrect(struct fb_info *info,
- const struct fb_fillrect *region)
-{
- struct fb_fillrect modded;
- int vxres, vyres;
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- sys_fillrect(info, region);
- return;
- }
-
- vxres = info->var.xres_virtual;
- vyres = info->var.yres_virtual;
-
- memcpy(&modded, region, sizeof(struct fb_fillrect));
-
- if (!modded.width || !modded.height ||
- modded.dx >= vxres || modded.dy >= vyres)
- return;
-
- if (modded.dx + modded.width > vxres)
- modded.width = vxres - modded.dx;
- if (modded.dy + modded.height > vyres)
- modded.height = vyres - modded.dy;
-
- unifb_prim_fillrect(info, &modded);
-}
-
-static void unifb_prim_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- int awidth = area->width;
- int aheight = area->height;
- int m_iBpp = info->var.bits_per_pixel;
- int screen_width = info->var.xres;
- int src_sel = 2; /* from mem */
- int pat_sel = 0;
- int src_x0 = area->sx;
- int dst_x0 = area->dx;
- int src_y0 = area->sy;
- int dst_y0 = area->dy;
-
- int rop_alpha_sel = 0;
- int rop_alpha_code = 0xCC;
- int x_dir = 1;
- int y_dir = 1;
-
- int alpha_r = 0;
- int alpha_sel = 0;
- int dst_pitch = screen_width * (m_iBpp / 8);
- int dst_offset = dst_y0 * dst_pitch + dst_x0 * (m_iBpp / 8);
- int src_pitch = screen_width * (m_iBpp / 8);
- int src_offset = src_y0 * src_pitch + src_x0 * (m_iBpp / 8);
- unsigned int command = 0;
- int clip_region = 0;
- int clip_en = 1;
- int tp_en = 0;
- int top = 0;
- int bottom = info->var.yres;
- int right = info->var.xres;
- int fg_color = 0;
- int bg_color = 0;
-
- if (src_x0 < 0)
- src_x0 = 0;
- if (src_y0 < 0)
- src_y0 = 0;
-
- if (src_y0 - dst_y0 > 0) {
- y_dir = 1;
- } else {
- y_dir = 0;
- src_offset = (src_y0 + aheight) * src_pitch +
- src_x0 * (m_iBpp / 8);
- dst_offset = (dst_y0 + aheight) * dst_pitch +
- dst_x0 * (m_iBpp / 8);
- src_y0 += aheight;
- dst_y0 += aheight;
- }
-
- command = (rop_alpha_sel << 26) | (pat_sel << 18) | (src_sel << 16) |
- (x_dir << 20) | (y_dir << 21) | (command << 24) |
- (clip_region << 23) | (clip_en << 22) | (tp_en << 27);
- src_pitch = (dst_pitch << 16) | src_pitch;
- awidth = awidth | (aheight << 16);
- alpha_r = ((rop_alpha_code & 0xff) << 8) | (alpha_r & 0xff) |
- (alpha_sel << 16);
- src_x0 = (src_x0 & 0x1fff) | ((src_y0 & 0x1fff) << 16);
- dst_x0 = (dst_x0 & 0x1fff) | ((dst_y0 & 0x1fff) << 16);
- bottom = (bottom << 16) | right;
-
- unifb_sync(info);
-
- writel(src_pitch, UGE_PITCH);
- writel(src_offset, UGE_SRCSTART);
- writel(dst_offset, UGE_DSTSTART);
- writel(awidth, UGE_WIDHEIGHT);
- writel(top, UGE_CLIP0);
- writel(bottom, UGE_CLIP1);
- writel(bg_color, UGE_BCOLOR);
- writel(fg_color, UGE_FCOLOR);
- writel(alpha_r, UGE_ROPALPHA);
- writel(src_x0, UGE_SRCXY);
- writel(dst_x0, UGE_DSTXY);
- writel(command, UGE_COMMAND);
-}
-
-static void unifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
-{
- struct fb_copyarea modded;
- u32 vxres, vyres;
- modded.sx = area->sx;
- modded.sy = area->sy;
- modded.dx = area->dx;
- modded.dy = area->dy;
- modded.width = area->width;
- modded.height = area->height;
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- sys_copyarea(info, area);
- return;
- }
-
- vxres = info->var.xres_virtual;
- vyres = info->var.yres_virtual;
-
- if (!modded.width || !modded.height ||
- modded.sx >= vxres || modded.sy >= vyres ||
- modded.dx >= vxres || modded.dy >= vyres)
- return;
-
- if (modded.sx + modded.width > vxres)
- modded.width = vxres - modded.sx;
- if (modded.dx + modded.width > vxres)
- modded.width = vxres - modded.dx;
- if (modded.sy + modded.height > vyres)
- modded.height = vyres - modded.sy;
- if (modded.dy + modded.height > vyres)
- modded.height = vyres - modded.dy;
-
- unifb_prim_copyarea(info, &modded);
-}
-
-static void unifb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
- sys_imageblit(info, image);
-}
-
-static u_long get_line_length(int xres_virtual, int bpp)
-{
- u_long length;
-
- length = xres_virtual * bpp;
- length = (length + 31) & ~31;
- length >>= 3;
- return length;
-}
-
-/*
- * Setting the video mode has been split into two parts.
- * First part, xxxfb_check_var, must not write anything
- * to hardware, it should only verify and adjust var.
- * This means it doesn't alter par but it does use hardware
- * data from it to check this var.
- */
-static int unifb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- u_long line_length;
-
- /*
- * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal!
- * as FB_VMODE_SMOOTH_XPAN is only used internally
- */
-
- if (var->vmode & FB_VMODE_CONUPDATE) {
- var->vmode |= FB_VMODE_YWRAP;
- var->xoffset = info->var.xoffset;
- var->yoffset = info->var.yoffset;
- }
-
- /*
- * Some very basic checks
- */
- if (!var->xres)
- var->xres = 1;
- if (!var->yres)
- var->yres = 1;
- if (var->xres > var->xres_virtual)
- var->xres_virtual = var->xres;
- if (var->yres > var->yres_virtual)
- var->yres_virtual = var->yres;
- if (var->bits_per_pixel <= 1)
- var->bits_per_pixel = 1;
- else if (var->bits_per_pixel <= 8)
- var->bits_per_pixel = 8;
- else if (var->bits_per_pixel <= 16)
- var->bits_per_pixel = 16;
- else if (var->bits_per_pixel <= 24)
- var->bits_per_pixel = 24;
- else if (var->bits_per_pixel <= 32)
- var->bits_per_pixel = 32;
- else
- return -EINVAL;
-
- if (var->xres_virtual < var->xoffset + var->xres)
- var->xres_virtual = var->xoffset + var->xres;
- if (var->yres_virtual < var->yoffset + var->yres)
- var->yres_virtual = var->yoffset + var->yres;
-
- /*
- * Memory limit
- */
- line_length =
- get_line_length(var->xres_virtual, var->bits_per_pixel);
- if (line_length * var->yres_virtual > UNIFB_MEMSIZE)
- return -ENOMEM;
-
- /*
- * Now that we checked it we alter var. The reason being is that the
- * video mode passed in might not work but slight changes to it might
- * make it work. This way we let the user know what is acceptable.
- */
- switch (var->bits_per_pixel) {
- case 1:
- case 8:
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 0;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 0;
- var->transp.length = 0;
- break;
- case 16: /* RGBA 5551 */
- if (var->transp.length) {
- var->red.offset = 0;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 5;
- var->blue.offset = 10;
- var->blue.length = 5;
- var->transp.offset = 15;
- var->transp.length = 1;
- } else { /* RGB 565 */
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.offset = 0;
- var->transp.length = 0;
- }
- break;
- case 24: /* RGB 888 */
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 16;
- var->blue.length = 8;
- var->transp.offset = 0;
- var->transp.length = 0;
- break;
- case 32: /* RGBA 8888 */
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 24;
- var->transp.length = 8;
- break;
- }
- var->red.msb_right = 0;
- var->green.msb_right = 0;
- var->blue.msb_right = 0;
- var->transp.msb_right = 0;
-
- return 0;
-}
-
-/*
- * This routine actually sets the video mode. It's in here where we
- * the hardware state info->par and fix which can be affected by the
- * change in par. For this driver it doesn't do much.
- */
-static int unifb_set_par(struct fb_info *info)
-{
- int hTotal, vTotal, hSyncStart, hSyncEnd, vSyncStart, vSyncEnd;
- int format;
-
-#ifdef CONFIG_PUV3_PM
- struct clk *clk_vga;
- u32 pixclk = 0;
- int i;
-
- for (i = 0; i <= 10; i++) {
- if (info->var.xres == unifb_modes[i].xres
- && info->var.yres == unifb_modes[i].yres
- && info->var.upper_margin == unifb_modes[i].upper_margin
- && info->var.lower_margin == unifb_modes[i].lower_margin
- && info->var.left_margin == unifb_modes[i].left_margin
- && info->var.right_margin == unifb_modes[i].right_margin
- && info->var.hsync_len == unifb_modes[i].hsync_len
- && info->var.vsync_len == unifb_modes[i].vsync_len) {
- pixclk = unifb_modes[i].pixclock;
- break;
- }
- }
-
- /* set clock rate */
- clk_vga = clk_get(info->device, "VGA_CLK");
- if (clk_vga == ERR_PTR(-ENOENT))
- return -ENOENT;
-
- if (pixclk != 0) {
- if (clk_set_rate(clk_vga, pixclk)) { /* set clock failed */
- info->fix = unifb_fix;
- info->var = unifb_default;
- if (clk_set_rate(clk_vga, unifb_default.pixclock))
- return -EINVAL;
- }
- }
-#endif
-
- info->fix.line_length = get_line_length(info->var.xres_virtual,
- info->var.bits_per_pixel);
-
- hSyncStart = info->var.xres + info->var.right_margin;
- hSyncEnd = hSyncStart + info->var.hsync_len;
- hTotal = hSyncEnd + info->var.left_margin;
-
- vSyncStart = info->var.yres + info->var.lower_margin;
- vSyncEnd = vSyncStart + info->var.vsync_len;
- vTotal = vSyncEnd + info->var.upper_margin;
-
- switch (info->var.bits_per_pixel) {
- case 8:
- format = UDE_CFG_DST8;
- break;
- case 16:
- format = UDE_CFG_DST16;
- break;
- case 24:
- format = UDE_CFG_DST24;
- break;
- case 32:
- format = UDE_CFG_DST32;
- break;
- default:
- return -EINVAL;
- }
-
- writel(info->fix.smem_start, UDE_FSA);
- writel(info->var.yres, UDE_LS);
- writel(get_line_length(info->var.xres,
- info->var.bits_per_pixel) >> 3, UDE_PS);
- /* >> 3 for hardware required. */
- writel((hTotal << 16) | (info->var.xres), UDE_HAT);
- writel(((hTotal - 1) << 16) | (info->var.xres - 1), UDE_HBT);
- writel(((hSyncEnd - 1) << 16) | (hSyncStart - 1), UDE_HST);
- writel((vTotal << 16) | (info->var.yres), UDE_VAT);
- writel(((vTotal - 1) << 16) | (info->var.yres - 1), UDE_VBT);
- writel(((vSyncEnd - 1) << 16) | (vSyncStart - 1), UDE_VST);
- writel(UDE_CFG_GDEN_ENABLE | UDE_CFG_TIMEUP_ENABLE
- | format | 0xC0000001, UDE_CFG);
-
- return 0;
-}
-
-/*
- * Set a single color register. The values supplied are already
- * rounded down to the hardware's capabilities (according to the
- * entries in the var structure). Return != 0 for invalid regno.
- */
-static int unifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int transp, struct fb_info *info)
-{
- if (regno >= 256) /* no. of hw registers */
- return 1;
-
- /* grayscale works only partially under directcolor */
- if (info->var.grayscale) {
- /* grayscale = 0.30*R + 0.59*G + 0.11*B */
- red = green = blue =
- (red * 77 + green * 151 + blue * 28) >> 8;
- }
-
-#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
- switch (info->fix.visual) {
- case FB_VISUAL_TRUECOLOR:
- case FB_VISUAL_PSEUDOCOLOR:
- red = CNVT_TOHW(red, info->var.red.length);
- green = CNVT_TOHW(green, info->var.green.length);
- blue = CNVT_TOHW(blue, info->var.blue.length);
- transp = CNVT_TOHW(transp, info->var.transp.length);
- break;
- case FB_VISUAL_DIRECTCOLOR:
- red = CNVT_TOHW(red, 8); /* expect 8 bit DAC */
- green = CNVT_TOHW(green, 8);
- blue = CNVT_TOHW(blue, 8);
- /* hey, there is bug in transp handling... */
- transp = CNVT_TOHW(transp, 8);
- break;
- }
-#undef CNVT_TOHW
- /* Truecolor has hardware independent palette */
- if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
- u32 v;
-
- if (regno >= 16)
- return 1;
-
- v = (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset) |
- (transp << info->var.transp.offset);
- switch (info->var.bits_per_pixel) {
- case 8:
- break;
- case 16:
- case 24:
- case 32:
- ((u32 *) (info->pseudo_palette))[regno] = v;
- break;
- default:
- return 1;
- }
- return 0;
- }
- return 0;
-}
-
-/*
- * Pan or Wrap the Display
- *
- * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
- */
-static int unifb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- if (var->vmode & FB_VMODE_YWRAP) {
- if (var->yoffset < 0
- || var->yoffset >= info->var.yres_virtual
- || var->xoffset)
- return -EINVAL;
- } else {
- if (var->xoffset + info->var.xres > info->var.xres_virtual ||
- var->yoffset + info->var.yres > info->var.yres_virtual)
- return -EINVAL;
- }
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
- return 0;
-}
-
-int unifb_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- return vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
-}
-
-static const struct fb_ops unifb_ops = {
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
- .fb_check_var = unifb_check_var,
- .fb_set_par = unifb_set_par,
- .fb_setcolreg = unifb_setcolreg,
- .fb_pan_display = unifb_pan_display,
- .fb_fillrect = unifb_fillrect,
- .fb_copyarea = unifb_copyarea,
- .fb_imageblit = unifb_imageblit,
- .fb_mmap = unifb_mmap,
-};
-
-/*
- * Initialisation
- */
-static int unifb_probe(struct platform_device *dev)
-{
- struct fb_info *info;
- u32 unifb_regs[UNIFB_REGS_NUM];
- int retval = -ENOMEM;
- struct resource *iomem;
- void *videomemory;
-
- videomemory = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP,
- get_order(UNIFB_MEMSIZE));
- if (!videomemory)
- goto err;
-
- memset(videomemory, 0, UNIFB_MEMSIZE);
-
- unifb_fix.smem_start = virt_to_phys(videomemory);
- unifb_fix.smem_len = UNIFB_MEMSIZE;
-
- iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
- unifb_fix.mmio_start = iomem->start;
-
- info = framebuffer_alloc(sizeof(u32)*256, &dev->dev);
- if (!info)
- goto err;
-
- info->screen_base = (char __iomem *)videomemory;
- info->fbops = &unifb_ops;
-
- retval = fb_find_mode(&info->var, info, NULL,
- unifb_modes, 10, &unifb_modes[0], 16);
-
- if (!retval || (retval == 4))
- info->var = unifb_default;
-
- info->fix = unifb_fix;
- info->pseudo_palette = info->par;
- info->par = NULL;
- info->flags = FBINFO_FLAG_DEFAULT;
-#ifdef FB_ACCEL_PUV3_UNIGFX
- info->fix.accel = FB_ACCEL_PUV3_UNIGFX;
-#endif
-
- retval = fb_alloc_cmap(&info->cmap, 256, 0);
- if (retval < 0)
- goto err1;
-
- retval = register_framebuffer(info);
- if (retval < 0)
- goto err2;
- platform_set_drvdata(dev, info);
- platform_device_add_data(dev, unifb_regs, sizeof(u32) * UNIFB_REGS_NUM);
-
- fb_info(info, "Virtual frame buffer device, using %dM of video memory\n",
- UNIFB_MEMSIZE >> 20);
- return 0;
-err2:
- fb_dealloc_cmap(&info->cmap);
-err1:
- framebuffer_release(info);
-err:
- return retval;
-}
-
-static int unifb_remove(struct platform_device *dev)
-{
- struct fb_info *info = platform_get_drvdata(dev);
-
- if (info) {
- unregister_framebuffer(info);
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int unifb_resume(struct platform_device *dev)
-{
- int rc = 0;
- u32 *unifb_regs = dev->dev.platform_data;
-
- if (dev->dev.power.power_state.event == PM_EVENT_ON)
- return 0;
-
- console_lock();
-
- if (dev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- writel(unifb_regs[0], UDE_FSA);
- writel(unifb_regs[1], UDE_LS);
- writel(unifb_regs[2], UDE_PS);
- writel(unifb_regs[3], UDE_HAT);
- writel(unifb_regs[4], UDE_HBT);
- writel(unifb_regs[5], UDE_HST);
- writel(unifb_regs[6], UDE_VAT);
- writel(unifb_regs[7], UDE_VBT);
- writel(unifb_regs[8], UDE_VST);
- writel(unifb_regs[9], UDE_CFG);
- }
- dev->dev.power.power_state = PMSG_ON;
-
- console_unlock();
-
- return rc;
-}
-
-static int unifb_suspend(struct platform_device *dev, pm_message_t mesg)
-{
- u32 *unifb_regs = dev->dev.platform_data;
-
- unifb_regs[0] = readl(UDE_FSA);
- unifb_regs[1] = readl(UDE_LS);
- unifb_regs[2] = readl(UDE_PS);
- unifb_regs[3] = readl(UDE_HAT);
- unifb_regs[4] = readl(UDE_HBT);
- unifb_regs[5] = readl(UDE_HST);
- unifb_regs[6] = readl(UDE_VAT);
- unifb_regs[7] = readl(UDE_VBT);
- unifb_regs[8] = readl(UDE_VST);
- unifb_regs[9] = readl(UDE_CFG);
-
- if (mesg.event == dev->dev.power.power_state.event)
- return 0;
-
- switch (mesg.event) {
- case PM_EVENT_FREEZE: /* about to take snapshot */
- case PM_EVENT_PRETHAW: /* before restoring snapshot */
- goto done;
- }
-
- console_lock();
-
- /* do nothing... */
-
- console_unlock();
-
-done:
- dev->dev.power.power_state = mesg;
-
- return 0;
-}
-#else
-#define unifb_resume NULL
-#define unifb_suspend NULL
-#endif
-
-static struct platform_driver unifb_driver = {
- .probe = unifb_probe,
- .remove = unifb_remove,
- .resume = unifb_resume,
- .suspend = unifb_suspend,
- .driver = {
- .name = "PKUnity-v3-UNIGFX",
- },
-};
-
-static int __init unifb_init(void)
-{
-#ifndef MODULE
- if (fb_get_options("unifb", NULL))
- return -ENODEV;
-#endif
-
- return platform_driver_register(&unifb_driver);
-}
-
-module_init(unifb_init);
-
-static void __exit unifb_exit(void)
-{
- platform_driver_unregister(&unifb_driver);
-}
-
-module_exit(unifb_exit);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/matrox/matroxfb_maven.c b/drivers/video/fbdev/matrox/matroxfb_maven.c
index eda893b7a2e9..9a98c4a6ba33 100644
--- a/drivers/video/fbdev/matrox/matroxfb_maven.c
+++ b/drivers/video/fbdev/matrox/matroxfb_maven.c
@@ -300,7 +300,7 @@ static int matroxfb_mavenclock(const struct matrox_pll_ctl *ctl,
unsigned int* in, unsigned int* feed, unsigned int* post,
unsigned int* htotal2) {
unsigned int fvco;
- unsigned int uninitialized_var(p);
+ unsigned int p;
fvco = matroxfb_PLL_mavenclock(&maven1000_pll, ctl, htotal, vtotal, in, feed, &p, htotal2);
if (!fvco)
@@ -732,8 +732,8 @@ static int maven_find_exact_clocks(unsigned int ht, unsigned int vt,
for (x = 0; x < 8; x++) {
unsigned int c;
- unsigned int uninitialized_var(a), uninitialized_var(b),
- uninitialized_var(h2);
+ unsigned int a, b, h2;
unsigned int h = ht + 2 + x;
if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ? &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) {
diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c
index 7497bd36334c..a8faf46adeb1 100644
--- a/drivers/video/fbdev/pm3fb.c
+++ b/drivers/video/fbdev/pm3fb.c
@@ -821,9 +821,9 @@ static void pm3fb_write_mode(struct fb_info *info)
wmb();
{
- unsigned char uninitialized_var(m); /* ClkPreScale */
- unsigned char uninitialized_var(n); /* ClkFeedBackScale */
- unsigned char uninitialized_var(p); /* ClkPostScale */
+ unsigned char m; /* ClkPreScale */
+ unsigned char n; /* ClkFeedBackScale */
+ unsigned char p; /* ClkPostScale */
unsigned long pixclock = PICOS2KHZ(info->var.pixclock);
(void)pm3fb_calculate_clock(pixclock, &m, &n, &p);
diff --git a/drivers/video/fbdev/riva/riva_hw.c b/drivers/video/fbdev/riva/riva_hw.c
index 08c9ee46978e..4168ac464565 100644
--- a/drivers/video/fbdev/riva/riva_hw.c
+++ b/drivers/video/fbdev/riva/riva_hw.c
@@ -1245,8 +1245,7 @@ int CalcStateExt
)
{
int pixelDepth;
- int uninitialized_var(VClk),uninitialized_var(m),
- uninitialized_var(n), uninitialized_var(p);
+ int VClk, m, n, p;
/*
* Save mode parameters.
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 18ebd7a6af98..0b43efddea22 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -559,7 +559,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
* Changes the event filter mask for the given session.
*
* This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
- * do session cleanup. Takes the session spinlock.
+ * do session cleanup. Takes the session mutex.
*
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
@@ -662,7 +662,156 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
}
/**
- * Sets the guest capabilities for a session. Takes the session spinlock.
+ * Set guest capabilities on the host.
+ * Must be called with gdev->session_mutex held.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @session: The session.
+ * @session_termination: Set if we're called by the session cleanup code.
+ */
+static int vbg_set_host_capabilities(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ bool session_termination)
+{
+ struct vmmdev_mask *req;
+ u32 caps;
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&gdev->session_mutex));
+
+ caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask;
+
+ if (gdev->guest_caps_host == caps)
+ return 0;
+
+ /* On termination the requestor is the kernel, as we're cleaning up. */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+ session_termination ? VBG_KERNEL_REQUEST :
+ session->requestor);
+ if (!req) {
+ gdev->guest_caps_host = U32_MAX;
+ return -ENOMEM;
+ }
+
+ req->or_mask = caps;
+ req->not_mask = ~caps;
+ rc = vbg_req_perform(gdev, req);
+ vbg_req_free(req, sizeof(*req));
+
+ gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX;
+
+ return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Acquire (get exclusive access) guest capabilities for a session.
+ * Takes the session mutex.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @session: The session.
+ * @flags: Flags (VBGL_IOC_AGC_FLAGS_XXX).
+ * @or_mask: The capabilities to add.
+ * @not_mask: The capabilities to remove.
+ * @session_termination: Set if we're called by the session cleanup code.
+ * This tweaks the error handling so we perform
+ * proper session cleanup even if the host
+ * misbehaves.
+ */
+static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ u32 or_mask, u32 not_mask,
+ u32 flags, bool session_termination)
+{
+ unsigned long irqflags;
+ bool wakeup = false;
+ int ret = 0;
+
+ mutex_lock(&gdev->session_mutex);
+
+ if (gdev->set_guest_caps_tracker.mask & or_mask) {
+ vbg_err("%s error: cannot acquire caps which are currently set\n",
+ __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Mark any caps in the or_mask as now being in acquire-mode. Note
+ * once caps are in acquire_mode they always stay in this mode.
+ * This impacts event handling, so we take the event-lock.
+ */
+ spin_lock_irqsave(&gdev->event_spinlock, irqflags);
+ gdev->acquire_mode_guest_caps |= or_mask;
+ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
+
+ /* If we only have to switch the caps to acquire mode, we're done. */
+ if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
+ goto out;
+
+ not_mask &= ~or_mask; /* or_mask takes priority over not_mask */
+ not_mask &= session->acquired_guest_caps;
+ or_mask &= ~session->acquired_guest_caps;
+
+ if (or_mask == 0 && not_mask == 0)
+ goto out;
+
+ if (gdev->acquired_guest_caps & or_mask) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ gdev->acquired_guest_caps |= or_mask;
+ gdev->acquired_guest_caps &= ~not_mask;
+ /* session->acquired_guest_caps impacts event handling, take the lock */
+ spin_lock_irqsave(&gdev->event_spinlock, irqflags);
+ session->acquired_guest_caps |= or_mask;
+ session->acquired_guest_caps &= ~not_mask;
+ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
+
+ ret = vbg_set_host_capabilities(gdev, session, session_termination);
+ /* Roll back on failure, unless it's session termination time. */
+ if (ret < 0 && !session_termination) {
+ gdev->acquired_guest_caps &= ~or_mask;
+ gdev->acquired_guest_caps |= not_mask;
+ spin_lock_irqsave(&gdev->event_spinlock, irqflags);
+ session->acquired_guest_caps &= ~or_mask;
+ session->acquired_guest_caps |= not_mask;
+ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
+ }
+
+ /*
+ * If we added a capability, check if that means some other thread in
+ * our session should be unblocked because there are events pending
+ * (the result of vbg_get_allowed_event_mask_for_session() may change).
+ *
+ * HACK ALERT! When the seamless support capability is added we generate
+ * a seamless change event so that the ring-3 client can sync with
+ * the seamless state.
+ */
+ if (ret == 0 && or_mask != 0) {
+ spin_lock_irqsave(&gdev->event_spinlock, irqflags);
+
+ if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
+ gdev->pending_events |=
+ VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
+
+ if (gdev->pending_events)
+ wakeup = true;
+
+ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
+
+ if (wakeup)
+ wake_up(&gdev->event_wq);
+ }
+
+out:
+ mutex_unlock(&gdev->session_mutex);
+
+ return ret;
+}
+
+/**
+ * Sets the guest capabilities for a session. Takes the session mutex.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @session: The session.
@@ -678,62 +827,40 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
u32 or_mask, u32 not_mask,
bool session_termination)
{
- struct vmmdev_mask *req;
u32 changed, previous;
- int rc, ret = 0;
-
- /*
- * Allocate a request buffer before taking the spinlock, when
- * the session is being terminated the requestor is the kernel,
- * as we're cleaning up.
- */
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
- session_termination ? VBG_KERNEL_REQUEST :
- session->requestor);
- if (!req) {
- if (!session_termination)
- return -ENOMEM;
- /* Ignore allocation failure, we must do session cleanup. */
- }
+ int ret = 0;
mutex_lock(&gdev->session_mutex);
+ if (gdev->acquire_mode_guest_caps & or_mask) {
+ vbg_err("%s error: cannot set caps which are in acquire_mode\n",
+ __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+
/* Apply the changes to the session mask. */
- previous = session->guest_caps;
- session->guest_caps |= or_mask;
- session->guest_caps &= ~not_mask;
+ previous = session->set_guest_caps;
+ session->set_guest_caps |= or_mask;
+ session->set_guest_caps &= ~not_mask;
/* If anything actually changed, update the global usage counters. */
- changed = previous ^ session->guest_caps;
+ changed = previous ^ session->set_guest_caps;
if (!changed)
goto out;
- vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
- or_mask = gdev->guest_caps_tracker.mask;
-
- if (gdev->guest_caps_host == or_mask || !req)
- goto out;
+ vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous);
- gdev->guest_caps_host = or_mask;
- req->or_mask = or_mask;
- req->not_mask = ~or_mask;
- rc = vbg_req_perform(gdev, req);
- if (rc < 0) {
- ret = vbg_status_code_to_errno(rc);
-
- /* Failed, roll back (unless it's session termination time). */
- gdev->guest_caps_host = U32_MAX;
- if (session_termination)
- goto out;
-
- vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
- session->guest_caps);
- session->guest_caps = previous;
+ ret = vbg_set_host_capabilities(gdev, session, session_termination);
+ /* Roll back on failure, unless it's session termination time. */
+ if (ret < 0 && !session_termination) {
+ vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed,
+ session->set_guest_caps);
+ session->set_guest_caps = previous;
}
out:
mutex_unlock(&gdev->session_mutex);
- vbg_req_free(req, sizeof(*req));
return ret;
}
@@ -949,6 +1076,7 @@ void vbg_core_close_session(struct vbg_session *session)
struct vbg_dev *gdev = session->gdev;
int i, rc;
+ vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true);
vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
@@ -1006,6 +1134,25 @@ static int vbg_ioctl_driver_version_info(
return 0;
}
+/* Must be called with the event_lock held */
+static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev,
+ struct vbg_session *session)
+{
+ u32 acquire_mode_caps = gdev->acquire_mode_guest_caps;
+ u32 session_acquired_caps = session->acquired_guest_caps;
+ u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK;
+
+ if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) &&
+ !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
+ allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
+
+ if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) &&
+ !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
+ allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
+
+ return allowed_events;
+}
+
static bool vbg_wait_event_cond(struct vbg_dev *gdev,
struct vbg_session *session,
u32 event_mask)
@@ -1017,6 +1164,7 @@ static bool vbg_wait_event_cond(struct vbg_dev *gdev,
spin_lock_irqsave(&gdev->event_spinlock, flags);
events = gdev->pending_events & event_mask;
+ events &= vbg_get_allowed_event_mask_for_session(gdev, session);
wakeup = events || session->cancel_waiters;
spin_unlock_irqrestore(&gdev->event_spinlock, flags);
@@ -1031,6 +1179,7 @@ static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
{
u32 events = gdev->pending_events & event_mask;
+ events &= vbg_get_allowed_event_mask_for_session(gdev, session);
gdev->pending_events &= ~events;
return events;
}
@@ -1150,7 +1299,9 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
+ case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS:
case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
+ case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI:
case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
case VMMDEVREQ_GET_VRDPCHANGE_REQ:
case VMMDEVREQ_LOG_STRING:
@@ -1432,6 +1583,29 @@ static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
false);
}
+static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ struct vbg_ioctl_acquire_guest_caps *caps)
+{
+ u32 flags, or_mask, not_mask;
+
+ if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0))
+ return -EINVAL;
+
+ flags = caps->u.in.flags;
+ or_mask = caps->u.in.or_mask;
+ not_mask = caps->u.in.not_mask;
+
+ if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK)
+ return -EINVAL;
+
+ if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
+ return -EINVAL;
+
+ return vbg_acquire_session_capabilities(gdev, session, or_mask,
+ not_mask, flags, false);
+}
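
From userspace this gives an acquire/release interface alongside the existing set interface: or_mask claims capabilities exclusively for one session, not_mask releases them, and the CONFIG_ACQUIRE_MODE flag only flips bits into acquire mode without claiming them. A hedged usage sketch, assuming the uapi structures that accompany this change:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/vboxguest.h>

    /* Sketch: exclusively claim the seamless capability for this fd. */
    static int acquire_seamless(int fd)
    {
        struct vbg_ioctl_acquire_guest_caps caps;

        memset(&caps, 0, sizeof(caps));  /* header setup assumed elsewhere */
        caps.u.in.flags = 0;
        caps.u.in.or_mask = VMMDEV_GUEST_SUPPORTS_SEAMLESS;
        caps.u.in.not_mask = 0;

        return ioctl(fd, VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES, &caps);
    }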
+
static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
@@ -1452,7 +1626,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
if (ret)
return ret;
- caps->u.out.session_caps = session->guest_caps;
+ caps->u.out.session_caps = session->set_guest_caps;
caps->u.out.global_caps = gdev->guest_caps_host;
return 0;
@@ -1541,6 +1715,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
case VBG_IOCTL_CHANGE_FILTER_MASK:
return vbg_ioctl_change_filter_mask(gdev, session, data);
+ case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
+ return vbg_ioctl_acquire_guest_capabilities(gdev, session, data);
case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
return vbg_ioctl_change_guest_capabilities(gdev, session, data);
case VBG_IOCTL_CHECK_BALLOON:
@@ -1563,7 +1739,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
return vbg_ioctl_log(data);
}
- vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
+ vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req);
return -ENOTTY;
}
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 77c3a9c8255d..ab4bf64e2cec 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -118,11 +118,21 @@ struct vbg_dev {
u32 event_filter_host;
/**
- * Usage counters for guest capabilities. Indexed by capability bit
+ * Guest capabilities which have been switched to acquire_mode.
+ */
+ u32 acquire_mode_guest_caps;
+ /**
+ * Guest capabilities acquired by vbg_acquire_session_capabilities().
+ * Only one session can acquire a capability at a time.
+ */
+ u32 acquired_guest_caps;
+ /**
+ * Usage counters for guest capabilities requested through
+ * vbg_set_session_capabilities(). Indexed by capability bit
* number, one count per session using a capability.
* Protected by session_mutex.
*/
- struct vbg_bit_usage_tracker guest_caps_tracker;
+ struct vbg_bit_usage_tracker set_guest_caps_tracker;
/**
* The guest capabilities last reported to the host (or UINT32_MAX).
* Protected by session_mutex.
@@ -164,11 +174,16 @@ struct vbg_session {
*/
u32 event_filter;
/**
- * Guest capabilities for this session.
+ * Guest capabilities acquired by vbg_acquire_session_capabilities().
+ * Only one session can acquire a capability at a time.
+ */
+ u32 acquired_guest_caps;
+ /**
+ * Guest capabilities set through vbg_set_session_capabilities().
* A capability claimed by any guest session will be reported to the
* host. Protected by vbg_gdev.session_mutex.
*/
- u32 guest_caps;
+ u32 set_guest_caps;
/** VMMDEV_REQUESTOR_* flags */
u32 requestor;
/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
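
The "only one session can acquire a capability at a time" rule documented above implies an ownership check on the acquire path. A sketch of the invariant, as an assumption about what vbg_acquire_session_capabilities() must enforce rather than a quote of it (struct definitions as in vboxguest_core.h above):

	/*
	 * Sketch (assumption, not this patch's code): the acquire path must
	 * reject bits already owned by a different session.
	 */
	static int check_acquire_ownership(struct vbg_dev *gdev,
					   struct vbg_session *session,
					   u32 or_mask)
	{
		u32 owned_elsewhere = gdev->acquired_guest_caps &
				      ~session->acquired_guest_caps;

		if (or_mask & owned_elsewhere)
			return -EBUSY;	/* bit held by another session */
		return 0;
	}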
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 7396187ee32a..ea05af41ec69 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -59,6 +59,7 @@ EXPORT_SYMBOL(name)
VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
+VBG_LOG(vbg_err_ratelimited, pr_err_ratelimited);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif
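
vbg_err_ratelimited() is stamped out by the same VBG_LOG macro as its siblings; only the pr_err_ratelimited() backend is new. The macro body sits outside this hunk, so as an illustration only, a hand-written wrapper of the same general shape might be (using the kernel's %pV va_format printk extension):

	/* Illustrative shape only; the driver's VBG_LOG macro body differs. */
	void vbg_err_ratelimited(const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		pr_err_ratelimited("vboxguest: %pV", &vaf);
		va_end(args);
	}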
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 5809e5f5b157..5c92e4a50882 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -85,7 +85,7 @@ config VIRTIO_MEM
depends on VIRTIO
depends on MEMORY_HOTPLUG_SPARSE
depends on MEMORY_HOTREMOVE
- select CONTIG_ALLOC
+ depends on CONTIG_ALLOC
help
This driver provides access to virtio-mem paravirtualized memory
devices, allowing memory to be hotplugged and hotunplugged.
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1f157d2f4952..8be02f333b7a 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -578,10 +578,14 @@ static int init_vqs(struct virtio_balloon *vb)
static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
- &vb->config_read_bitmap))
+ &vb->config_read_bitmap)) {
virtio_cread(vb->vdev, struct virtio_balloon_config,
free_page_hint_cmd_id,
&vb->cmd_id_received_cache);
+ /* Legacy balloon config space is LE, unlike all other devices. */
+ if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+ vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache);
+ }
return vb->cmd_id_received_cache;
}
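
The conversion above encodes the rule that, without VIRTIO_F_VERSION_1, the balloon's config space is little-endian while virtio_cread() performs no swap for legacy devices. A hypothetical helper capturing the pattern (an assumption, not part of this patch); the read in virtio_balloon_cmd_id_received() could then be funneled through it:

	/* Hypothetical helper, not in this patch. */
	static inline u32 vb_legacy_config32_to_cpu(struct virtio_device *vdev,
						    u32 val)
	{
		if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
			return val;	/* already CPU order for virtio 1.0 */
		return le32_to_cpu((__force __le32)val);
	}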
@@ -974,6 +978,11 @@ static int virtballoon_probe(struct virtio_device *vdev)
/*
* Let the hypervisor know that we are expecting a
* specific value to be written back in balloon pages.
+ *
+ * If the PAGE_POISON value were larger than a byte, we would
+ * need to byte-swap poison_val here to guarantee it is
+ * little-endian. However, for now it is a single byte, so we
+ * can pass it as-is.
*/
if (!want_init_on_free())
memset(&poison_val, PAGE_POISON, sizeof(poison_val));
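
The comment above rests on a simple endianness argument: a value whose bytes are all identical reads the same on little- and big-endian CPUs. For illustration (assuming poison_val is a 32-bit integer, which this hunk does not show, and using 0xaa as the poison byte):

	u32 poison_val = 0;

	memset(&poison_val, 0xaa, sizeof(poison_val));
	/* poison_val == 0xaaaaaaaa on both little- and big-endian CPUs. */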
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 58b96baa8d48..a2de775801af 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -424,7 +424,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
struct vring_virtqueue *vq = to_vvq(_vq);
struct scatterlist *sg;
struct vring_desc *desc;
- unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
+ unsigned int i, n, avail, descs_used, prev, err_idx;
int head;
bool indirect;
@@ -1101,8 +1101,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
struct vring_packed_desc *desc;
struct scatterlist *sg;
unsigned int i, n, c, descs_used, err_idx;
- __le16 uninitialized_var(head_flags), flags;
- u16 head, id, uninitialized_var(prev), curr, avail_used_flags;
+ __le16 head_flags, flags;
+ u16 head, id, prev, curr, avail_used_flags;
START_USE(vq);
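
These hunks are part of the tree-wide removal of uninitialized_var(). The macro merely self-initialized a variable to silence "may be used uninitialized" warnings, which could mask real bugs; older kernels defined it roughly as:

	/* Historical definition (older compiler-gcc.h, for reference): */
	#define uninitialized_var(x) x = x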
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 727f11eb46b2..1d339ef92422 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -179,6 +179,7 @@ config XEN_GRANT_DMA_ALLOC
config SWIOTLB_XEN
def_bool y
+ select DMA_OPS
select SWIOTLB
config XEN_PCIDEV_BACKEND