author    Johannes Berg <johannes.berg@intel.com>  2018-05-23 11:05:26 +0200
committer Johannes Berg <johannes.berg@intel.com>  2018-05-23 11:05:59 +0200
commit    dd8070bff204a67fcb6585f18047841a895b68d7 (patch)
tree      cce17d4b8eb8987194beb40498c8f83e74d6799f /drivers
parent    f3a7ca64587f58686d4e2e894e9abbfbc9dffb25 (diff)
parent    1fe8c06c4a0d3b589f076cd00c25082840f10423 (diff)
download  linux-dd8070bff204a67fcb6585f18047841a895b68d7.tar.bz2
Merge remote-tracking branch 'net-next/master' into mac80211-next
Bring in net-next which had pulled in net, so I have the changes from mac80211 and can apply a patch that would otherwise conflict.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_video.c27
-rw-r--r--drivers/acpi/acpi_watchdog.c59
-rw-r--r--drivers/acpi/acpica/acnamesp.h4
-rw-r--r--drivers/acpi/acpica/exconfig.c14
-rw-r--r--drivers/acpi/acpica/nsinit.c76
-rw-r--r--drivers/acpi/button.c24
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/sleep.c13
-rw-r--r--drivers/amba/bus.c17
-rw-r--r--drivers/android/binder.c8
-rw-r--r--drivers/ata/ahci.c6
-rw-r--r--drivers/ata/ahci.h8
-rw-r--r--drivers/ata/ahci_mvebu.c56
-rw-r--r--drivers/ata/ahci_qoriq.c2
-rw-r--r--drivers/ata/ahci_xgene.c4
-rw-r--r--drivers/ata/libahci.c20
-rw-r--r--drivers/ata/libahci_platform.c24
-rw-r--r--drivers/ata/libata-core.c6
-rw-r--r--drivers/ata/libata-eh.c4
-rw-r--r--drivers/ata/sata_highbank.c2
-rw-r--r--drivers/ata/sata_sil24.c4
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/atm/zatm.c3
-rw-r--r--drivers/base/dma-coherent.c5
-rw-r--r--drivers/base/dma-mapping.c6
-rw-r--r--drivers/base/firmware_loader/fallback.c4
-rw-r--r--drivers/base/firmware_loader/fallback.h2
-rw-r--r--drivers/block/loop.c109
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/rbd.c105
-rw-r--r--drivers/block/swim.c49
-rw-r--r--drivers/block/swim3.c6
-rw-r--r--drivers/bluetooth/Kconfig1
-rw-r--r--drivers/bluetooth/btbcm.c201
-rw-r--r--drivers/bluetooth/btbcm.h5
-rw-r--r--drivers/bluetooth/btqca.c104
-rw-r--r--drivers/bluetooth/btqca.h11
-rw-r--r--drivers/bluetooth/btqcomsmd.c10
-rw-r--r--drivers/bluetooth/btusb.c21
-rw-r--r--drivers/bluetooth/hci_bcm.c35
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bluetooth/hci_qca.c116
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/agp/uninorth-agp.c4
-rw-r--r--drivers/char/random.c172
-rw-r--r--drivers/char/virtio_console.c157
-rw-r--r--drivers/clk/Kconfig33
-rw-r--r--drivers/clk/Makefile8
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c8
-rw-r--r--drivers/clk/clk-cs2000-cp.c4
-rw-r--r--drivers/clk/clk-divider.c58
-rw-r--r--drivers/clk/clk-gpio.c4
-rw-r--r--drivers/clk/clk-mux.c85
-rw-r--r--drivers/clk/clk-si544.c411
-rw-r--r--drivers/clk/clk-stm32f4.c14
-rw-r--r--drivers/clk/clk-stm32mp1.c2109
-rw-r--r--drivers/clk/clk.c92
-rw-r--r--drivers/clk/davinci/Makefile21
-rw-r--r--drivers/clk/davinci/da8xx-cfgchip.c790
-rw-r--r--drivers/clk/davinci/pll-da830.c70
-rw-r--r--drivers/clk/davinci/pll-da850.c212
-rw-r--r--drivers/clk/davinci/pll-dm355.c79
-rw-r--r--drivers/clk/davinci/pll-dm365.c145
-rw-r--r--drivers/clk/davinci/pll-dm644x.c80
-rw-r--r--drivers/clk/davinci/pll-dm646x.c84
-rw-r--r--drivers/clk/davinci/pll.c899
-rw-r--r--drivers/clk/davinci/pll.h141
-rw-r--r--drivers/clk/davinci/psc-da830.c116
-rw-r--r--drivers/clk/davinci/psc-da850.c156
-rw-r--r--drivers/clk/davinci/psc-dm355.c88
-rw-r--r--drivers/clk/davinci/psc-dm365.c96
-rw-r--r--drivers/clk/davinci/psc-dm644x.c83
-rw-r--r--drivers/clk/davinci/psc-dm646x.c80
-rw-r--r--drivers/clk/davinci/psc.c551
-rw-r--r--drivers/clk/davinci/psc.h108
-rw-r--r--drivers/clk/hisilicon/Makefile2
-rw-r--r--drivers/clk/hisilicon/clk-hisi-phase.c121
-rw-r--r--drivers/clk/hisilicon/clk.c26
-rw-r--r--drivers/clk/hisilicon/clk.h19
-rw-r--r--drivers/clk/hisilicon/crg-hi3516cv300.c2
-rw-r--r--drivers/clk/hisilicon/crg-hi3798cv200.c100
-rw-r--r--drivers/clk/imx/Makefile1
-rw-r--r--drivers/clk/imx/clk-busy.c4
-rw-r--r--drivers/clk/imx/clk-imx6sll.c340
-rw-r--r--drivers/clk/imx/clk-imx6sx.c14
-rw-r--r--drivers/clk/imx/clk-imx6ul.c7
-rw-r--r--drivers/clk/imx/clk-imx7d.c113
-rw-r--r--drivers/clk/imx/clk-pllv2.c6
-rw-r--r--drivers/clk/imx/clk.h14
-rw-r--r--drivers/clk/keystone/sci-clk.c380
-rw-r--r--drivers/clk/mediatek/Kconfig6
-rw-r--r--drivers/clk/mediatek/Makefile1
-rw-r--r--drivers/clk/mediatek/clk-mt2701-aud.c186
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c15
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c69
-rw-r--r--drivers/clk/mediatek/clk-mt7622-aud.c15
-rw-r--r--drivers/clk/meson/Kconfig9
-rw-r--r--drivers/clk/meson/Makefile5
-rw-r--r--drivers/clk/meson/axg.c955
-rw-r--r--drivers/clk/meson/axg.h12
-rw-r--r--drivers/clk/meson/clk-audio-divider.c63
-rw-r--r--drivers/clk/meson/clk-cpu.c178
-rw-r--r--drivers/clk/meson/clk-mpll.c125
-rw-r--r--drivers/clk/meson/clk-pll.c306
-rw-r--r--drivers/clk/meson/clk-regmap.c175
-rw-r--r--drivers/clk/meson/clk-regmap.h111
-rw-r--r--drivers/clk/meson/clkc.h107
-rw-r--r--drivers/clk/meson/gxbb-aoclk-regmap.c46
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c20
-rw-r--r--drivers/clk/meson/gxbb-aoclk.h13
-rw-r--r--drivers/clk/meson/gxbb.c1591
-rw-r--r--drivers/clk/meson/gxbb.h14
-rw-r--r--drivers/clk/meson/meson8b.c706
-rw-r--r--drivers/clk/meson/meson8b.h17
-rw-r--r--drivers/clk/mvebu/armada-38x.c14
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c94
-rw-r--r--drivers/clk/nxp/clk-lpc32xx.c1
-rw-r--r--drivers/clk/qcom/clk-regmap-divider.c20
-rw-r--r--drivers/clk/qcom/clk-rpm.c79
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c9
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c8
-rw-r--r--drivers/clk/renesas/Kconfig13
-rw-r--r--drivers/clk/renesas/Makefile2
-rw-r--r--drivers/clk/renesas/clk-div6.c22
-rw-r--r--drivers/clk/renesas/clk-mstp.c4
-rw-r--r--drivers/clk/renesas/clk-r8a73a4.c11
-rw-r--r--drivers/clk/renesas/clk-r8a7740.c8
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c17
-rw-r--r--drivers/clk/renesas/clk-rz.c4
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c20
-rw-r--r--drivers/clk/renesas/r8a7743-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7745-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7790-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7791-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7792-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7794-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c334
-rw-r--r--drivers/clk/renesas/r8a77980-cpg-mssr.c227
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c143
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h2
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c12
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h2
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c78
-rw-r--r--drivers/clk/rockchip/clk-rk3228.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c83
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c5
-rw-r--r--drivers/clk/rockchip/clk.c22
-rw-r--r--drivers/clk/samsung/Makefile2
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c4
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c114
-rw-r--r--drivers/clk/samsung/clk-exynos4.c103
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.c189
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.h26
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c111
-rw-r--r--drivers/clk/samsung/clk-exynos5260.c90
-rw-r--r--drivers/clk/samsung/clk-exynos5410.c20
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c189
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c121
-rw-r--r--drivers/clk/samsung/clk-exynos7.c2
-rw-r--r--drivers/clk/samsung/clk-pll.h48
-rw-r--r--drivers/clk/samsung/clk-s3c2410.c148
-rw-r--r--drivers/clk/samsung/clk-s3c2412.c25
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c55
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c17
-rw-r--r--drivers/clk/socfpga/Makefile9
-rw-r--r--drivers/clk/socfpga/clk-gate-s10.c125
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c149
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c146
-rw-r--r--drivers/clk/socfpga/clk-s10.c345
-rw-r--r--drivers/clk/socfpga/clk.h4
-rw-r--r--drivers/clk/socfpga/stratix10-clk.h80
-rw-r--r--drivers/clk/sprd/sc9860-clk.c76
-rw-r--r--drivers/clk/sunxi-ng/Kconfig12
-rw-r--r--drivers/clk/sunxi-ng/Makefile1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c1211
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.h56
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c32
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c58
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.h27
-rw-r--r--drivers/clk/tegra/clk-emc.c2
-rw-r--r--drivers/clk/tegra/clk-pll.c2
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c2
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c8
-rw-r--r--drivers/clk/tegra/clk-tegra114.c4
-rw-r--r--drivers/clk/tegra/clk-tegra124.c9
-rw-r--r--drivers/clk/tegra/clk-tegra20.c24
-rw-r--r--drivers/clk/tegra/clk-tegra210.c361
-rw-r--r--drivers/clk/tegra/clk-tegra30.c15
-rw-r--r--drivers/clk/tegra/clk.h7
-rw-r--r--drivers/clk/ti/clk.c38
-rw-r--r--drivers/clk/ti/clock.h13
-rw-r--r--drivers/clk/ti/divider.c26
-rw-r--r--drivers/clk/ti/mux.c13
-rw-r--r--drivers/clk/uniphier/clk-uniphier-sys.c25
-rw-r--r--drivers/clk/ux500/Makefile2
-rw-r--r--drivers/clk/ux500/abx500-clk.c16
-rw-r--r--drivers/clk/ux500/u8540_clk.c597
-rw-r--r--drivers/clk/ux500/u9540_clk.c18
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c5
-rw-r--r--drivers/clocksource/Kconfig8
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/timer-imx-tpm.c45
-rw-r--r--drivers/clocksource/timer-npcm7xx.c215
-rw-r--r--drivers/connector/cn_proc.c4
-rw-r--r--drivers/cpufreq/Kconfig.arm12
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c323
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c46
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c14
-rw-r--r--drivers/crypto/qat/qat_common/.gitignore1
-rw-r--r--drivers/dax/device.c2
-rw-r--r--drivers/dca/dca-core.c2
-rw-r--r--drivers/dma/qcom/bam_dma.c18
-rw-r--r--drivers/firmware/arm_scmi/clock.c2
-rw-r--r--drivers/firmware/arm_scmi/driver.c1
-rw-r--r--drivers/firmware/dmi_scan.c16
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c10
-rw-r--r--drivers/fpga/altera-ps-spi.c2
-rw-r--r--drivers/gpio/gpio-aspeed.c2
-rw-r--r--drivers/gpio/gpio-pci-idio-16.c8
-rw-r--r--drivers/gpio/gpio-pcie-idio-24.c22
-rw-r--r--drivers/gpio/gpiolib.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c99
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c86
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h9
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c72
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h182
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h10
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c71
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c23
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c1364
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h53
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c11
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h18
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c6
-rw-r--r--drivers/gpu/drm/bridge/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c4
-rw-r--r--drivers/gpu/drm/drm_atomic.c8
-rw-r--r--drivers/gpu/drm/drm_dp_dual_mode_helper.c39
-rw-r--r--drivers/gpu/drm/drm_drv.c2
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c7
-rw-r--r--drivers/gpu/drm/drm_edid.c11
-rw-r--r--drivers/gpu/drm/drm_file.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c73
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c22
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c27
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c52
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c27
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c3
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c37
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c13
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c57
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c1
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c20
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h4
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c4
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c10
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c3
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c11
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_format.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_kms.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c16
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c109
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c28
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c3
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c11
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c20
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c20
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c15
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c6
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c55
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c11
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c46
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c25
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c2
-rw-r--r--drivers/hid/Kconfig7
-rw-r--r--drivers/hid/hid-ids.h12
-rw-r--r--drivers/hid/hid-input.c24
-rw-r--r--drivers/hid/hid-lenovo.c36
-rw-r--r--drivers/hid/hidraw.c5
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c15
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c36
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c2
-rw-r--r--drivers/hid/wacom_sys.c4
-rw-r--r--drivers/hid/wacom_wac.c76
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/k10temp.c62
-rw-r--r--drivers/hwmon/nct6683.c4
-rw-r--r--drivers/hwmon/scmi-hwmon.c5
-rw-r--r--drivers/i2c/busses/Kconfig3
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c5
-rw-r--r--drivers/i2c/busses/i2c-i801.c16
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c4
-rw-r--r--drivers/i2c/busses/i2c-sprd.c22
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c2
-rw-r--r--drivers/i2c/i2c-core-acpi.c13
-rw-r--r--drivers/i2c/i2c-core-base.c3
-rw-r--r--drivers/i2c/i2c-dev.c2
-rw-r--r--drivers/infiniband/Kconfig5
-rw-r--r--drivers/infiniband/core/cache.c55
-rw-r--r--drivers/infiniband/core/cma.c60
-rw-r--r--drivers/infiniband/core/iwpm_util.c5
-rw-r--r--drivers/infiniband/core/mad.c4
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c26
-rw-r--r--drivers/infiniband/core/ucma.c44
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c6
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c9
-rw-r--r--drivers/infiniband/core/uverbs_std_types_flow_action.c12
-rw-r--r--drivers/infiniband/core/verbs.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c11
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h6
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c26
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c11
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c19
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h8
-rw-r--r--drivers/infiniband/hw/hfi1/init.c43
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c3
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c1
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c2
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c50
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c12
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c49
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c3
-rw-r--r--drivers/infiniband/hw/mlx5/Kconfig1
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c9
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c32
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c22
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c21
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c2
-rw-r--r--drivers/infiniband/ulp/srp/Kconfig2
-rw-r--r--drivers/infiniband/ulp/srpt/Kconfig2
-rw-r--r--drivers/input/evdev.c7
-rw-r--r--drivers/input/input-leds.c8
-rw-r--r--drivers/input/mouse/alps.c2
-rw-r--r--drivers/input/rmi4/rmi_spi.c7
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c431
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/dma-iommu.c54
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/iommu/rockchip-iommu.c11
-rw-r--r--drivers/irqchip/qcom-irq-combiner.c4
-rw-r--r--drivers/isdn/mISDN/dsp_hwec.c8
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c14
-rw-r--r--drivers/md/bcache/alloc.c5
-rw-r--r--drivers/md/bcache/bcache.h4
-rw-r--r--drivers/md/bcache/debug.c7
-rw-r--r--drivers/md/bcache/io.c8
-rw-r--r--drivers/md/bcache/request.c5
-rw-r--r--drivers/md/bcache/super.c75
-rw-r--r--drivers/md/bcache/writeback.c4
-rw-r--r--drivers/md/dm-bufio.c5
-rw-r--r--drivers/md/dm-cache-background-tracker.c2
-rw-r--r--drivers/md/dm-integrity.c2
-rw-r--r--drivers/md/dm-raid1.c10
-rw-r--r--drivers/md/dm.c7
-rw-r--r--drivers/md/md.c6
-rw-r--r--drivers/md/raid1.c25
-rw-r--r--drivers/media/i2c/saa7115.c2
-rw-r--r--drivers/media/i2c/saa711x_regs.h2
-rw-r--r--drivers/media/i2c/tda7432.c2
-rw-r--r--drivers/media/i2c/tvp5150.c2
-rw-r--r--drivers/media/i2c/tvp5150_reg.h2
-rw-r--r--drivers/media/i2c/tvp7002.c2
-rw-r--r--drivers/media/i2c/tvp7002_reg.h2
-rw-r--r--drivers/media/media-devnode.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-audio-hook.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-audio-hook.h2
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-i2c.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c2
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c4
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/pci/cx88/cx88-core.c2
-rw-r--r--drivers/media/pci/cx88/cx88-i2c.c2
-rw-r--r--drivers/media/pci/cx88/cx88-video.c2
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-aztech.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-maxiradio.c2
-rw-r--r--drivers/media/radio/radio-rtrack2.c2
-rw-r--r--drivers/media/radio/radio-sf16fmi.c2
-rw-r--r--drivers/media/radio/radio-terratec.c2
-rw-r--r--drivers/media/radio/radio-trust.c2
-rw-r--r--drivers/media/radio/radio-typhoon.c2
-rw-r--r--drivers/media/radio/radio-zoltrix.c2
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m135a.c2
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv-fm53.c2
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv2.c2
-rw-r--r--drivers/media/rc/keymaps/rc-kaiomy.c2
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c2
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-new.c2
-rw-r--r--drivers/media/tuners/tea5761.c4
-rw-r--r--drivers/media/tuners/tea5767.c4
-rw-r--r--drivers/media/tuners/tuner-xc2028-types.h2
-rw-r--r--drivers/media/tuners/tuner-xc2028.c4
-rw-r--r--drivers/media/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx.h2
-rw-r--r--drivers/media/usb/gspca/zc3xx-reg.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000-cards.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-core.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-i2c.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-regs.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000-usb-isoc.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000.h2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c6
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c6
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c4
-rw-r--r--drivers/memory/emif-asm-offsets.c72
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/mfd/cros_ec_dev.c31
-rw-r--r--drivers/misc/cxl/cxl.h1
-rw-r--r--drivers/misc/cxl/pci.c12
-rw-r--r--drivers/misc/cxl/sysfs.c10
-rw-r--r--drivers/misc/eeprom/at24.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c39
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c25
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c33
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c9
-rw-r--r--drivers/mtd/nand/core.c3
-rw-r--r--drivers/mtd/nand/onenand/omap2.c105
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c45
-rw-r--r--drivers/mtd/nand/raw/nand_base.c5
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c2
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c19
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/bonding/bond_alb.c73
-rw-r--r--drivers/net/bonding/bond_main.c90
-rw-r--r--drivers/net/bonding/bond_options.c2
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/flexcan.c26
-rw-r--r--drivers/net/can/spi/hi311x.c11
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c81
-rw-r--r--drivers/net/dsa/b53/b53_priv.h6
-rw-r--r--drivers/net/dsa/bcm_sf2.c200
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c36
-rw-r--r--drivers/net/dsa/dsa_loop.c12
-rw-r--r--drivers/net/dsa/lan9303-core.c11
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c11
-rw-r--r--drivers/net/dsa/mt7530.c11
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c414
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h19
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c109
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h45
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c67
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h25
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c12
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c39
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h3
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c20
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h3
-rw-r--r--drivers/net/dsa/qca8k.c10
-rw-r--r--drivers/net/ethernet/3com/3c59x.c166
-rw-r--r--drivers/net/ethernet/8390/Kconfig17
-rw-r--r--drivers/net/ethernet/8390/Makefile1
-rw-r--r--drivers/net/ethernet/8390/ax88796.c228
-rw-r--r--drivers/net/ethernet/8390/ne.c4
-rw-r--r--drivers/net/ethernet/8390/xsurf100.c382
-rw-r--r--drivers/net/ethernet/Kconfig15
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c16
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c16
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c24
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c196
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h9
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c20
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c164
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h19
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c166
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c124
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c109
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h17
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c9
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c60
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h14
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c510
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c715
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c368
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c259
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c15
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h16
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c52
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h79
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h48
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c87
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c182
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c67
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c120
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/srq.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c86
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h36
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h15
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/freescale/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/fec.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c10
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c8
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c45
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c328
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c29
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c297
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c50
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c38
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/Kconfig2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c8
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c117
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h1
-rw-r--r--drivers/net/ethernet/intel/e100.c28
-rw-r--r--drivers/net/ethernet/intel/e1000/Makefile26
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h29
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c23
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c28
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.h28
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c28
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_osdep.h29
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c28
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile27
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h21
-rw-r--r--drivers/net/ethernet/intel/fm10k/Makefile23
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c136
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c27
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c94
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.h20
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h33
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c32
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c63
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c117
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c37
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c542
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c196
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c84
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_trace.h23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c32
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c111
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/Makefile26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_alloc.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c27
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_devids.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_hmc.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_osdep.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_register.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_status.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_trace.h23
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c30
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h36
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h27
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_client.c6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_client.h8
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c33
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c59
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c4
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile28
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h25
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h23
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h36
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c96
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c23
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c406
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c19
-rw-r--r--drivers/net/ethernet/intel/igbvf/Makefile28
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h26
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h26
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.h26
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/regs.h26
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h26
-rw-r--r--drivers/net/ethernet/intel/ixgb/Makefile27
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.c29
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c29
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ids.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c29
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_osdep.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_param.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c41
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c55
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h27
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c370
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_model.h42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c39
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h24
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c38
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile28
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h26
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h26
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c38
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h26
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h26
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h26
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c1049
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c197
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c278
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c133
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c368
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c562
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c148
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c205
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c11
-rw-r--r--drivers/net/ethernet/mscc/Kconfig30
-rw-r--r--drivers/net/ethernet/mscc/Makefile5
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1333
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h572
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ana.h625
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c316
-rw-r--r--drivers/net/ethernet/mscc/ocelot_dev.h275
-rw-r--r--drivers/net/ethernet/mscc/ocelot_dev_gmii.h154
-rw-r--r--drivers/net/ethernet/mscc/ocelot_hsio.h785
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c116
-rw-r--r--drivers/net/ethernet/mscc/ocelot_qs.h78
-rw-r--r--drivers/net/ethernet/mscc/ocelot_qsys.h270
-rw-r--r--drivers/net/ethernet/mscc/ocelot_regs.c497
-rw-r--r--drivers/net/ethernet/mscc/ocelot_rew.h81
-rw-r--r--drivers/net/ethernet/mscc/ocelot_sys.h144
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h21
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c354
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c41
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h38
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c172
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c106
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c44
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h7
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c40
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h17
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c20
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c50
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app_nic.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.h22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c31
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c50
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c59
-rw-r--r--drivers/net/ethernet/ni/nixge.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h74
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c118
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c113
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h259
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c109
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c170
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c98
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h81
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c1337
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c76
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c247
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c29
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h21
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c227
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c184
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_rdma.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c11
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c128
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.h32
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c9
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c11
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h13
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c21
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c14
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c64
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c51
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c784
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c107
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h30
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c88
-rw-r--r--drivers/net/ethernet/sfc/efx.c179
-rw-r--r--drivers/net/ethernet/sfc/efx.h21
-rw-r--r--drivers/net/ethernet/sfc/farch.c41
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h61
-rw-r--r--drivers/net/ethernet/sfc/rx.c124
-rw-r--r--drivers/net/ethernet/socionext/Kconfig2
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c252
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c120
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c221
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c92
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c49
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c34
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c195
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c268
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h69
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c351
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c295
-rw-r--r--drivers/net/ethernet/sun/niu.c5
-rw-r--r--drivers/net/ethernet/ti/Kconfig10
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c8
-rw-r--r--drivers/net/ethernet/ti/cpsw.c117
-rw-r--r--drivers/net/ethernet/ti/cpts.c5
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c10
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c23
-rw-r--r--drivers/net/geneve.c72
-rw-r--r--drivers/net/hamradio/mkiss.c2
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/hyperv/Kconfig1
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc.c58
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/hyperv/rndis_filter.c6
-rw-r--r--drivers/net/ieee802154/atusb.c2
-rw-r--r--drivers/net/ieee802154/mcr20a.c15
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/macsec.c5
-rw-r--r--drivers/net/macvlan.c68
-rw-r--r--drivers/net/phy/Kconfig27
-rw-r--r--drivers/net/phy/Makefile4
-rw-r--r--drivers/net/phy/asix.c63
-rw-r--r--drivers/net/phy/bcm-phy-lib.c6
-rw-r--r--drivers/net/phy/broadcom.c10
-rw-r--r--drivers/net/phy/dp83tc811.c347
-rw-r--r--drivers/net/phy/marvell.c14
-rw-r--r--drivers/net/phy/mdio-bitbang.c9
-rw-r--r--drivers/net/phy/mdio-boardinfo.c5
-rw-r--r--drivers/net/phy/mdio-gpio.c128
-rw-r--r--drivers/net/phy/mdio-mscc-miim.c193
-rw-r--r--drivers/net/phy/micrel.c36
-rw-r--r--drivers/net/phy/microchip.c203
-rw-r--r--drivers/net/phy/microchip_t1.c74
-rw-r--r--drivers/net/phy/phy_device.c11
-rw-r--r--drivers/net/phy/phylink.c20
-rw-r--r--drivers/net/phy/sfp-bus.c9
-rw-r--r--drivers/net/phy/smsc.c5
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/team/team.c40
-rw-r--r--drivers/net/tun.c70
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/lan78xx.c235
-rw-r--r--drivers/net/usb/qmi_wwan.c14
-rw-r--r--drivers/net/virtio_net.c86
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c95
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig12
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile7
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c269
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h24
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c183
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h111
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c24
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c19
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h22
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c26
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c24
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c1414
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h95
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h46
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c63
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h357
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c34
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h28
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c138
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c36
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h8
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c40
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c33
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c15
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c22
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c19
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c14
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c51
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c38
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.h2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h156
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/nvm.c162
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c464
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c208
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c117
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c103
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c194
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c438
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c139
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c52
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c43
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_main.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_phy.c84
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c6
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c6
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mt7601u.h5
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c11
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c9
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c52
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800.h17
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c20
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h10
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c33
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c108
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c72
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c34
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c27
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_boot_params.h3
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h3
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h7
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h19
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h1
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c27
-rw-r--r--drivers/nvdimm/Kconfig3
-rw-r--r--drivers/nvdimm/dimm_devs.c22
-rw-r--r--drivers/nvdimm/of_pmem.c2
-rw-r--r--drivers/nvme/host/Kconfig2
-rw-r--r--drivers/nvme/host/core.c68
-rw-r--r--drivers/nvme/host/fabrics.c89
-rw-r--r--drivers/nvme/host/fabrics.h33
-rw-r--r--drivers/nvme/host/fc.c12
-rw-r--r--drivers/nvme/host/multipath.c24
-rw-r--r--drivers/nvme/host/nvme.h21
-rw-r--r--drivers/nvme/host/pci.c47
-rw-r--r--drivers/nvme/host/rdma.c14
-rw-r--r--drivers/nvme/target/Kconfig2
-rw-r--r--drivers/nvme/target/admin-cmd.c1
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/io-cmd.c4
-rw-r--r--drivers/nvme/target/loop.c26
-rw-r--r--drivers/of/fdt.c7
-rw-r--r--drivers/of/of_mdio.c3
-rw-r--r--drivers/of/overlay.c30
-rw-r--r--drivers/of/unittest-data/Makefile6
-rw-r--r--drivers/parisc/ccio-dma.c4
-rw-r--r--drivers/pci/dwc/pcie-kirin.c2
-rw-r--r--drivers/pci/host/pci-aardvark.c53
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pci.c42
-rw-r--r--drivers/pci/quirks.c4
-rw-r--r--drivers/pci/setup-res.c2
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c17
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c16
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c45
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c2
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c896
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c76
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c16
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c141
-rw-r--r--drivers/platform/x86/Kconfig4
-rw-r--r--drivers/platform/x86/asus-wireless.c4
-rw-r--r--drivers/ptp/ptp_pch.c7
-rw-r--r--drivers/pwm/Kconfig6
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c1
-rw-r--r--drivers/pwm/pwm-imx.c3
-rw-r--r--drivers/pwm/pwm-jz4740.c41
-rw-r--r--drivers/pwm/pwm-mediatek.c36
-rw-r--r--drivers/pwm/pwm-puv3.c4
-rw-r--r--drivers/pwm/pwm-rcar.c58
-rw-r--r--drivers/pwm/pwm-stm32-lp.c5
-rw-r--r--drivers/pwm/pwm-stm32.c22
-rw-r--r--drivers/pwm/pwm-sun4i.c38
-rw-r--r--drivers/pwm/sysfs.c3
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c19
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c4
-rw-r--r--drivers/reset/reset-uniphier.c6
-rw-r--r--drivers/rpmsg/rpmsg_char.c2
-rw-r--r--drivers/rtc/rtc-opal.c37
-rw-r--r--drivers/s390/block/dasd_alias.c13
-rw-r--r--drivers/s390/block/dasd_diag.c1
-rw-r--r--drivers/s390/char/sclp_early_core.c2
-rw-r--r--drivers/s390/cio/ccwgroup.c5
-rw-r--r--drivers/s390/cio/chsc.c14
-rw-r--r--drivers/s390/cio/cio.c257
-rw-r--r--drivers/s390/cio/ioasm.c24
-rw-r--r--drivers/s390/cio/ioasm.h1
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/cio/qdio_setup.c14
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c13
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c19
-rw-r--r--drivers/s390/crypto/ap_bus.c32
-rw-r--r--drivers/s390/crypto/ap_bus.h5
-rw-r--r--drivers/s390/crypto/ap_debug.h3
-rw-r--r--drivers/s390/crypto/pkey_api.c41
-rw-r--r--drivers/s390/crypto/zcrypt_api.c471
-rw-r--r--drivers/s390/crypto/zcrypt_api.h26
-rw-r--r--drivers/s390/net/lcs.c3
-rw-r--r--drivers/s390/net/qeth_core.h63
-rw-r--r--drivers/s390/net/qeth_core_main.c306
-rw-r--r--drivers/s390/net/qeth_core_mpc.h14
-rw-r--r--drivers/s390/net/qeth_core_sys.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c158
-rw-r--r--drivers/s390/net/qeth_l3_main.c207
-rw-r--r--drivers/s390/net/smsgiucv.c2
-rw-r--r--drivers/sbus/char/oradax.c2
-rw-r--r--drivers/scsi/aacraid/commsup.c12
-rw-r--r--drivers/scsi/aacraid/linit.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c8
-rw-r--r--drivers/scsi/dpt_i2o.c13
-rw-r--r--drivers/scsi/fnic/fnic_trace.c2
-rw-r--r--drivers/scsi/hosts.c1
-rw-r--r--drivers/scsi/isci/port_config.c3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c6
-rw-r--r--drivers/scsi/qedf/qedf.h7
-rw-r--r--drivers/scsi/qedf/qedf_debugfs.c1
-rw-r--r--drivers/scsi/qedf/qedf_fip.c7
-rw-r--r--drivers/scsi/qedf/qedf_io.c11
-rw-r--r--drivers/scsi/qedf/qedf_main.c106
-rw-r--r--drivers/scsi/qedi/qedi.h3
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c2
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h6
-rw-r--r--drivers/scsi/qedi/qedi_main.c198
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c41
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c44
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/scsi_debug.c33
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_dh.c3
-rw-r--r--drivers/scsi/scsi_lib.c27
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c29
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sd_zbc.c140
-rw-r--r--drivers/scsi/sr.c19
-rw-r--r--drivers/scsi/storvsc_drv.c7
-rw-r--r--drivers/scsi/ufs/ufshcd.c40
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/slimbus/messaging.c2
-rw-r--r--drivers/soc/bcm/raspberrypi-power.c2
-rw-r--r--drivers/soc/samsung/pm_domains.c11
-rw-r--r--drivers/spi/spi-bcm-qspi.c28
-rw-r--r--drivers/spi/spi-bcm2835aux.c5
-rw-r--r--drivers/spi/spi-cadence.c8
-rw-r--r--drivers/spi/spi-imx.c2
-rw-r--r--drivers/spi/spi-pxa2xx.h2
-rw-r--r--drivers/spi/spi-sh-msiof.c1
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c2
-rw-r--r--drivers/staging/wilc1000/host_interface.c2
-rw-r--r--drivers/target/target_core_iblock.c8
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/tee/tee_core.c11
-rw-r--r--drivers/tee/tee_shm.c5
-rw-r--r--drivers/thermal/Kconfig7
-rw-r--r--drivers/thermal/imx_thermal.c6
-rw-r--r--drivers/thermal/int340x_thermal/int3403_thermal.c3
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c14
-rw-r--r--drivers/thermal/thermal_core.c3
-rw-r--r--drivers/thermal/thermal_core.h10
-rw-r--r--drivers/thermal/thermal_helpers.c5
-rw-r--r--drivers/thermal/thermal_sysfs.c225
-rw-r--r--drivers/tty/n_gsm.c23
-rw-r--r--drivers/tty/serial/earlycon.c6
-rw-r--r--drivers/tty/serial/imx.c19
-rw-r--r--drivers/tty/serial/mvebu-uart.c1
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c10
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/tty_io.c5
-rw-r--r--drivers/tty/tty_ldisc.c29
-rw-r--r--drivers/uio/uio_hv_generic.c72
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/core/config.c4
-rw-r--r--drivers/usb/core/hcd.c19
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/core/phy.c93
-rw-r--r--drivers/usb/core/phy.h22
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc2/core.h2
-rw-r--r--drivers/usb/dwc2/gadget.c21
-rw-r--r--drivers/usb/dwc2/hcd.c13
-rw-r--r--drivers/usb/dwc2/pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c4
-rw-r--r--drivers/usb/gadget/function/f_phonet.c2
-rw-r--r--drivers/usb/host/ehci-mem.c3
-rw-r--r--drivers/usb/host/ehci-sched.c6
-rw-r--r--drivers/usb/host/xhci-dbgtty.c8
-rw-r--r--drivers/usb/host/xhci-hub.c2
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-plat.c32
-rw-r--r--drivers/usb/host/xhci.c1
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/musb/musb_dsps.c2
-rw-r--r--drivers/usb/musb/musb_gadget.c3
-rw-r--r--drivers/usb/musb/musb_host.c10
-rw-r--r--drivers/usb/musb/musb_host.h7
-rw-r--r--drivers/usb/musb/musb_virthub.c25
-rw-r--r--drivers/usb/serial/Kconfig1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--drivers/usb/serial/usb-serial-simple.c7
-rw-r--r--drivers/usb/serial/visor.c69
-rw-r--r--drivers/usb/typec/tcpm.c1
-rw-r--r--drivers/usb/typec/tps6598x.c47
-rw-r--r--drivers/usb/typec/ucsi/Makefile2
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c2
-rw-r--r--drivers/usb/usbip/stub.h2
-rw-r--r--drivers/usb/usbip/stub_dev.c43
-rw-r--r--drivers/usb/usbip/stub_main.c100
-rw-r--r--drivers/usb/usbip/usbip_common.h2
-rw-r--r--drivers/usb/usbip/usbip_event.c4
-rw-r--r--drivers/usb/usbip/vhci_hcd.c13
-rw-r--r--drivers/vhost/net.c12
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c70
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.h9
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c19
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c17
-rw-r--r--drivers/watchdog/Kconfig12
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/ar7_wdt.c14
-rw-r--r--drivers/watchdog/asm9260_wdt.c8
-rw-r--r--drivers/watchdog/aspeed_wdt.c20
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c5
-rw-r--r--drivers/watchdog/at91sam9_wdt.c5
-rw-r--r--drivers/watchdog/at91sam9_wdt.h5
-rw-r--r--drivers/watchdog/bcm2835_wdt.c5
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c5
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c5
-rw-r--r--drivers/watchdog/bcm7038_wdt.c12
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c9
-rw-r--r--drivers/watchdog/cadence_wdt.c5
-rw-r--r--drivers/watchdog/coh901327_wdt.c18
-rw-r--r--drivers/watchdog/da9052_wdt.c6
-rw-r--r--drivers/watchdog/da9055_wdt.c6
-rw-r--r--drivers/watchdog/da9062_wdt.c10
-rw-r--r--drivers/watchdog/da9063_wdt.c5
-rw-r--r--drivers/watchdog/davinci_wdt.c15
-rw-r--r--drivers/watchdog/digicolor_wdt.c5
-rw-r--r--drivers/watchdog/dw_wdt.c32
-rw-r--r--drivers/watchdog/ebc-c384_wdt.c1
-rw-r--r--drivers/watchdog/f71808e_wdt.c2
-rw-r--r--drivers/watchdog/gpio_wdt.c4
-rw-r--r--drivers/watchdog/hpwdt.c312
-rw-r--r--drivers/watchdog/imx2_wdt.c8
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c2
-rw-r--r--drivers/watchdog/mei_wdt.c12
-rw-r--r--drivers/watchdog/mena21_wdt.c4
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c50
-rw-r--r--drivers/watchdog/meson_wdt.c2
-rw-r--r--drivers/watchdog/mtk_wdt.c13
-rw-r--r--drivers/watchdog/mtx-1_wdt.c11
-rw-r--r--drivers/watchdog/npcm_wdt.c254
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c8
-rw-r--r--drivers/watchdog/omap_wdt.c4
-rw-r--r--drivers/watchdog/pnx4008_wdt.c2
-rw-r--r--drivers/watchdog/renesas_wdt.c93
-rw-r--r--drivers/watchdog/sama5d4_wdt.c6
-rw-r--r--drivers/watchdog/sch311x_wdt.c2
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c2
-rw-r--r--drivers/watchdog/sprd_wdt.c4
-rw-r--r--drivers/watchdog/st_lpc_wdt.c6
-rw-r--r--drivers/watchdog/sunxi_wdt.c2
-rw-r--r--drivers/watchdog/tangox_wdt.c6
-rw-r--r--drivers/watchdog/tegra_wdt.c10
-rw-r--r--drivers/watchdog/uniphier_wdt.c15
-rw-r--r--drivers/watchdog/w83977f_wdt.c2
-rw-r--r--drivers/watchdog/wafer5823wdt.c2
-rw-r--r--drivers/watchdog/wm831x_wdt.c5
-rw-r--r--drivers/watchdog/wm8350_wdt.c5
-rw-r--r--drivers/xen/xen-pciback/conf_space_quirks.c2
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c8
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c3
1533 files changed, 52342 insertions, 23058 deletions
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 76fb96966f7b..2f2e737be0f8 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void)
return opregion;
}
+static bool dmi_is_desktop(void)
+{
+ const char *chassis_type;
+
+ chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+ if (!chassis_type)
+ return false;
+
+ if (!strcmp(chassis_type, "3") || /* 3: Desktop */
+ !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
+ !strcmp(chassis_type, "5") || /* 5: Pizza Box */
+ !strcmp(chassis_type, "6") || /* 6: Mini Tower */
+ !strcmp(chassis_type, "7") || /* 7: Tower */
+ !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
+ return true;
+
+ return false;
+}
+
int acpi_video_register(void)
{
int ret = 0;
@@ -2143,8 +2162,12 @@ int acpi_video_register(void)
* win8 ready (where we also prefer the native backlight driver, so
* normally the acpi_video code should not register there anyways).
*/
- if (only_lcd == -1)
- only_lcd = acpi_osi_is_win8();
+ if (only_lcd == -1) {
+ if (dmi_is_desktop() && acpi_osi_is_win8())
+ only_lcd = true;
+ else
+ only_lcd = false;
+ }
dmi_check_system(video_dmi_table);
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index ebb626ffb5fa..4bde16fb97d8 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -12,23 +12,64 @@
#define pr_fmt(fmt) "ACPI: watchdog: " fmt
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include "internal.h"
+static const struct dmi_system_id acpi_watchdog_skip[] = {
+ {
+ /*
+ * On Lenovo Z50-70 there are two issues with the WDAT
+ * table. First some of the instructions use RTC SRAM
+ * to store persistent information. This does not work well
+ * with Linux RTC driver. Second, more important thing is
+ * that the instructions do not actually reset the system.
+ *
+ * On this particular system iTCO_wdt seems to work just
+ * fine so we prefer that over WDAT for now.
+ *
+ * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
+ */
+ .ident = "Lenovo Z50-70",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20354"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"),
+ },
+ },
+ {}
+};
+
+static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
+{
+ const struct acpi_table_wdat *wdat = NULL;
+ acpi_status status;
+
+ if (acpi_disabled)
+ return NULL;
+
+ if (dmi_check_system(acpi_watchdog_skip))
+ return NULL;
+
+ status = acpi_get_table(ACPI_SIG_WDAT, 0,
+ (struct acpi_table_header **)&wdat);
+ if (ACPI_FAILURE(status)) {
+ /* It is fine if there is no WDAT */
+ return NULL;
+ }
+
+ return wdat;
+}
+
/**
* Returns true if this system should prefer ACPI based watchdog instead of
* the native one (which are typically the same hardware).
*/
bool acpi_has_watchdog(void)
{
- struct acpi_table_header hdr;
-
- if (acpi_disabled)
- return false;
-
- return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr));
+ return !!acpi_watchdog_get_wdat();
}
EXPORT_SYMBOL_GPL(acpi_has_watchdog);
@@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void)
struct platform_device *pdev;
struct resource *resources;
size_t nresources = 0;
- acpi_status status;
int i;
- status = acpi_get_table(ACPI_SIG_WDAT, 0,
- (struct acpi_table_header **)&wdat);
- if (ACPI_FAILURE(status)) {
+ wdat = acpi_watchdog_get_wdat();
+ if (!wdat) {
/* It is fine if there is no WDAT */
return;
}
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 514aaf948ea9..3825df923480 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -56,6 +56,10 @@ acpi_status acpi_ns_initialize_objects(void);
acpi_status acpi_ns_initialize_devices(u32 flags);
+acpi_status
+acpi_ns_init_one_package(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
/*
* nsload - Namespace loading
*/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 99d92cb32803..f85c6f3271f6 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -174,6 +174,13 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
+ /* Complete the initialization/resolution of package objects */
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, 0,
+ acpi_ns_init_one_package, NULL, NULL,
+ NULL);
+
/* Parameter Data (optional) */
if (parameter_node) {
@@ -430,6 +437,13 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(status);
}
+ /* Complete the initialization/resolution of package objects */
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, 0,
+ acpi_ns_init_one_package, NULL, NULL,
+ NULL);
+
/* Store the ddb_handle into the Target operand */
status = acpi_ex_store(ddb_handle, target, walk_state);
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 77f2b5f4948a..d77257d1c827 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -242,6 +242,58 @@ error_exit:
/*******************************************************************************
*
+ * FUNCTION: acpi_ns_init_one_package
+ *
+ * PARAMETERS: obj_handle - Node
+ * level - Current nesting level
+ * context - Not used
+ * return_value - Not used
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every package
+ * within the namespace. Used during dynamic load of an SSDT.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_init_one_package(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node =
+ (struct acpi_namespace_node *)obj_handle;
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ return (AE_OK);
+ }
+
+ /* Exit if package is already initialized */
+
+ if (obj_desc->package.flags & AOPOBJ_DATA_VALID) {
+ return (AE_OK);
+ }
+
+ status = acpi_ds_get_package_arguments(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return (AE_OK);
+ }
+
+ status =
+ acpi_ut_walk_package_tree(obj_desc, NULL,
+ acpi_ds_init_package_element, NULL);
+ if (ACPI_FAILURE(status)) {
+ return (AE_OK);
+ }
+
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ns_init_one_object
*
* PARAMETERS: obj_handle - Node
@@ -360,27 +412,11 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
case ACPI_TYPE_PACKAGE:
- info->package_init++;
- status = acpi_ds_get_package_arguments(obj_desc);
- if (ACPI_FAILURE(status)) {
- break;
- }
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE,
- "%s: Completing resolution of Package elements\n",
- ACPI_GET_FUNCTION_NAME));
+ /* Complete the initialization/resolution of the package object */
- /*
- * Resolve all named references in package objects (and all
- * sub-packages). This action has been deferred until the entire
- * namespace has been loaded, in order to support external and
- * forward references from individual package elements (05/2017).
- */
- status = acpi_ut_walk_package_tree(obj_desc, NULL,
- acpi_ds_init_package_element,
- NULL);
-
- obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ info->package_init++;
+ status =
+ acpi_ns_init_one_package(obj_handle, level, NULL, NULL);
break;
default:
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index e1eee7a60fad..f1cc4f9d31cd 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -635,4 +635,26 @@ module_param_call(lid_init_state,
NULL, 0644);
MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
-module_acpi_driver(acpi_button_driver);
+static int acpi_button_register_driver(struct acpi_driver *driver)
+{
+ /*
+ * Modules such as nouveau.ko and i915.ko have a link time dependency
+ * on acpi_lid_open(), and would therefore not be loadable on ACPI
+ * capable kernels booted in non-ACPI mode if the return value of
+ * acpi_bus_register_driver() is returned from here with ACPI disabled
+ * when this driver is built as a module.
+ */
+ if (acpi_disabled)
+ return 0;
+
+ return acpi_bus_register_driver(driver);
+}
+
+static void acpi_button_unregister_driver(struct acpi_driver *driver)
+{
+ if (!acpi_disabled)
+ acpi_bus_unregister_driver(driver);
+}
+
+module_driver(acpi_button_driver, acpi_button_register_driver,
+ acpi_button_unregister_driver);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index cc234e6a6297..970dd87d347c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2166,10 +2166,10 @@ int __init acpi_scan_init(void)
acpi_cmos_rtc_init();
acpi_container_init();
acpi_memory_hotplug_init();
+ acpi_watchdog_init();
acpi_pnp_init();
acpi_int340x_thermal_init();
acpi_amba_init();
- acpi_watchdog_init();
acpi_init_lpit();
acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 99a1a650326d..974e58457697 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
},
},
+ /*
+ * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
+ * the Low Power S0 Idle firmware interface (see
+ * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
+ */
+ {
+ .callback = init_no_lps0,
+ .ident = "ThinkPad X1 Tablet(2016)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
+ },
+ },
{},
};
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 594c228d2f02..4a3ac31c07d0 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -69,11 +69,12 @@ static ssize_t driver_override_show(struct device *_dev,
struct device_attribute *attr, char *buf)
{
struct amba_device *dev = to_amba_device(_dev);
+ ssize_t len;
- if (!dev->driver_override)
- return 0;
-
- return sprintf(buf, "%s\n", dev->driver_override);
+ device_lock(_dev);
+ len = sprintf(buf, "%s\n", dev->driver_override);
+ device_unlock(_dev);
+ return len;
}
static ssize_t driver_override_store(struct device *_dev,
@@ -81,9 +82,10 @@ static ssize_t driver_override_store(struct device *_dev,
const char *buf, size_t count)
{
struct amba_device *dev = to_amba_device(_dev);
- char *driver_override, *old = dev->driver_override, *cp;
+ char *driver_override, *old, *cp;
- if (count > PATH_MAX)
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -94,12 +96,15 @@ static ssize_t driver_override_store(struct device *_dev,
if (cp)
*cp = '\0';
+ device_lock(_dev);
+ old = dev->driver_override;
if (strlen(driver_override)) {
dev->driver_override = driver_override;
} else {
kfree(driver_override);
dev->driver_override = NULL;
}
+ device_unlock(_dev);
kfree(old);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 764b63a5aade..e578eee31589 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2839,6 +2839,14 @@ static void binder_transaction(struct binder_proc *proc,
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
+ if (target_node && target_proc == proc) {
+ binder_user_error("%d:%d got transaction to context manager from process owning it\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_invalid_target_handle;
+ }
}
if (!target_node) {
/*
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 1ff17799769d..6389c88b3500 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -698,7 +698,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
DPRINTK("ENTER\n");
- ahci_stop_engine(ap);
+ hpriv->stop_engine(ap);
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
deadline, &online, NULL);
@@ -724,7 +724,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
bool online;
int rc;
- ahci_stop_engine(ap);
+ hpriv->stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
@@ -788,7 +788,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
DPRINTK("ENTER\n");
- ahci_stop_engine(ap);
+ hpriv->stop_engine(ap);
for (i = 0; i < 2; i++) {
u16 val;
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 4356ef1d28a8..824bd399f02e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -350,7 +350,6 @@ struct ahci_host_priv {
u32 em_msg_type; /* EM message type */
bool got_runtime_pm; /* Did we do pm_runtime_get? */
struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
- struct reset_control *rsts; /* Optional */
struct regulator **target_pwrs; /* Optional */
/*
* If platform uses PHYs. There is a 1:1 relation between the port number and
@@ -366,6 +365,13 @@ struct ahci_host_priv {
* be overridden anytime before the host is activated.
*/
void (*start_engine)(struct ata_port *ap);
+ /*
+ * Optional ahci_stop_engine override, if not set this gets set to the
+ * default ahci_stop_engine during ahci_save_initial_config, this can
+ * be overridden anytime before the host is activated.
+ */
+ int (*stop_engine)(struct ata_port *ap);
+
irqreturn_t (*irq_handler)(int irq, void *dev_instance);
/* only required for per-port MSI(-X) support */
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index de7128d81e9c..0045dacd814b 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -62,6 +62,60 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
}
+/**
+ * ahci_mvebu_stop_engine
+ *
+ * @ap: Target ata port
+ *
+ * Errata Ref#226 - SATA Disk HOT swap issue when connected through
+ * Port Multiplier in FIS-based Switching mode.
+ *
+ * To avoid the issue, according to design, the bits[11:8, 0] of
+ * register PxFBS are cleared when Port Command and Status (0x18) bit[0]
+ * changes its value from 1 to 0, i.e. falling edge of Port
+ * Command and Status bit[0] sends PULSE that resets PxFBS
+ * bits[11:8; 0].
+ *
+ * This function is used to override function of "ahci_stop_engine"
+ * from libahci.c by adding the mvebu work around(WA) to save PxFBS
+ * value before the PxCMD ST write of 0, then restore PxFBS value.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int ahci_mvebu_stop_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp, port_fbs;
+
+ tmp = readl(port_mmio + PORT_CMD);
+
+ /* check if the HBA is idle */
+ if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
+ return 0;
+
+ /* save the port PxFBS register for later restore */
+ port_fbs = readl(port_mmio + PORT_FBS);
+
+ /* setting HBA to idle */
+ tmp &= ~PORT_CMD_START;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /*
+ * bit #15 PxCMD signal doesn't clear PxFBS,
+ * restore the PxFBS register right after clearing the PxCMD ST,
+ * no need to wait for the PxCMD bit #15.
+ */
+ writel(port_fbs, port_mmio + PORT_FBS);
+
+ /* wait for engine to stop. This could be as long as 500 msec */
+ tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
+ PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
+ if (tmp & PORT_CMD_LIST_ON)
+ return -EIO;
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
{
@@ -112,6 +166,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
if (rc)
return rc;
+ hpriv->stop_engine = ahci_mvebu_stop_engine;
+
if (of_device_is_compatible(pdev->dev.of_node,
"marvell,armada-380-ahci")) {
dram = mv_mbus_dram_info();
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 2685f28160f7..cfdef4d44ae9 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -96,7 +96,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
DPRINTK("ENTER\n");
- ahci_stop_engine(ap);
+ hpriv->stop_engine(ap);
/*
* There is a errata on ls1021a Rev1.0 and Rev2.0 which is:
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index c2b5941d9184..ad58da7c9aff 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -165,7 +165,7 @@ static int xgene_ahci_restart_engine(struct ata_port *ap)
PORT_CMD_ISSUE, 0x0, 1, 100))
return -EBUSY;
- ahci_stop_engine(ap);
+ hpriv->stop_engine(ap);
ahci_start_fis_rx(ap);
/*
@@ -421,7 +421,7 @@ static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
- ahci_stop_engine(ap);
+ hpriv->stop_engine(ap);
rc = xgene_ahci_do_hardreset(link, deadline, &online);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 7adcf3caabd0..e5d90977caec 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -560,6 +560,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
if (!hpriv->start_engine)
hpriv->start_engine = ahci_start_engine;
+ if (!hpriv->stop_engine)
+ hpriv->stop_engine = ahci_stop_engine;
+
if (!hpriv->irq_handler)
hpriv->irq_handler = ahci_single_level_irq_intr;
}
@@ -897,9 +900,10 @@ static void ahci_start_port(struct ata_port *ap)
static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
int rc;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
/* disable DMA */
- rc = ahci_stop_engine(ap);
+ rc = hpriv->stop_engine(ap);
if (rc) {
*emsg = "failed to stop engine";
return rc;
@@ -1310,7 +1314,7 @@ int ahci_kick_engine(struct ata_port *ap)
int busy, rc;
/* stop engine */
- rc = ahci_stop_engine(ap);
+ rc = hpriv->stop_engine(ap);
if (rc)
goto out_restart;
@@ -1549,7 +1553,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
DPRINTK("ENTER\n");
- ahci_stop_engine(ap);
+ hpriv->stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
@@ -2075,14 +2079,14 @@ void ahci_error_handler(struct ata_port *ap)
if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
/* restart engine */
- ahci_stop_engine(ap);
+ hpriv->stop_engine(ap);
hpriv->start_engine(ap);
}
sata_pmp_error_handler(ap);
if (!ata_dev_enabled(ap->link.device))
- ahci_stop_engine(ap);
+ hpriv->stop_engine(ap);
}
EXPORT_SYMBOL_GPL(ahci_error_handler);
@@ -2129,7 +2133,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
return;
/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
- rc = ahci_stop_engine(ap);
+ rc = hpriv->stop_engine(ap);
if (rc)
return;
@@ -2189,7 +2193,7 @@ static void ahci_enable_fbs(struct ata_port *ap)
return;
}
- rc = ahci_stop_engine(ap);
+ rc = hpriv->stop_engine(ap);
if (rc)
return;
@@ -2222,7 +2226,7 @@ static void ahci_disable_fbs(struct ata_port *ap)
return;
}
- rc = ahci_stop_engine(ap);
+ rc = hpriv->stop_engine(ap);
if (rc)
return;
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 46a762442dc5..30cc8f1a31e1 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -25,7 +25,6 @@
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/of_platform.h>
-#include <linux/reset.h>
#include "ahci.h"
static void ahci_host_stop(struct ata_host *host);
@@ -196,8 +195,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
* following order:
* 1) Regulator
* 2) Clocks (through ahci_platform_enable_clks)
- * 3) Resets
- * 4) Phys
+ * 3) Phys
*
* If resource enabling fails at any point the previous enabled resources
* are disabled in reverse order.
@@ -217,19 +215,12 @@ int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
if (rc)
goto disable_regulator;
- rc = reset_control_deassert(hpriv->rsts);
- if (rc)
- goto disable_clks;
-
rc = ahci_platform_enable_phys(hpriv);
if (rc)
- goto disable_resets;
+ goto disable_clks;
return 0;
-disable_resets:
- reset_control_assert(hpriv->rsts);
-
disable_clks:
ahci_platform_disable_clks(hpriv);
@@ -248,15 +239,12 @@ EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
* following order:
* 1) Phys
* 2) Clocks (through ahci_platform_disable_clks)
- * 3) Resets
- * 4) Regulator
+ * 3) Regulator
*/
void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
{
ahci_platform_disable_phys(hpriv);
- reset_control_assert(hpriv->rsts);
-
ahci_platform_disable_clks(hpriv);
ahci_platform_disable_regulators(hpriv);
@@ -405,12 +393,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
hpriv->clks[i] = clk;
}
- hpriv->rsts = devm_reset_control_array_get_optional_shared(dev);
- if (IS_ERR(hpriv->rsts)) {
- rc = PTR_ERR(hpriv->rsts);
- goto err_out;
- }
-
hpriv->nports = child_nodes = of_get_child_count(dev->of_node);
/*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8bc71ca61e7f..68596bd4cf06 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4549,6 +4549,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM, },
+ /* This specific Samsung model/firmware-rev does not handle LPM well */
+ { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
+
+ /* Sandisk devices which are known to not handle LPM well */
+ { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
+
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c016829a38fd..513b260bcff1 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -175,8 +175,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */
-static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
- va_list args)
+static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
+ const char *fmt, va_list args)
{
ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
ATA_EH_DESC_LEN - ehi->desc_len,
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index aafb8cc03523..e67815b896fc 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -410,7 +410,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
int rc;
int retry = 100;
- ahci_stop_engine(ap);
+ hpriv->stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 4b1995e2d044..010ca101d412 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -285,13 +285,13 @@ static const struct sil24_cerr_info {
[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
"protocol mismatch" },
[PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET,
- "data directon mismatch" },
+ "data direction mismatch" },
[PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET,
"ran out of SGEs while writing" },
[PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET,
"ran out of SGEs while reading" },
[PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET,
- "invalid data directon for ATAPI CDB" },
+ "invalid data direction for ATAPI CDB" },
[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
"SGT not on qword boundary" },
[PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index d97c05690faa..4e46dc9e41ad 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -191,7 +191,7 @@ static char *res_strings[] = {
"reserved 37",
"reserved 38",
"reserved 39",
- "reseverd 40",
+ "reserved 40",
"reserved 41",
"reserved 42",
"reserved 43",
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 44abb8a0a5e5..be076606d30e 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -671,7 +671,7 @@ static void ia_tx_poll (IADEV *iadev) {
if ((vcc->pop) && (skb1->len != 0))
{
vcc->pop(vcc, skb1);
- IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
+ IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
(long)skb1);)
}
else
@@ -1665,7 +1665,7 @@ static void tx_intr(struct atm_dev *dev)
status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
if (status & TRANSMIT_DONE){
- IF_EVENT(printk("Tansmit Done Intr logic run\n");)
+ IF_EVENT(printk("Transmit Done Intr logic run\n");)
spin_lock_irqsave(&iadev->tx_lock, flags);
ia_tx_poll(iadev);
spin_unlock_irqrestore(&iadev->tx_lock, flags);
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 1ef67db03c8e..9c9a22958717 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -28,6 +28,7 @@
#include <asm/io.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
#include "uPD98401.h"
#include "uPD98402.h"
@@ -1458,6 +1459,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
return -EFAULT;
if (pool < 0 || pool > ZATM_LAST_POOL)
return -EINVAL;
+ pool = array_index_nospec(pool,
+ ZATM_LAST_POOL + 1);
spin_lock_irqsave(&zatm_dev->lock, flags);
info = zatm_dev->pool_info[pool];
if (cmd == ZATM_GETPOOLZ) {
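[The added clamp above uses array_index_nospec() from <linux/nospec.h>, the standard Spectre-v1 hardening helper shown in this hunk; a minimal stand-alone sketch of the pattern, with hypothetical table and function names that are not part of the patch, is:]

#include <linux/nospec.h>

/*
 * Illustrative sketch only (hypothetical names): array_index_nospec()
 * clamps a user-controlled index to [0, size) even under speculative
 * execution, so the dependent table load cannot leak out-of-bounds data.
 */
static int lookup_pool(const int *pool_table, unsigned int nr_pools,
		       int pool, int *out)
{
	if (pool < 0 || pool >= nr_pools)
		return -EINVAL;

	pool = array_index_nospec(pool, nr_pools);	/* speculation clamp */
	*out = pool_table[pool];
	return 0;
}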
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 1e6396bb807b..597d40893862 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -312,8 +312,9 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
* This checks whether the memory was allocated from the per-device
* coherent memory pool and if so, maps that memory to the provided vma.
*
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if @vaddr belongs to the device coherent pool and the caller
+ * should return @ret, or 0 if they should proceed with mapping memory from
+ * generic areas.
*/
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *vaddr, size_t size, int *ret)
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 3b118353ea17..d82566d6e237 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -226,7 +226,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
unsigned long off = vma->vm_pgoff;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -234,12 +233,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
- if (off < count && user_count <= (count - off)) {
+ if (off < count && user_count <= (count - off))
ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
+ page_to_pfn(virt_to_page(cpu_addr)) + off,
user_count << PAGE_SHIFT,
vma->vm_page_prot);
- }
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
return ret;
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 31b5015b59fe..358354148dec 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -537,8 +537,8 @@ exit:
}
/**
- * fw_load_sysfs_fallback - load a firmware via the syfs fallback mechanism
- * @fw_sysfs: firmware syfs information for the firmware to load
+ * fw_load_sysfs_fallback - load a firmware via the sysfs fallback mechanism
+ * @fw_sysfs: firmware sysfs information for the firmware to load
* @opt_flags: flags of options, FW_OPT_*
* @timeout: timeout to wait for the load
*
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index dfebc644ed35..f8255670a663 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -6,7 +6,7 @@
#include <linux/device.h>
/**
- * struct firmware_fallback_config - firmware fallback configuratioon settings
+ * struct firmware_fallback_config - firmware fallback configuration settings
*
* Helps describe and fine tune the fallback mechanism.
*
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 264abaaff662..5d4e31655d96 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -451,25 +451,47 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
static void lo_complete_rq(struct request *rq)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ blk_status_t ret = BLK_STS_OK;
- if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio &&
- cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) {
- struct bio *bio = cmd->rq->bio;
-
- bio_advance(bio, cmd->ret);
- zero_fill_bio(bio);
+ if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
+ req_op(rq) != REQ_OP_READ) {
+ if (cmd->ret < 0)
+ ret = BLK_STS_IOERR;
+ goto end_io;
}
- blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
+ /*
+ * Short READ - if we got some data, advance our request and
+ * retry it. If we got no data, end the rest with EIO.
+ */
+ if (cmd->ret) {
+ blk_update_request(rq, BLK_STS_OK, cmd->ret);
+ cmd->ret = 0;
+ blk_mq_requeue_request(rq, true);
+ } else {
+ if (cmd->use_aio) {
+ struct bio *bio = rq->bio;
+
+ while (bio) {
+ zero_fill_bio(bio);
+ bio = bio->bi_next;
+ }
+ }
+ ret = BLK_STS_IOERR;
+end_io:
+ blk_mq_end_request(rq, ret);
+ }
}
static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+
if (!atomic_dec_and_test(&cmd->ref))
return;
kfree(cmd->bvec);
cmd->bvec = NULL;
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(rq);
}
static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -487,7 +509,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
{
struct iov_iter iter;
struct bio_vec *bvec;
- struct request *rq = cmd->rq;
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
struct bio *bio = rq->bio;
struct file *file = lo->lo_backing_file;
unsigned int offset;
@@ -1103,11 +1125,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (info->lo_encrypt_type) {
unsigned int type = info->lo_encrypt_type;
- if (type >= MAX_LO_CRYPT)
- return -EINVAL;
+ if (type >= MAX_LO_CRYPT) {
+ err = -EINVAL;
+ goto exit;
+ }
xfer = xfer_funcs[type];
- if (xfer == NULL)
- return -EINVAL;
+ if (xfer == NULL) {
+ err = -EINVAL;
+ goto exit;
+ }
} else
xfer = NULL;
@@ -1283,12 +1309,13 @@ static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
struct loop_info info;
struct loop_info64 info64;
- int err = 0;
+ int err;
- if (!arg)
- err = -EINVAL;
- if (!err)
- err = loop_get_status(lo, &info64);
+ if (!arg) {
+ mutex_unlock(&lo->lo_ctl_mutex);
+ return -EINVAL;
+ }
+ err = loop_get_status(lo, &info64);
if (!err)
err = loop_info64_to_old(&info64, &info);
if (!err && copy_to_user(arg, &info, sizeof(info)))
@@ -1300,12 +1327,13 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
struct loop_info64 info64;
- int err = 0;
+ int err;
- if (!arg)
- err = -EINVAL;
- if (!err)
- err = loop_get_status(lo, &info64);
+ if (!arg) {
+ mutex_unlock(&lo->lo_ctl_mutex);
+ return -EINVAL;
+ }
+ err = loop_get_status(lo, &info64);
if (!err && copy_to_user(arg, &info64, sizeof(info64)))
err = -EFAULT;
@@ -1529,12 +1557,13 @@ loop_get_status_compat(struct loop_device *lo,
struct compat_loop_info __user *arg)
{
struct loop_info64 info64;
- int err = 0;
+ int err;
- if (!arg)
- err = -EINVAL;
- if (!err)
- err = loop_get_status(lo, &info64);
+ if (!arg) {
+ mutex_unlock(&lo->lo_ctl_mutex);
+ return -EINVAL;
+ }
+ err = loop_get_status(lo, &info64);
if (!err)
err = loop_info64_to_compat(&info64, arg);
return err;
@@ -1695,15 +1724,16 @@ EXPORT_SYMBOL(loop_unregister_transfer);
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
- struct loop_device *lo = cmd->rq->q->queuedata;
+ struct request *rq = bd->rq;
+ struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct loop_device *lo = rq->q->queuedata;
- blk_mq_start_request(bd->rq);
+ blk_mq_start_request(rq);
if (lo->lo_state != Lo_bound)
return BLK_STS_IOERR;
- switch (req_op(cmd->rq)) {
+ switch (req_op(rq)) {
case REQ_OP_FLUSH:
case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
@@ -1716,8 +1746,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
/* always use the first bio's css */
#ifdef CONFIG_BLK_CGROUP
- if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) {
- cmd->css = cmd->rq->bio->bi_css;
+ if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
+ cmd->css = rq->bio->bi_css;
css_get(cmd->css);
} else
#endif
@@ -1729,8 +1759,9 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
static void loop_handle_cmd(struct loop_cmd *cmd)
{
- const bool write = op_is_write(req_op(cmd->rq));
- struct loop_device *lo = cmd->rq->q->queuedata;
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ const bool write = op_is_write(req_op(rq));
+ struct loop_device *lo = rq->q->queuedata;
int ret = 0;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
@@ -1738,12 +1769,12 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
goto failed;
}
- ret = do_req_filebacked(lo, cmd->rq);
+ ret = do_req_filebacked(lo, rq);
failed:
/* complete non-aio request */
if (!cmd->use_aio || ret) {
cmd->ret = ret ? -EIO : 0;
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(rq);
}
}
@@ -1760,9 +1791,7 @@ static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
- cmd->rq = rq;
kthread_init_work(&cmd->work, loop_queue_work);
-
return 0;
}
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 0f45416e4fcf..b78de9879f4f 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -66,7 +66,6 @@ struct loop_device {
struct loop_cmd {
struct kthread_work work;
- struct request *rq;
bool use_aio; /* use AIO interface to handle I/O */
atomic_t ref; /* only for aio */
long ret;
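[The cmd->rq field dropped above is redundant because blk-mq places the driver's per-request data (PDU) directly behind struct request, so blk_mq_rq_from_pdu()/blk_mq_rq_to_pdu() from <linux/blk-mq.h> convert between the two, as the loop.c hunk does; a minimal sketch with a hypothetical helper name, not part of the patch, is:]

#include <linux/blk-mq.h>

/*
 * Illustrative sketch only (helper name is hypothetical): the owning
 * request can always be recovered from its PDU, which is why struct
 * loop_cmd no longer needs to cache a struct request pointer.
 */
static inline struct request *loop_cmd_to_rq(struct loop_cmd *cmd)
{
	return blk_mq_rq_from_pdu(cmd);
}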
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 07dc5419bd63..33b36fea1d73 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -732,6 +732,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
*/
enum {
Opt_queue_depth,
+ Opt_lock_timeout,
Opt_last_int,
/* int args above */
Opt_last_string,
@@ -740,11 +741,13 @@ enum {
Opt_read_write,
Opt_lock_on_read,
Opt_exclusive,
+ Opt_notrim,
Opt_err
};
static match_table_t rbd_opts_tokens = {
{Opt_queue_depth, "queue_depth=%d"},
+ {Opt_lock_timeout, "lock_timeout=%d"},
/* int args above */
/* string args above */
{Opt_read_only, "read_only"},
@@ -753,20 +756,25 @@ static match_table_t rbd_opts_tokens = {
{Opt_read_write, "rw"}, /* Alternate spelling */
{Opt_lock_on_read, "lock_on_read"},
{Opt_exclusive, "exclusive"},
+ {Opt_notrim, "notrim"},
{Opt_err, NULL}
};
struct rbd_options {
int queue_depth;
+ unsigned long lock_timeout;
bool read_only;
bool lock_on_read;
bool exclusive;
+ bool trim;
};
#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
+#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
#define RBD_READ_ONLY_DEFAULT false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT false
+#define RBD_TRIM_DEFAULT true
static int parse_rbd_opts_token(char *c, void *private)
{
@@ -796,6 +804,14 @@ static int parse_rbd_opts_token(char *c, void *private)
}
rbd_opts->queue_depth = intval;
break;
+ case Opt_lock_timeout:
+ /* 0 is "wait forever" (i.e. infinite timeout) */
+ if (intval < 0 || intval > INT_MAX / 1000) {
+ pr_err("lock_timeout out of range\n");
+ return -EINVAL;
+ }
+ rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000);
+ break;
case Opt_read_only:
rbd_opts->read_only = true;
break;
@@ -808,6 +824,9 @@ static int parse_rbd_opts_token(char *c, void *private)
case Opt_exclusive:
rbd_opts->exclusive = true;
break;
+ case Opt_notrim:
+ rbd_opts->trim = false;
+ break;
default:
/* libceph prints "bad option" msg */
return -EINVAL;
@@ -1392,7 +1411,7 @@ static bool rbd_img_is_write(struct rbd_img_request *img_req)
case OBJ_OP_DISCARD:
return true;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -2347,7 +2366,9 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd",
"copyup");
osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
- obj_req->copyup_bvecs, bytes);
+ obj_req->copyup_bvecs,
+ obj_req->copyup_bvec_count,
+ bytes);
switch (obj_req->img_request->op_type) {
case OBJ_OP_WRITE:
@@ -2466,7 +2487,7 @@ again:
}
return false;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -2494,7 +2515,7 @@ static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
}
return false;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -3533,9 +3554,22 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
/*
* lock_rwsem must be held for read
*/
-static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
+static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
{
DEFINE_WAIT(wait);
+ unsigned long timeout;
+ int ret = 0;
+
+ if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
+ return -EBLACKLISTED;
+
+ if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
+ return 0;
+
+ if (!may_acquire) {
+ rbd_warn(rbd_dev, "exclusive lock required");
+ return -EROFS;
+ }
do {
/*
@@ -3547,12 +3581,22 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
TASK_UNINTERRUPTIBLE);
up_read(&rbd_dev->lock_rwsem);
- schedule();
+ timeout = schedule_timeout(ceph_timeout_jiffies(
+ rbd_dev->opts->lock_timeout));
down_read(&rbd_dev->lock_rwsem);
- } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
- !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+ if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+ ret = -EBLACKLISTED;
+ break;
+ }
+ if (!timeout) {
+ rbd_warn(rbd_dev, "timed out waiting for lock");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
finish_wait(&rbd_dev->lock_waitq, &wait);
+ return ret;
}
static void rbd_queue_workfn(struct work_struct *work)
@@ -3638,19 +3682,10 @@ static void rbd_queue_workfn(struct work_struct *work)
(op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
if (must_be_locked) {
down_read(&rbd_dev->lock_rwsem);
- if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
- !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
- if (rbd_dev->opts->exclusive) {
- rbd_warn(rbd_dev, "exclusive lock required");
- result = -EROFS;
- goto err_unlock;
- }
- rbd_wait_state_locked(rbd_dev);
- }
- if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
- result = -EBLACKLISTED;
+ result = rbd_wait_state_locked(rbd_dev,
+ !rbd_dev->opts->exclusive);
+ if (result)
goto err_unlock;
- }
}
img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
@@ -3902,7 +3937,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
{
struct gendisk *disk;
struct request_queue *q;
- u64 segment_size;
+ unsigned int objset_bytes =
+ rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
int err;
/* create gendisk info */
@@ -3942,20 +3978,19 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
- /* set io sizes to object size */
- segment_size = rbd_obj_bytes(&rbd_dev->header);
- blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
+ blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
q->limits.max_sectors = queue_max_hw_sectors(q);
blk_queue_max_segments(q, USHRT_MAX);
blk_queue_max_segment_size(q, UINT_MAX);
- blk_queue_io_min(q, segment_size);
- blk_queue_io_opt(q, segment_size);
+ blk_queue_io_min(q, objset_bytes);
+ blk_queue_io_opt(q, objset_bytes);
- /* enable the discard support */
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- q->limits.discard_granularity = segment_size;
- blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
- blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
+ if (rbd_dev->opts->trim) {
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ q->limits.discard_granularity = objset_bytes;
+ blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
+ blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
+ }
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
@@ -5179,8 +5214,10 @@ static int rbd_add_parse_args(const char *buf,
rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
+ rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
+ rbd_opts->trim = RBD_TRIM_DEFAULT;
copts = ceph_parse_options(options, mon_addrs,
mon_addrs + mon_addrs_size - 1,
@@ -5216,6 +5253,8 @@ static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
{
+ int ret;
+
if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
return -EINVAL;
@@ -5223,9 +5262,9 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
 /* FIXME: "rbd map --exclusive" should be interruptible */
down_read(&rbd_dev->lock_rwsem);
- rbd_wait_state_locked(rbd_dev);
+ ret = rbd_wait_state_locked(rbd_dev, true);
up_read(&rbd_dev->lock_rwsem);
- if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+ if (ret) {
rbd_warn(rbd_dev, "failed to acquire exclusive lock");
return -EROFS;
}
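The new rbd map options added above are parsed from the option string supplied at map time. An illustrative invocation (pool/image names are placeholders; lock_timeout is given in seconds, 0 keeps the default wait-forever behaviour, and notrim disables discard/write-zeroes support for the mapping):

    rbd map -o lock_timeout=60,notrim mypool/myimage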
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 64e066eba72e..0e31884a9519 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -110,7 +110,7 @@ struct iwm {
/* Select values for swim_select and swim_readbit */
#define READ_DATA_0 0x074
-#define TWOMEG_DRIVE 0x075
+#define ONEMEG_DRIVE 0x075
#define SINGLE_SIDED 0x076
#define DRIVE_PRESENT 0x077
#define DISK_IN 0x170
@@ -118,9 +118,9 @@ struct iwm {
#define TRACK_ZERO 0x172
#define TACHO 0x173
#define READ_DATA_1 0x174
-#define MFM_MODE 0x175
+#define GCR_MODE 0x175
#define SEEK_COMPLETE 0x176
-#define ONEMEG_MEDIA 0x177
+#define TWOMEG_MEDIA 0x177
/* Bits in handshake register */
@@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs)
struct floppy_struct *g;
fs->disk_in = 1;
fs->write_protected = swim_readbit(base, WRITE_PROT);
- fs->type = swim_readbit(base, ONEMEG_MEDIA);
if (swim_track00(base))
printk(KERN_ERR
@@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs)
swim_track00(base);
+ fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
+ HD_MEDIA : DD_MEDIA;
+ fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
get_floppy_geometry(fs, 0, &g);
fs->total_secs = g->size;
fs->secpercyl = g->head * g->sect;
@@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2);
udelay(10);
- swim_drive(base, INTERNAL_DRIVE);
+ swim_drive(base, fs->location);
swim_motor(base, ON);
swim_action(base, SETMFM);
if (fs->ejected)
@@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
goto out;
}
+ set_capacity(fs->disk, fs->total_secs);
+
if (mode & FMODE_NDELAY)
return 0;
@@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
if (copy_to_user((void __user *) param, (void *) &floppy_type,
sizeof(struct floppy_struct)))
return -EFAULT;
- break;
-
- default:
- printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n",
- cmd);
- return -ENOSYS;
+ return 0;
}
- return 0;
+ return -ENOTTY;
}
static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
struct swim_priv *swd = data;
int drive = (*part & 3);
- if (drive > swd->floppy_count)
+ if (drive >= swd->floppy_count)
return NULL;
*part = 0;
@@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
swim_motor(base, OFF);
- if (swim_readbit(base, SINGLE_SIDED))
- fs->head_number = 1;
- else
- fs->head_number = 2;
+ fs->type = HD_MEDIA;
+ fs->head_number = 2;
+
fs->ref_count = 0;
fs->ejected = 1;
@@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd)
/* scan floppy drives */
swim_drive(base, INTERNAL_DRIVE);
- if (swim_readbit(base, DRIVE_PRESENT))
+ if (swim_readbit(base, DRIVE_PRESENT) &&
+ !swim_readbit(base, ONEMEG_DRIVE))
swim_add_floppy(swd, INTERNAL_DRIVE);
swim_drive(base, EXTERNAL_DRIVE);
- if (swim_readbit(base, DRIVE_PRESENT))
+ if (swim_readbit(base, DRIVE_PRESENT) &&
+ !swim_readbit(base, ONEMEG_DRIVE))
swim_add_floppy(swd, EXTERNAL_DRIVE);
/* register floppy drives */
@@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd)
&swd->lock);
if (!swd->unit[drive].disk->queue) {
err = -ENOMEM;
- put_disk(swd->unit[drive].disk);
goto exit_put_disks;
}
blk_queue_bounce_limit(swd->unit[drive].disk->queue,
@@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev)
goto out;
}
- swim_base = ioremap(res->start, resource_size(res));
+ swim_base = (struct swim __iomem *)res->start;
if (!swim_base) {
ret = -ENOMEM;
goto out_release_io;
@@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev)
if (!get_swim_mode(swim_base)) {
printk(KERN_INFO "SWIM device not found !\n");
ret = -ENODEV;
- goto out_iounmap;
+ goto out_release_io;
}
/* set platform driver data */
@@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev)
swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
if (!swd) {
ret = -ENOMEM;
- goto out_iounmap;
+ goto out_release_io;
}
platform_set_drvdata(dev, swd);
@@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev)
out_kfree:
kfree(swd);
-out_iounmap:
- iounmap(swim_base);
out_release_io:
release_mem_region(res->start, resource_size(res));
out:
@@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev)
for (drive = 0; drive < swd->floppy_count; drive++)
floppy_eject(&swd->unit[drive]);
- iounmap(swd->base);
-
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (res)
release_mem_region(res->start, resource_size(res));
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index af51015d056e..469541c1e51e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -148,7 +148,7 @@ struct swim3 {
#define MOTOR_ON 2
#define RELAX 3 /* also eject in progress */
#define READ_DATA_0 4
-#define TWOMEG_DRIVE 5
+#define ONEMEG_DRIVE 5
#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */
#define DRIVE_PRESENT 7
#define DISK_IN 8
@@ -156,9 +156,9 @@ struct swim3 {
#define TRACK_ZERO 10
#define TACHO 11
#define READ_DATA_1 12
-#define MFM_MODE 13
+#define GCR_MODE 13
#define SEEK_COMPLETE 14
-#define ONEMEG_MEDIA 15
+#define TWOMEG_MEDIA 15
/* Definitions of values used in writing and formatting */
#define DATA_ESCAPE 0x99
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 010f5f579e68..f3c643a0473c 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -197,6 +197,7 @@ config BT_HCIUART_BCM
config BT_HCIUART_QCA
bool "Qualcomm Atheros protocol support"
depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
select BT_HCIUART_H4
select BT_QCA
help
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 6659f113042c..99cde1f9467d 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -315,10 +315,12 @@ static int btbcm_read_info(struct hci_dev *hdev)
return 0;
}
-static const struct {
+struct bcm_subver_table {
u16 subver;
const char *name;
-} bcm_uart_subver_table[] = {
+};
+
+static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x4103, "BCM4330B1" }, /* 002.001.003 */
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
@@ -330,12 +332,28 @@ static const struct {
{ }
};
-int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len)
+static const struct bcm_subver_table bcm_usb_subver_table[] = {
+ { 0x210b, "BCM43142A0" }, /* 001.001.011 */
+ { 0x2112, "BCM4314A0" }, /* 001.001.018 */
+ { 0x2118, "BCM20702A0" }, /* 001.001.024 */
+ { 0x2126, "BCM4335A0" }, /* 001.001.038 */
+ { 0x220e, "BCM20702A1" }, /* 001.002.014 */
+ { 0x230f, "BCM4354A2" }, /* 001.003.015 */
+ { 0x4106, "BCM4335B0" }, /* 002.001.006 */
+ { 0x410e, "BCM20702B0" }, /* 002.001.014 */
+ { 0x6109, "BCM4335C0" }, /* 003.001.009 */
+ { 0x610c, "BCM4354" }, /* 003.001.012 */
+ { }
+};
+
+int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
+ bool reinit)
{
- u16 subver, rev;
- const char *hw_name = NULL;
+ u16 subver, rev, pid, vid;
+ const char *hw_name = "BCM";
struct sk_buff *skb;
struct hci_rp_read_local_version *ver;
+ const struct bcm_subver_table *bcm_subver_table;
int i, err;
/* Reset */
@@ -354,30 +372,44 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len)
kfree_skb(skb);
/* Read controller information */
- err = btbcm_read_info(hdev);
- if (err)
- return err;
+ if (!reinit) {
+ err = btbcm_read_info(hdev);
+ if (err)
+ return err;
+ }
- switch ((rev & 0xf000) >> 12) {
- case 0:
- case 1:
- case 2:
- case 3:
- for (i = 0; bcm_uart_subver_table[i].name; i++) {
- if (subver == bcm_uart_subver_table[i].subver) {
- hw_name = bcm_uart_subver_table[i].name;
- break;
- }
+ /* Upper nibble of rev should be between 0 and 3? */
+ if (((rev & 0xf000) >> 12) > 3)
+ return 0;
+
+ bcm_subver_table = (hdev->bus == HCI_USB) ? bcm_usb_subver_table :
+ bcm_uart_subver_table;
+
+ for (i = 0; bcm_subver_table[i].name; i++) {
+ if (subver == bcm_subver_table[i].subver) {
+ hw_name = bcm_subver_table[i].name;
+ break;
}
+ }
- snprintf(fw_name, len, "brcm/%s.hcd", hw_name ? : "BCM");
- break;
- default:
- return 0;
+ if (hdev->bus == HCI_USB) {
+ /* Read USB Product Info */
+ skb = btbcm_read_usb_product(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ vid = get_unaligned_le16(skb->data + 1);
+ pid = get_unaligned_le16(skb->data + 3);
+ kfree_skb(skb);
+
+ snprintf(fw_name, len, "brcm/%s-%4.4x-%4.4x.hcd",
+ hw_name, vid, pid);
+ } else {
+ snprintf(fw_name, len, "brcm/%s.hcd", hw_name);
}
bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u",
- hw_name ? : "BCM", (subver & 0xe000) >> 13,
+ hw_name, (subver & 0xe000) >> 13,
(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
return 0;
@@ -386,30 +418,14 @@ EXPORT_SYMBOL_GPL(btbcm_initialize);
int btbcm_finalize(struct hci_dev *hdev)
{
- struct sk_buff *skb;
- struct hci_rp_read_local_version *ver;
- u16 subver, rev;
+ char fw_name[64];
int err;
- /* Reset */
- err = btbcm_reset(hdev);
+ /* Re-initialize */
+ err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), true);
if (err)
return err;
- /* Read Local Version Info */
- skb = btbcm_read_local_version(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- ver = (struct hci_rp_read_local_version *)skb->data;
- rev = le16_to_cpu(ver->hci_rev);
- subver = le16_to_cpu(ver->lmp_subver);
- kfree_skb(skb);
-
- bt_dev_info(hdev, "BCM (%3.3u.%3.3u.%3.3u) build %4.4u",
- (subver & 0xe000) >> 13, (subver & 0x1f00) >> 8,
- (subver & 0x00ff), rev & 0x0fff);
-
btbcm_check_bdaddr(hdev);
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
@@ -418,95 +434,18 @@ int btbcm_finalize(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(btbcm_finalize);
-static const struct {
- u16 subver;
- const char *name;
-} bcm_usb_subver_table[] = {
- { 0x210b, "BCM43142A0" }, /* 001.001.011 */
- { 0x2112, "BCM4314A0" }, /* 001.001.018 */
- { 0x2118, "BCM20702A0" }, /* 001.001.024 */
- { 0x2126, "BCM4335A0" }, /* 001.001.038 */
- { 0x220e, "BCM20702A1" }, /* 001.002.014 */
- { 0x230f, "BCM4354A2" }, /* 001.003.015 */
- { 0x4106, "BCM4335B0" }, /* 002.001.006 */
- { 0x410e, "BCM20702B0" }, /* 002.001.014 */
- { 0x6109, "BCM4335C0" }, /* 003.001.009 */
- { 0x610c, "BCM4354" }, /* 003.001.012 */
- { }
-};
-
int btbcm_setup_patchram(struct hci_dev *hdev)
{
char fw_name[64];
const struct firmware *fw;
- u16 subver, rev, pid, vid;
- const char *hw_name = NULL;
struct sk_buff *skb;
- struct hci_rp_read_local_version *ver;
- int i, err;
-
- /* Reset */
- err = btbcm_reset(hdev);
- if (err)
- return err;
-
- /* Read Local Version Info */
- skb = btbcm_read_local_version(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- ver = (struct hci_rp_read_local_version *)skb->data;
- rev = le16_to_cpu(ver->hci_rev);
- subver = le16_to_cpu(ver->lmp_subver);
- kfree_skb(skb);
+ int err;
- /* Read controller information */
- err = btbcm_read_info(hdev);
+ /* Initialize */
+ err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), false);
if (err)
return err;
- switch ((rev & 0xf000) >> 12) {
- case 0:
- case 3:
- for (i = 0; bcm_uart_subver_table[i].name; i++) {
- if (subver == bcm_uart_subver_table[i].subver) {
- hw_name = bcm_uart_subver_table[i].name;
- break;
- }
- }
-
- snprintf(fw_name, sizeof(fw_name), "brcm/%s.hcd",
- hw_name ? : "BCM");
- break;
- case 1:
- case 2:
- /* Read USB Product Info */
- skb = btbcm_read_usb_product(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- vid = get_unaligned_le16(skb->data + 1);
- pid = get_unaligned_le16(skb->data + 3);
- kfree_skb(skb);
-
- for (i = 0; bcm_usb_subver_table[i].name; i++) {
- if (subver == bcm_usb_subver_table[i].subver) {
- hw_name = bcm_usb_subver_table[i].name;
- break;
- }
- }
-
- snprintf(fw_name, sizeof(fw_name), "brcm/%s-%4.4x-%4.4x.hcd",
- hw_name ? : "BCM", vid, pid);
- break;
- default:
- return 0;
- }
-
- bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u",
- hw_name ? : "BCM", (subver & 0xe000) >> 13,
- (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
-
err = request_firmware(&fw, fw_name, &hdev->dev);
if (err < 0) {
bt_dev_info(hdev, "BCM: Patch %s not found", fw_name);
@@ -517,25 +456,11 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
release_firmware(fw);
- /* Reset */
- err = btbcm_reset(hdev);
+ /* Re-initialize */
+ err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), true);
if (err)
return err;
- /* Read Local Version Info */
- skb = btbcm_read_local_version(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- ver = (struct hci_rp_read_local_version *)skb->data;
- rev = le16_to_cpu(ver->hci_rev);
- subver = le16_to_cpu(ver->lmp_subver);
- kfree_skb(skb);
-
- bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u",
- hw_name ? : "BCM", (subver & 0xe000) >> 13,
- (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
-
/* Read Local Name */
skb = btbcm_read_local_name(hdev);
if (IS_ERR(skb))
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index cfe6ad4cc621..5346515c880c 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -73,7 +73,8 @@ int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw);
int btbcm_setup_patchram(struct hci_dev *hdev);
int btbcm_setup_apple(struct hci_dev *hdev);
-int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len);
+int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len,
+ bool reinit);
int btbcm_finalize(struct hci_dev *hdev);
#else
@@ -104,7 +105,7 @@ static inline int btbcm_setup_apple(struct hci_dev *hdev)
}
static inline int btbcm_initialize(struct hci_dev *hdev, char *fw_name,
- size_t len)
+ size_t len, bool reinit)
{
return 0;
}
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 2793d4180d2f..8219816c54a0 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -127,28 +127,41 @@ static void rome_tlv_check_data(struct rome_config *config,
BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
BT_DBG("Length\t\t : %d bytes", length);
+ config->dnld_mode = ROME_SKIP_EVT_NONE;
+
switch (config->type) {
case TLV_TYPE_PATCH:
tlv_patch = (struct tlv_type_patch *)tlv->data;
- BT_DBG("Total Length\t\t : %d bytes",
+
+ /* For Rome version 1.1 to 3.1, all segment commands
+ * are acked by a vendor specific event (VSE).
+ * For Rome >= 3.2, the download mode field indicates
+ * if VSE is skipped by the controller.
+ * In case VSE is skipped, only the last segment is acked.
+ */
+ config->dnld_mode = tlv_patch->download_mode;
+
+ BT_DBG("Total Length : %d bytes",
le32_to_cpu(tlv_patch->total_size));
- BT_DBG("Patch Data Length\t : %d bytes",
+ BT_DBG("Patch Data Length : %d bytes",
le32_to_cpu(tlv_patch->data_length));
BT_DBG("Signing Format Version : 0x%x",
tlv_patch->format_version);
- BT_DBG("Signature Algorithm\t : 0x%x",
+ BT_DBG("Signature Algorithm : 0x%x",
tlv_patch->signature);
- BT_DBG("Reserved\t\t : 0x%x",
- le16_to_cpu(tlv_patch->reserved1));
- BT_DBG("Product ID\t\t : 0x%04x",
+ BT_DBG("Download mode : 0x%x",
+ tlv_patch->download_mode);
+ BT_DBG("Reserved : 0x%x",
+ tlv_patch->reserved1);
+ BT_DBG("Product ID : 0x%04x",
le16_to_cpu(tlv_patch->product_id));
- BT_DBG("Rom Build Version\t : 0x%04x",
+ BT_DBG("Rom Build Version : 0x%04x",
le16_to_cpu(tlv_patch->rom_build));
- BT_DBG("Patch Version\t\t : 0x%04x",
+ BT_DBG("Patch Version : 0x%04x",
le16_to_cpu(tlv_patch->patch_version));
- BT_DBG("Reserved\t\t : 0x%x",
+ BT_DBG("Reserved : 0x%x",
le16_to_cpu(tlv_patch->reserved2));
- BT_DBG("Patch Entry Address\t : 0x%x",
+ BT_DBG("Patch Entry Address : 0x%x",
le32_to_cpu(tlv_patch->entry));
break;
@@ -194,8 +207,8 @@ static void rome_tlv_check_data(struct rome_config *config,
}
}
-static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size,
- const u8 *data)
+static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size,
+ const u8 *data, enum rome_tlv_dnld_mode mode)
{
struct sk_buff *skb;
struct edl_event_hdr *edl;
@@ -203,12 +216,14 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size,
u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
int err = 0;
- BT_DBG("%s: Download segment #%d size %d", hdev->name, idx, seg_size);
-
cmd[0] = EDL_PATCH_TLV_REQ_CMD;
cmd[1] = seg_size;
memcpy(cmd + 2, data, seg_size);
+ if (mode == ROME_SKIP_EVT_VSE_CC || mode == ROME_SKIP_EVT_VSE)
+ return __hci_cmd_send(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2,
+ cmd);
+
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
@@ -245,47 +260,12 @@ out:
return err;
}
-static int rome_tlv_download_request(struct hci_dev *hdev,
- const struct firmware *fw)
-{
- const u8 *buffer, *data;
- int total_segment, remain_size;
- int ret, i;
-
- if (!fw || !fw->data)
- return -EINVAL;
-
- total_segment = fw->size / MAX_SIZE_PER_TLV_SEGMENT;
- remain_size = fw->size % MAX_SIZE_PER_TLV_SEGMENT;
-
- BT_DBG("%s: Total segment num %d remain size %d total size %zu",
- hdev->name, total_segment, remain_size, fw->size);
-
- data = fw->data;
- for (i = 0; i < total_segment; i++) {
- buffer = data + i * MAX_SIZE_PER_TLV_SEGMENT;
- ret = rome_tlv_send_segment(hdev, i, MAX_SIZE_PER_TLV_SEGMENT,
- buffer);
- if (ret < 0)
- return -EIO;
- }
-
- if (remain_size) {
- buffer = data + total_segment * MAX_SIZE_PER_TLV_SEGMENT;
- ret = rome_tlv_send_segment(hdev, total_segment, remain_size,
- buffer);
- if (ret < 0)
- return -EIO;
- }
-
- return 0;
-}
-
static int rome_download_firmware(struct hci_dev *hdev,
struct rome_config *config)
{
const struct firmware *fw;
- int ret;
+ const u8 *segment;
+ int ret, remain, i = 0;
bt_dev_info(hdev, "ROME Downloading %s", config->fwname);
@@ -298,10 +278,24 @@ static int rome_download_firmware(struct hci_dev *hdev,
rome_tlv_check_data(config, fw);
- ret = rome_tlv_download_request(hdev, fw);
- if (ret) {
- BT_ERR("%s: Failed to download file: %s (%d)", hdev->name,
- config->fwname, ret);
+ segment = fw->data;
+ remain = fw->size;
+ while (remain > 0) {
+ int segsize = min(MAX_SIZE_PER_TLV_SEGMENT, remain);
+
+ bt_dev_dbg(hdev, "Send segment %d, size %d", i++, segsize);
+
+ remain -= segsize;
+ /* The last segment is always acked regardless of the download mode */
+ if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT)
+ config->dnld_mode = ROME_SKIP_EVT_NONE;
+
+ ret = rome_tlv_send_segment(hdev, segsize, segment,
+ config->dnld_mode);
+ if (ret)
+ break;
+
+ segment += segsize;
}
release_firmware(fw);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 65e994b96c47..13d77fd873b6 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -61,6 +61,13 @@ enum qca_bardrate {
QCA_BAUDRATE_RESERVED
};
+enum rome_tlv_dnld_mode {
+ ROME_SKIP_EVT_NONE,
+ ROME_SKIP_EVT_VSE,
+ ROME_SKIP_EVT_CC,
+ ROME_SKIP_EVT_VSE_CC
+};
+
enum rome_tlv_type {
TLV_TYPE_PATCH = 1,
TLV_TYPE_NVM
@@ -70,6 +77,7 @@ struct rome_config {
u8 type;
char fwname[64];
uint8_t user_baud_rate;
+ enum rome_tlv_dnld_mode dnld_mode;
};
struct edl_event_hdr {
@@ -94,7 +102,8 @@ struct tlv_type_patch {
__le32 data_length;
__u8 format_version;
__u8 signature;
- __le16 reserved1;
+ __u8 download_mode;
+ __u8 reserved1;
__le16 product_id;
__le16 rom_build;
__le16 patch_version;
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 2c9a5fc9137d..7df3eed1ef5e 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -65,6 +65,7 @@ static int btqcomsmd_cmd_callback(struct rpmsg_device *rpdev, void *data,
{
struct btqcomsmd *btq = priv;
+ btq->hdev->stat.byte_rx += count;
return btqcomsmd_recv(btq->hdev, HCI_EVENT_PKT, data, count);
}
@@ -76,12 +77,21 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
switch (hci_skb_pkt_type(skb)) {
case HCI_ACLDATA_PKT:
ret = rpmsg_send(btq->acl_channel, skb->data, skb->len);
+ if (ret) {
+ hdev->stat.err_tx++;
+ break;
+ }
hdev->stat.acl_tx++;
hdev->stat.byte_tx += skb->len;
break;
case HCI_COMMAND_PKT:
ret = rpmsg_send(btq->cmd_channel, skb->data, skb->len);
+ if (ret) {
+ hdev->stat.err_tx++;
+ break;
+ }
hdev->stat.cmd_tx++;
+ hdev->stat.byte_tx += skb->len;
break;
default:
ret = -EILSEQ;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c8c8b0b8d333..91882f54c7bd 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -231,6 +231,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
@@ -263,7 +264,6 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* QCA ROME chipset */
- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
@@ -276,6 +276,8 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -399,6 +401,13 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
},
},
+ {
+ /* Dell XPS 9360 (QCA ROME device 0cf3:e300) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
+ },
+ },
{}
};
@@ -2852,6 +2861,12 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
}
#endif
+static void btusb_check_needs_reset_resume(struct usb_interface *intf)
+{
+ if (dmi_check_system(btusb_needs_reset_resume_table))
+ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+}
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -2974,9 +2989,6 @@ static int btusb_probe(struct usb_interface *intf,
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
- if (dmi_check_system(btusb_needs_reset_resume_table))
- interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
-
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
if (err)
@@ -3064,6 +3076,7 @@ static int btusb_probe(struct usb_interface *intf,
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ btusb_check_needs_reset_resume(intf);
}
#ifdef CONFIG_BT_HCIBTUSB_RTL
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 441f5e1deb11..f06f0f1132fb 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -501,7 +501,7 @@ static int bcm_setup(struct hci_uart *hu)
hu->hdev->set_diag = bcm_set_diag;
hu->hdev->set_bdaddr = btbcm_set_bdaddr;
- err = btbcm_initialize(hu->hdev, fw_name, sizeof(fw_name));
+ err = btbcm_initialize(hu->hdev, fw_name, sizeof(fw_name), false);
if (err)
return err;
@@ -794,19 +794,21 @@ static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = {
{ },
};
-#ifdef CONFIG_ACPI
-/* IRQ polarity of some chipsets are not defined correctly in ACPI table. */
-static const struct dmi_system_id bcm_active_low_irq_dmi_table[] = {
- { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
- .ident = "Lenovo ThinkPad 8",
+/* Some firmware reports an IRQ which does not work (wrong pin in fw table?) */
+static const struct dmi_system_id bcm_broken_irq_dmi_table[] = {
+ {
+ .ident = "Meegopad T08",
.matches = {
- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR,
+ "To be filled by OEM."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_EXACT_MATCH(DMI_BOARD_VERSION, "V1.1"),
},
},
{ }
};
+#ifdef CONFIG_ACPI
static int bcm_resource(struct acpi_resource *ares, void *data)
{
struct bcm_device *dev = data;
@@ -904,6 +906,8 @@ static int bcm_gpio_set_shutdown(struct bcm_device *dev, bool powered)
static int bcm_get_resources(struct bcm_device *dev)
{
+ const struct dmi_system_id *dmi_id;
+
dev->name = dev_name(dev->dev);
if (x86_apple_machine && !bcm_apple_get_resources(dev))
@@ -936,6 +940,13 @@ static int bcm_get_resources(struct bcm_device *dev)
dev->irq = gpiod_to_irq(gpio);
}
+ dmi_id = dmi_first_match(bcm_broken_irq_dmi_table);
+ if (dmi_id) {
+ dev_info(dev->dev, "%s: Has a broken IRQ config, disabling IRQ support / runtime-pm\n",
+ dmi_id->ident);
+ dev->irq = 0;
+ }
+
dev_dbg(dev->dev, "BCM irq: %d\n", dev->irq);
return 0;
}
@@ -944,7 +955,6 @@ static int bcm_get_resources(struct bcm_device *dev)
static int bcm_acpi_probe(struct bcm_device *dev)
{
LIST_HEAD(resources);
- const struct dmi_system_id *dmi_id;
const struct acpi_gpio_mapping *gpio_mapping = acpi_bcm_int_last_gpios;
struct resource_entry *entry;
int ret;
@@ -991,13 +1001,6 @@ static int bcm_acpi_probe(struct bcm_device *dev)
dev->irq_active_low = irq_polarity;
dev_warn(dev->dev, "Overwriting IRQ polarity to active %s by module-param\n",
dev->irq_active_low ? "low" : "high");
- } else {
- dmi_id = dmi_first_match(bcm_active_low_irq_dmi_table);
- if (dmi_id) {
- dev_warn(dev->dev, "%s: Overwriting IRQ polarity to active low",
- dmi_id->ident);
- dev->irq_active_low = true;
- }
}
return 0;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index b6a71705b7d6..954213e5daa5 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -447,6 +447,8 @@ static int hci_uart_setup(struct hci_dev *hdev)
btbcm_check_bdaddr(hdev);
break;
#endif
+ default:
+ break;
}
done:
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 05ec530b8a3a..f05382b5a65d 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -29,7 +29,12 @@
*/
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/serdev.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -50,6 +55,9 @@
#define IBS_TX_IDLE_TIMEOUT_MS 2000
#define BAUDRATE_SETTLE_TIMEOUT_MS 300
+/* susclk rate */
+#define SUSCLK_RATE_32KHZ 32768
+
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
HCI_IBS_TX_ASLEEP,
@@ -111,6 +119,12 @@ struct qca_data {
u64 votes_off;
};
+struct qca_serdev {
+ struct hci_uart serdev_hu;
+ struct gpio_desc *bt_en;
+ struct clk *susclk;
+};
+
static void __serial_clock_on(struct tty_struct *tty)
{
/* TODO: Some chipset requires to enable UART clock on client
@@ -386,6 +400,7 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
+ struct qca_serdev *qcadev;
struct qca_data *qca;
BT_DBG("hu %p qca_open", hu);
@@ -444,6 +459,13 @@ static int qca_open(struct hci_uart *hu)
timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+ if (hu->serdev) {
+ serdev_device_open(hu->serdev);
+
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ }
+
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans);
@@ -512,6 +534,7 @@ static int qca_flush(struct hci_uart *hu)
/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
+ struct qca_serdev *qcadev;
struct qca_data *qca = hu->priv;
BT_DBG("hu %p qca close", hu);
@@ -525,6 +548,13 @@ static int qca_close(struct hci_uart *hu)
destroy_workqueue(qca->workqueue);
qca->hu = NULL;
+ if (hu->serdev) {
+ serdev_device_close(hu->serdev);
+
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ gpiod_set_value_cansleep(qcadev->bt_en, 0);
+ }
+
kfree_skb(qca->rx_skb);
hu->priv = NULL;
@@ -885,6 +915,14 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
return 0;
}
+static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
+{
+ if (hu->serdev)
+ serdev_device_set_baudrate(hu->serdev, speed);
+ else
+ hci_uart_set_baudrate(hu, speed);
+}
+
static int qca_setup(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
@@ -905,7 +943,7 @@ static int qca_setup(struct hci_uart *hu)
speed = hu->proto->init_speed;
if (speed)
- hci_uart_set_baudrate(hu, speed);
+ host_set_baudrate(hu, speed);
/* Setup user speed if needed */
speed = 0;
@@ -924,7 +962,7 @@ static int qca_setup(struct hci_uart *hu)
ret);
return ret;
}
- hci_uart_set_baudrate(hu, speed);
+ host_set_baudrate(hu, speed);
}
/* Setup patch / NVM configurations */
@@ -935,6 +973,12 @@ static int qca_setup(struct hci_uart *hu)
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
ret = 0;
+ } else if (ret == -EAGAIN) {
+ /*
+ * Userspace firmware loader will return -EAGAIN in case no
+ * patch/nvm-config is found, so run with original fw/config.
+ */
+ ret = 0;
}
/* Setup bdaddr */
@@ -958,12 +1002,80 @@ static struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
+static int qca_serdev_probe(struct serdev_device *serdev)
+{
+ struct qca_serdev *qcadev;
+ int err;
+
+ qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
+ if (!qcadev)
+ return -ENOMEM;
+
+ qcadev->serdev_hu.serdev = serdev;
+ serdev_device_set_drvdata(serdev, qcadev);
+
+ qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(qcadev->bt_en)) {
+ dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+ return PTR_ERR(qcadev->bt_en);
+ }
+
+ qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
+ if (IS_ERR(qcadev->susclk)) {
+ dev_err(&serdev->dev, "failed to acquire clk\n");
+ return PTR_ERR(qcadev->susclk);
+ }
+
+ err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(qcadev->susclk);
+ if (err)
+ return err;
+
+ err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
+ if (err)
+ clk_disable_unprepare(qcadev->susclk);
+
+ return err;
+}
+
+static void qca_serdev_remove(struct serdev_device *serdev)
+{
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+
+ hci_uart_unregister_device(&qcadev->serdev_hu);
+
+ clk_disable_unprepare(qcadev->susclk);
+}
+
+static const struct of_device_id qca_bluetooth_of_match[] = {
+ { .compatible = "qcom,qca6174-bt" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
+
+static struct serdev_device_driver qca_serdev_driver = {
+ .probe = qca_serdev_probe,
+ .remove = qca_serdev_remove,
+ .driver = {
+ .name = "hci_uart_qca",
+ .of_match_table = qca_bluetooth_of_match,
+ },
+};
+
int __init qca_init(void)
{
+ serdev_device_driver_register(&qca_serdev_driver);
+
return hci_uart_register_proto(&qca_proto);
}
int __exit qca_deinit(void)
{
+ serdev_device_driver_unregister(&qca_serdev_driver);
+
return hci_uart_unregister_proto(&qca_proto);
}
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index d1c0b60e9326..6dc177bf4c42 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -33,6 +33,7 @@ config HISILICON_LPC
bool "Support for ISA I/O space on HiSilicon Hip06/7"
depends on ARM64 && (ARCH_HISI || COMPILE_TEST)
select INDIRECT_PIO
+ select MFD_CORE if ACPI
help
Driver to enable I/O access to devices attached to the Low Pin
Count bus on the HiSilicon Hip06/7 SoC.
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 8327478effd0..bfc566d3f31a 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2371,7 +2371,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
return media_changed(cdi, 1);
- if ((unsigned int)arg >= cdi->capacity)
+ if (arg >= cdi->capacity)
return -EINVAL;
info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index c381c8e396fc..79d8c84693a1 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -195,7 +195,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty
return 0;
}
-int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
size_t i;
u32 *gp;
@@ -470,7 +470,7 @@ static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
return 0;
}
-void null_cache_flush(void)
+static void null_cache_flush(void)
{
mb();
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e027e7fa1472..cd888d4ee605 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -261,6 +261,7 @@
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
+#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
@@ -427,8 +428,9 @@ struct crng_state primary_crng = {
* its value (from 0->1->2).
*/
static int crng_init = 0;
-#define crng_ready() (likely(crng_init > 0))
+#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
+static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
__u32 out[CHACHA20_BLOCK_WORDS]);
@@ -437,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng,
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
+static struct ratelimit_state unseeded_warning =
+ RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
+static struct ratelimit_state urandom_warning =
+ RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+
+static int ratelimit_disable __read_mostly;
+
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+
/**********************************************************************
*
* OS independent entropy store. Here are the functions which handle
@@ -787,6 +799,43 @@ static void crng_initialize(struct crng_state *crng)
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
+#ifdef CONFIG_NUMA
+static void do_numa_crng_init(struct work_struct *work)
+{
+ int i;
+ struct crng_state *crng;
+ struct crng_state **pool;
+
+ pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+ for_each_online_node(i) {
+ crng = kmalloc_node(sizeof(struct crng_state),
+ GFP_KERNEL | __GFP_NOFAIL, i);
+ spin_lock_init(&crng->lock);
+ crng_initialize(crng);
+ pool[i] = crng;
+ }
+ mb();
+ if (cmpxchg(&crng_node_pool, NULL, pool)) {
+ for_each_node(i)
+ kfree(pool[i]);
+ kfree(pool);
+ }
+}
+
+static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
+
+static void numa_crng_init(void)
+{
+ schedule_work(&numa_crng_init_work);
+}
+#else
+static void numa_crng_init(void) {}
+#endif
+
+/*
+ * crng_fast_load() can be called by code in the interrupt service
+ * path. So we can't afford to dilly-dally.
+ */
static int crng_fast_load(const char *cp, size_t len)
{
unsigned long flags;
@@ -794,7 +843,7 @@ static int crng_fast_load(const char *cp, size_t len)
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
- if (crng_ready()) {
+ if (crng_init != 0) {
spin_unlock_irqrestore(&primary_crng.lock, flags);
return 0;
}
@@ -813,6 +862,51 @@ static int crng_fast_load(const char *cp, size_t len)
return 1;
}
+/*
+ * crng_slow_load() is called by add_device_randomness, which has two
+ * attributes. (1) We can't trust that the buffer passed to it is
+ * guaranteed to be unpredictable (so it might not have any entropy at
+ * all), and (2) it doesn't have the performance constraints of
+ * crng_fast_load().
+ *
+ * So we do something more comprehensive which is guaranteed to touch
+ * all of the primary_crng's state, and which uses a LFSR with a
+ * period of 255 as part of the mixing algorithm. Finally, we do
+ * *not* advance crng_init_cnt since the buffer we get may be something
+ * like a fixed DMI table (for example), which might very well be
+ * unique to the machine, but is otherwise unvarying.
+ */
+static int crng_slow_load(const char *cp, size_t len)
+{
+ unsigned long flags;
+ static unsigned char lfsr = 1;
+ unsigned char tmp;
+ unsigned i, max = CHACHA20_KEY_SIZE;
+ const char * src_buf = cp;
+ char * dest_buf = (char *) &primary_crng.state[4];
+
+ if (!spin_trylock_irqsave(&primary_crng.lock, flags))
+ return 0;
+ if (crng_init != 0) {
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
+ return 0;
+ }
+ if (len > max)
+ max = len;
+
+ for (i = 0; i < max ; i++) {
+ tmp = lfsr;
+ lfsr >>= 1;
+ if (tmp & 1)
+ lfsr ^= 0xE1;
+ tmp = dest_buf[i % CHACHA20_KEY_SIZE];
+ dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
+ lfsr += (tmp << 3) | (tmp >> 5);
+ }
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
+ return 1;
+}
+
static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
unsigned long flags;
@@ -831,7 +925,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
_crng_backtrack_protect(&primary_crng, buf.block,
CHACHA20_KEY_SIZE);
}
- spin_lock_irqsave(&primary_crng.lock, flags);
+ spin_lock_irqsave(&crng->lock, flags);
for (i = 0; i < 8; i++) {
unsigned long rv;
if (!arch_get_random_seed_long(&rv) &&
@@ -841,13 +935,26 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
memzero_explicit(&buf, sizeof(buf));
crng->init_time = jiffies;
- spin_unlock_irqrestore(&primary_crng.lock, flags);
+ spin_unlock_irqrestore(&crng->lock, flags);
if (crng == &primary_crng && crng_init < 2) {
invalidate_batched_entropy();
+ numa_crng_init();
crng_init = 2;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
pr_notice("random: crng init done\n");
+ if (unseeded_warning.missed) {
+ pr_notice("random: %d get_random_xx warning(s) missed "
+ "due to ratelimiting\n",
+ unseeded_warning.missed);
+ unseeded_warning.missed = 0;
+ }
+ if (urandom_warning.missed) {
+ pr_notice("random: %d urandom warning(s) missed "
+ "due to ratelimiting\n",
+ urandom_warning.missed);
+ urandom_warning.missed = 0;
+ }
}
}
@@ -856,8 +963,9 @@ static void _extract_crng(struct crng_state *crng,
{
unsigned long v, flags;
- if (crng_init > 1 &&
- time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))
+ if (crng_ready() &&
+ (time_after(crng_global_init_time, crng->init_time) ||
+ time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
spin_lock_irqsave(&crng->lock, flags);
if (arch_get_random_long(&v))
@@ -981,10 +1089,8 @@ void add_device_randomness(const void *buf, unsigned int size)
unsigned long time = random_get_entropy() ^ jiffies;
unsigned long flags;
- if (!crng_ready()) {
- crng_fast_load(buf, size);
- return;
- }
+ if (!crng_ready() && size)
+ crng_slow_load(buf, size);
trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
@@ -1139,7 +1245,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
- if (!crng_ready()) {
+ if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
crng_fast_load((char *) fast_pool->pool,
sizeof(fast_pool->pool))) {
@@ -1489,8 +1595,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
print_once = true;
#endif
- pr_notice("random: %s called from %pS with crng_init=%d\n",
- func_name, caller, crng_init);
+ if (__ratelimit(&unseeded_warning))
+ pr_notice("random: %s called from %pS with crng_init=%d\n",
+ func_name, caller, crng_init);
}
/*
@@ -1680,28 +1787,14 @@ static void init_std_data(struct entropy_store *r)
*/
static int rand_initialize(void)
{
-#ifdef CONFIG_NUMA
- int i;
- struct crng_state *crng;
- struct crng_state **pool;
-#endif
-
init_std_data(&input_pool);
init_std_data(&blocking_pool);
crng_initialize(&primary_crng);
-
-#ifdef CONFIG_NUMA
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
- for_each_online_node(i) {
- crng = kmalloc_node(sizeof(struct crng_state),
- GFP_KERNEL | __GFP_NOFAIL, i);
- spin_lock_init(&crng->lock);
- crng_initialize(crng);
- pool[i] = crng;
+ crng_global_init_time = jiffies;
+ if (ratelimit_disable) {
+ urandom_warning.interval = 0;
+ unseeded_warning.interval = 0;
}
- mb();
- crng_node_pool = pool;
-#endif
return 0;
}
early_initcall(rand_initialize);
@@ -1769,9 +1862,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
if (!crng_ready() && maxwarn > 0) {
maxwarn--;
- printk(KERN_NOTICE "random: %s: uninitialized urandom read "
- "(%zd bytes read)\n",
- current->comm, nbytes);
+ if (__ratelimit(&urandom_warning))
+ printk(KERN_NOTICE "random: %s: uninitialized "
+ "urandom read (%zd bytes read)\n",
+ current->comm, nbytes);
spin_lock_irqsave(&primary_crng.lock, flags);
crng_init_cnt = 0;
spin_unlock_irqrestore(&primary_crng.lock, flags);
@@ -1875,6 +1969,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
input_pool.entropy_count = 0;
blocking_pool.entropy_count = 0;
return 0;
+ case RNDRESEEDCRNG:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (crng_init < 2)
+ return -ENODATA;
+ crng_reseed(&primary_crng, NULL);
+ crng_global_init_time = jiffies - 1;
+ return 0;
default:
return -EINVAL;
}
@@ -2212,7 +2314,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
{
struct entropy_store *poolp = &input_pool;
- if (!crng_ready()) {
+ if (unlikely(crng_init == 0)) {
crng_fast_load(buffer, count);
return;
}
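The new RNDRESEEDCRNG case in random_ioctl() above can be exercised from userspace once the matching ioctl definition (added to include/uapi/linux/random.h by the same series) is in place. A minimal illustrative sketch, error handling kept to the essentials:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/random.h>

int main(void)
{
	int fd = open("/dev/urandom", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Force an immediate CRNG reseed; needs CAP_SYS_ADMIN and
	 * fails with ENODATA while the CRNG is still uninitialized. */
	if (ioctl(fd, RNDRESEEDCRNG) < 0)
		perror("RNDRESEEDCRNG");
	close(fd);
	return 0;
}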
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 468f06134012..21085515814f 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void)
}
}
-static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
int pages)
{
struct port_buffer *buf;
@@ -445,16 +445,16 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
return buf;
}
- if (is_rproc_serial(vq->vdev)) {
+ if (is_rproc_serial(vdev)) {
/*
* Allocate DMA memory from ancestor. When a virtio
* device is created by remoteproc, the DMA memory is
* associated with the grandparent device:
* vdev => rproc => platform-dev.
*/
- if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
+ if (!vdev->dev.parent || !vdev->dev.parent->parent)
goto free_buf;
- buf->dev = vq->vdev->dev.parent->parent;
+ buf->dev = vdev->dev.parent->parent;
/* Increase device refcnt to avoid freeing it */
get_device(buf->dev);
@@ -838,7 +838,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
count = min((size_t)(32 * 1024), count);
- buf = alloc_buf(port->out_vq, count, 0);
+ buf = alloc_buf(port->portdev->vdev, count, 0);
if (!buf)
return -ENOMEM;
@@ -957,7 +957,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
if (ret < 0)
goto error_out;
- buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
+ buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
if (!buf) {
ret = -ENOMEM;
goto error_out;
@@ -1374,7 +1374,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
nr_added_bufs = 0;
do {
- buf = alloc_buf(vq, PAGE_SIZE, 0);
+ buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
if (!buf)
break;
@@ -1402,7 +1402,6 @@ static int add_port(struct ports_device *portdev, u32 id)
{
char debugfs_name[16];
struct port *port;
- struct port_buffer *buf;
dev_t devt;
unsigned int nr_added_bufs;
int err;
@@ -1513,8 +1512,6 @@ static int add_port(struct ports_device *portdev, u32 id)
return 0;
free_inbufs:
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf, true);
free_device:
device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
@@ -1539,34 +1536,14 @@ static void remove_port(struct kref *kref)
static void remove_port_data(struct port *port)
{
- struct port_buffer *buf;
-
spin_lock_irq(&port->inbuf_lock);
/* Remove unused data this port might have received. */
discard_port_data(port);
spin_unlock_irq(&port->inbuf_lock);
- /* Remove buffers we queued up for the Host to send us data in. */
- do {
- spin_lock_irq(&port->inbuf_lock);
- buf = virtqueue_detach_unused_buf(port->in_vq);
- spin_unlock_irq(&port->inbuf_lock);
- if (buf)
- free_buf(buf, true);
- } while (buf);
-
spin_lock_irq(&port->outvq_lock);
reclaim_consumed_buffers(port);
spin_unlock_irq(&port->outvq_lock);
-
- /* Free pending buffers from the out-queue. */
- do {
- spin_lock_irq(&port->outvq_lock);
- buf = virtqueue_detach_unused_buf(port->out_vq);
- spin_unlock_irq(&port->outvq_lock);
- if (buf)
- free_buf(buf, true);
- } while (buf);
}
/*
@@ -1791,13 +1768,24 @@ static void control_work_handler(struct work_struct *work)
spin_unlock(&portdev->c_ivq_lock);
}
+static void flush_bufs(struct virtqueue *vq, bool can_sleep)
+{
+ struct port_buffer *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(vq, &len)))
+ free_buf(buf, can_sleep);
+}
+
static void out_intr(struct virtqueue *vq)
{
struct port *port;
port = find_port_by_vq(vq->vdev->priv, vq);
- if (!port)
+ if (!port) {
+ flush_bufs(vq, false);
return;
+ }
wake_up_interruptible(&port->waitqueue);
}
@@ -1808,8 +1796,10 @@ static void in_intr(struct virtqueue *vq)
unsigned long flags;
port = find_port_by_vq(vq->vdev->priv, vq);
- if (!port)
+ if (!port) {
+ flush_bufs(vq, false);
return;
+ }
spin_lock_irqsave(&port->inbuf_lock, flags);
port->inbuf = get_inbuf(port);
@@ -1984,24 +1974,54 @@ static const struct file_operations portdev_fops = {
static void remove_vqs(struct ports_device *portdev)
{
+ struct virtqueue *vq;
+
+ virtio_device_for_each_vq(portdev->vdev, vq) {
+ struct port_buffer *buf;
+
+ flush_bufs(vq, true);
+ while ((buf = virtqueue_detach_unused_buf(vq)))
+ free_buf(buf, true);
+ }
portdev->vdev->config->del_vqs(portdev->vdev);
kfree(portdev->in_vqs);
kfree(portdev->out_vqs);
}
-static void remove_controlq_data(struct ports_device *portdev)
+static void virtcons_remove(struct virtio_device *vdev)
{
- struct port_buffer *buf;
- unsigned int len;
+ struct ports_device *portdev;
+ struct port *port, *port2;
- if (!use_multiport(portdev))
- return;
+ portdev = vdev->priv;
- while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
- free_buf(buf, true);
+ spin_lock_irq(&pdrvdata_lock);
+ list_del(&portdev->list);
+ spin_unlock_irq(&pdrvdata_lock);
- while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
- free_buf(buf, true);
+ /* Disable interrupts for vqs */
+ vdev->config->reset(vdev);
+ /* Finish up work that's lined up */
+ if (use_multiport(portdev))
+ cancel_work_sync(&portdev->control_work);
+ else
+ cancel_work_sync(&portdev->config_work);
+
+ list_for_each_entry_safe(port, port2, &portdev->ports, list)
+ unplug_port(port);
+
+ unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+
+ /*
+ * When yanking out a device, we immediately lose the
+ * (device-side) queues. So there's no point in keeping the
+ * guest side around till we drop our final reference. This
+ * also means that any ports which are in an open state will
+ * have to just stop using the port, as the vqs are going
+ * away.
+ */
+ remove_vqs(portdev);
+ kfree(portdev);
}
/*
@@ -2070,6 +2090,7 @@ static int virtcons_probe(struct virtio_device *vdev)
spin_lock_init(&portdev->ports_lock);
INIT_LIST_HEAD(&portdev->ports);
+ INIT_LIST_HEAD(&portdev->list);
virtio_device_ready(portdev->vdev);
@@ -2087,8 +2108,15 @@ static int virtcons_probe(struct virtio_device *vdev)
if (!nr_added_bufs) {
dev_err(&vdev->dev,
"Error allocating buffers for control queue\n");
- err = -ENOMEM;
- goto free_vqs;
+ /*
+ * The host might want to notify mgmt sw about device
+ * add failure.
+ */
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 0);
+ /* Device was functional: we need full cleanup. */
+ virtcons_remove(vdev);
+ return -ENOMEM;
}
} else {
/*
@@ -2119,11 +2147,6 @@ static int virtcons_probe(struct virtio_device *vdev)
return 0;
-free_vqs:
- /* The host might want to notify mgmt sw about device add failure */
- __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
- VIRTIO_CONSOLE_DEVICE_READY, 0);
- remove_vqs(portdev);
free_chrdev:
unregister_chrdev(portdev->chr_major, "virtio-portsdev");
free:
@@ -2132,43 +2155,6 @@ fail:
return err;
}
-static void virtcons_remove(struct virtio_device *vdev)
-{
- struct ports_device *portdev;
- struct port *port, *port2;
-
- portdev = vdev->priv;
-
- spin_lock_irq(&pdrvdata_lock);
- list_del(&portdev->list);
- spin_unlock_irq(&pdrvdata_lock);
-
- /* Disable interrupts for vqs */
- vdev->config->reset(vdev);
- /* Finish up work that's lined up */
- if (use_multiport(portdev))
- cancel_work_sync(&portdev->control_work);
- else
- cancel_work_sync(&portdev->config_work);
-
- list_for_each_entry_safe(port, port2, &portdev->ports, list)
- unplug_port(port);
-
- unregister_chrdev(portdev->chr_major, "virtio-portsdev");
-
- /*
- * When yanking out a device, we immediately lose the
- * (device-side) queues. So there's no point in keeping the
- * guest side around till we drop our final reference. This
- * also means that any ports which are in an open state will
- * have to just stop using the port, as the vqs are going
- * away.
- */
- remove_controlq_data(portdev);
- remove_vqs(portdev);
- kfree(portdev);
-}
-
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -2209,7 +2195,6 @@ static int virtcons_freeze(struct virtio_device *vdev)
*/
if (use_multiport(portdev))
virtqueue_disable_cb(portdev->c_ivq);
- remove_controlq_data(portdev);
list_for_each_entry(port, &portdev->ports, list) {
virtqueue_disable_cb(port->in_vq);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 7ae23b25b406..34968a381d0f 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -55,8 +55,10 @@ config COMMON_CLK_RK808
by control register.
config COMMON_CLK_HI655X
- tristate "Clock driver for Hi655x"
- depends on MFD_HI655X_PMIC || COMPILE_TEST
+ tristate "Clock driver for Hi655x" if EXPERT
+ depends on (MFD_HI655X_PMIC || COMPILE_TEST)
+ depends on REGMAP
+ default MFD_HI655X_PMIC
---help---
This driver supports the hi655x PMIC clock. This
multi-function device has one fixed-rate oscillator, clocked
@@ -101,6 +103,15 @@ config COMMON_CLK_SI514
This driver supports the Silicon Labs 514 programmable clock
generator.
+config COMMON_CLK_SI544
+ tristate "Clock driver for SiLabs 544 devices"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver supports the Silicon Labs 544 programmable clock
+ generator.
+
config COMMON_CLK_SI570
tristate "Clock driver for SiLabs 570 and compatible devices"
depends on I2C
@@ -248,6 +259,24 @@ config COMMON_CLK_VC5
This driver supports the IDT VersaClock 5 and VersaClock 6
programmable clock generators.
+config COMMON_CLK_STM32MP157
+ def_bool COMMON_CLK && MACH_STM32MP157
+ help
+ Support for stm32mp157 SoC family clocks
+
+config COMMON_CLK_STM32F
+ def_bool COMMON_CLK && (MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746)
+ help
+ Support for stm32f4 and stm32f7 SoC families clocks
+
+config COMMON_CLK_STM32H7
+ def_bool COMMON_CLK && MACH_STM32H743
+ help
+ Support for stm32h7 SoC family clocks
+
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
source "drivers/clk/imgtec/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 6605513eaa94..de6d06ac790b 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -45,9 +45,11 @@ obj-$(CONFIG_COMMON_CLK_SCMI) += clk-scmi.o
obj-$(CONFIG_COMMON_CLK_SCPI) += clk-scpi.o
obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o
+obj-$(CONFIG_COMMON_CLK_SI544) += clk-si544.o
obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
-obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o
-obj-$(CONFIG_ARCH_STM32) += clk-stm32h7.o
+obj-$(CONFIG_COMMON_CLK_STM32F) += clk-stm32f4.o
+obj-$(CONFIG_COMMON_CLK_STM32H7) += clk-stm32h7.o
+obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o
obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_U300) += clk-u300.o
@@ -62,6 +64,7 @@ obj-$(CONFIG_ARCH_ARTPEC) += axis/
obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/
obj-y += bcm/
obj-$(CONFIG_ARCH_BERLIN) += berlin/
+obj-$(CONFIG_ARCH_DAVINCI) += davinci/
obj-$(CONFIG_H8300) += h8300/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-y += imgtec/
@@ -89,6 +92,7 @@ obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-$(CONFIG_ARCH_SPRD) += sprd/
obj-$(CONFIG_ARCH_STI) += st/
+obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index a07f6451694a..fa0d5c8611a0 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -602,9 +602,7 @@ static void bcm2835_pll_off(struct clk_hw *hw)
const struct bcm2835_pll_data *data = pll->data;
spin_lock(&cprman->regs_lock);
- cprman_write(cprman, data->cm_ctrl_reg,
- cprman_read(cprman, data->cm_ctrl_reg) |
- CM_PLL_ANARST);
+ cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
cprman_write(cprman, data->a2w_ctrl_reg,
cprman_read(cprman, data->a2w_ctrl_reg) |
A2W_PLL_CTRL_PWRDN);
@@ -640,6 +638,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
cpu_relax();
}
+ cprman_write(cprman, data->a2w_ctrl_reg,
+ cprman_read(cprman, data->a2w_ctrl_reg) |
+ A2W_PLL_CTRL_PRST_DISABLE);
+
return 0;
}
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index e8ea81c30f0c..a2f8c42e527a 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -541,7 +541,7 @@ probe_err:
return ret;
}
-static int cs2000_resume(struct device *dev)
+static int __maybe_unused cs2000_resume(struct device *dev)
{
struct cs2000_priv *priv = dev_get_drvdata(dev);
@@ -549,7 +549,7 @@ static int cs2000_resume(struct device *dev)
}
static const struct dev_pm_ops cs2000_pm_ops = {
- .resume_early = cs2000_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, cs2000_resume)
};
static struct i2c_driver cs2000_driver = {
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index b49942b9fe50..b6234a5da12d 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -28,12 +28,10 @@
* parent - fixed parent. No clk_set_parent support
*/
-#define div_mask(width) ((1 << (width)) - 1)
-
static unsigned int _get_table_maxdiv(const struct clk_div_table *table,
u8 width)
{
- unsigned int maxdiv = 0, mask = div_mask(width);
+ unsigned int maxdiv = 0, mask = clk_div_mask(width);
const struct clk_div_table *clkt;
for (clkt = table; clkt->div; clkt++)
@@ -57,12 +55,12 @@ static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
unsigned long flags)
{
if (flags & CLK_DIVIDER_ONE_BASED)
- return div_mask(width);
+ return clk_div_mask(width);
if (flags & CLK_DIVIDER_POWER_OF_TWO)
- return 1 << div_mask(width);
+ return 1 << clk_div_mask(width);
if (table)
return _get_table_maxdiv(table, width);
- return div_mask(width) + 1;
+ return clk_div_mask(width) + 1;
}
static unsigned int _get_table_div(const struct clk_div_table *table,
@@ -84,7 +82,7 @@ static unsigned int _get_div(const struct clk_div_table *table,
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return 1 << val;
if (flags & CLK_DIVIDER_MAX_AT_ZERO)
- return val ? val : div_mask(width) + 1;
+ return val ? val : clk_div_mask(width) + 1;
if (table)
return _get_table_div(table, val);
return val + 1;
@@ -109,7 +107,7 @@ static unsigned int _get_val(const struct clk_div_table *table,
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return __ffs(div);
if (flags & CLK_DIVIDER_MAX_AT_ZERO)
- return (div == div_mask(width) + 1) ? 0 : div;
+ return (div == clk_div_mask(width) + 1) ? 0 : div;
if (table)
return _get_table_val(table, div);
return div - 1;
@@ -141,7 +139,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
unsigned int val;
val = clk_readl(divider->reg) >> divider->shift;
- val &= div_mask(divider->width);
+ val &= clk_div_mask(divider->width);
return divider_recalc_rate(hw, parent_rate, val, divider->table,
divider->flags, divider->width);
@@ -344,19 +342,43 @@ long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
}
EXPORT_SYMBOL_GPL(divider_round_rate_parent);
+long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
+ unsigned long rate, unsigned long *prate,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags, unsigned int val)
+{
+ int div;
+
+ div = _get_div(table, val, flags, width);
+
+ /* Even a read-only clock can propagate a rate change */
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
+ if (!parent)
+ return -EINVAL;
+
+ *prate = clk_hw_round_rate(parent, rate * div);
+ }
+
+ return DIV_ROUND_UP_ULL((u64)*prate, div);
+}
+EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);
+
+
static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_divider *divider = to_clk_divider(hw);
- int bestdiv;
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- bestdiv = clk_readl(divider->reg) >> divider->shift;
- bestdiv &= div_mask(divider->width);
- bestdiv = _get_div(divider->table, bestdiv, divider->flags,
- divider->width);
- return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ u32 val;
+
+ val = clk_readl(divider->reg) >> divider->shift;
+ val &= clk_div_mask(divider->width);
+
+ return divider_ro_round_rate(hw, rate, prate, divider->table,
+ divider->width, divider->flags,
+ val);
}
return divider_round_rate(hw, rate, prate, divider->table,
@@ -376,7 +398,7 @@ int divider_get_val(unsigned long rate, unsigned long parent_rate,
value = _get_val(table, div, flags, width);
- return min_t(unsigned int, value, div_mask(width));
+ return min_t(unsigned int, value, clk_div_mask(width));
}
EXPORT_SYMBOL_GPL(divider_get_val);
@@ -399,10 +421,10 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
__acquire(divider->lock);
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
- val = div_mask(divider->width) << (divider->shift + 16);
+ val = clk_div_mask(divider->width) << (divider->shift + 16);
} else {
val = clk_readl(divider->reg);
- val &= ~(div_mask(divider->width) << divider->shift);
+ val &= ~(clk_div_mask(divider->width) << divider->shift);
}
val |= (u32)value << divider->shift;
clk_writel(val, divider->reg);
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 151513c655c3..40af4fbab4d2 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -73,14 +73,14 @@ static u8 clk_gpio_mux_get_parent(struct clk_hw *hw)
{
struct clk_gpio *clk = to_clk_gpio(hw);
- return gpiod_get_value(clk->gpiod);
+ return gpiod_get_value_cansleep(clk->gpiod);
}
static int clk_gpio_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_gpio *clk = to_clk_gpio(hw);
- gpiod_set_value(clk->gpiod, index);
+ gpiod_set_value_cansleep(clk->gpiod, index);
return 0;
}
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 39cabe157163..1628b93655ed 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -26,35 +26,24 @@
* parent - parent is adjustable through clk_set_parent
*/
-static u8 clk_mux_get_parent(struct clk_hw *hw)
+int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
+ unsigned int val)
{
- struct clk_mux *mux = to_clk_mux(hw);
int num_parents = clk_hw_get_num_parents(hw);
- u32 val;
- /*
- * FIXME need a mux-specific flag to determine if val is bitwise or numeric
- * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1
- * to 0x7 (index starts at one)
- * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
- * val = 0x4 really means "bit 2, index starts at bit 0"
- */
- val = clk_readl(mux->reg) >> mux->shift;
- val &= mux->mask;
-
- if (mux->table) {
+ if (table) {
int i;
for (i = 0; i < num_parents; i++)
- if (mux->table[i] == val)
+ if (table[i] == val)
return i;
return -EINVAL;
}
- if (val && (mux->flags & CLK_MUX_INDEX_BIT))
+ if (val && (flags & CLK_MUX_INDEX_BIT))
val = ffs(val) - 1;
- if (val && (mux->flags & CLK_MUX_INDEX_ONE))
+ if (val && (flags & CLK_MUX_INDEX_ONE))
val--;
if (val >= num_parents)
@@ -62,36 +51,58 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
return val;
}
+EXPORT_SYMBOL_GPL(clk_mux_val_to_index);
-static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
+unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index)
{
- struct clk_mux *mux = to_clk_mux(hw);
- u32 val;
- unsigned long flags = 0;
+ unsigned int val = index;
- if (mux->table) {
- index = mux->table[index];
+ if (table) {
+ val = table[index];
} else {
- if (mux->flags & CLK_MUX_INDEX_BIT)
- index = 1 << index;
+ if (flags & CLK_MUX_INDEX_BIT)
+ val = 1 << index;
- if (mux->flags & CLK_MUX_INDEX_ONE)
- index++;
+ if (flags & CLK_MUX_INDEX_ONE)
+ val++;
}
+ return val;
+}
+EXPORT_SYMBOL_GPL(clk_mux_index_to_val);
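+/*
+ * For example, with CLK_MUX_INDEX_BIT parent index 2 maps to register
+ * value BIT(2) = 4, with CLK_MUX_INDEX_ONE it maps to 3, and without
+ * either flag (and no table) index and register value are identical.
+ */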
+
+static u8 clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+
+ val = clk_readl(mux->reg) >> mux->shift;
+ val &= mux->mask;
+
+ return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
+}
+
+static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
+ unsigned long flags = 0;
+ u32 reg;
+
if (mux->lock)
spin_lock_irqsave(mux->lock, flags);
else
__acquire(mux->lock);
if (mux->flags & CLK_MUX_HIWORD_MASK) {
- val = mux->mask << (mux->shift + 16);
+ reg = mux->mask << (mux->shift + 16);
} else {
- val = clk_readl(mux->reg);
- val &= ~(mux->mask << mux->shift);
+ reg = clk_readl(mux->reg);
+ reg &= ~(mux->mask << mux->shift);
}
- val |= index << mux->shift;
- clk_writel(val, mux->reg);
+ val = val << mux->shift;
+ reg |= val;
+ clk_writel(reg, mux->reg);
if (mux->lock)
spin_unlock_irqrestore(mux->lock, flags);
@@ -101,10 +112,18 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
+static int clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+
+ return clk_mux_determine_rate_flags(hw, req, mux->flags);
+}
+
const struct clk_ops clk_mux_ops = {
.get_parent = clk_mux_get_parent,
.set_parent = clk_mux_set_parent,
- .determine_rate = __clk_mux_determine_rate,
+ .determine_rate = clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
new file mode 100644
index 000000000000..1c96a9f6c022
--- /dev/null
+++ b/drivers/clk/clk-si544.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Silicon Labs Si544 Programmable Oscillator
+ * Copyright (C) 2018 Topic Embedded Products
+ * Author: Mike Looijmans <mike.looijmans@topic.nl>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* I2C registers (decimal as in datasheet) */
+#define SI544_REG_CONTROL 7
+#define SI544_REG_OE_STATE 17
+#define SI544_REG_HS_DIV 23
+#define SI544_REG_LS_HS_DIV 24
+#define SI544_REG_FBDIV0 26
+#define SI544_REG_FBDIV8 27
+#define SI544_REG_FBDIV16 28
+#define SI544_REG_FBDIV24 29
+#define SI544_REG_FBDIV32 30
+#define SI544_REG_FBDIV40 31
+#define SI544_REG_FCAL_OVR 69
+#define SI544_REG_ADPLL_DELTA_M0 231
+#define SI544_REG_ADPLL_DELTA_M8 232
+#define SI544_REG_ADPLL_DELTA_M16 233
+#define SI544_REG_PAGE_SELECT 255
+
+/* Register values */
+#define SI544_CONTROL_RESET BIT(7)
+#define SI544_CONTROL_MS_ICAL2 BIT(3)
+
+#define SI544_OE_STATE_ODC_OE BIT(0)
+
+/* Max freq depends on speed grade */
+#define SI544_MIN_FREQ 200000U
+
+/* Si544 internal oscillator runs at 55.05 MHz */
+#define FXO 55050000U
+
+/* VCO range is 10.8 .. 12.1 GHz, max depends on speed grade */
+#define FVCO_MIN 10800000000ULL
+
+#define HS_DIV_MAX 2046
+#define HS_DIV_MAX_ODD 33
+
+/* Lowest frequency synthesizable using only the HS divider */
+#define MIN_HSDIV_FREQ (FVCO_MIN / HS_DIV_MAX)
+
+enum si544_speed_grade {
+ si544a,
+ si544b,
+ si544c,
+};
+
+struct clk_si544 {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ struct i2c_client *i2c_client;
+ enum si544_speed_grade speed_grade;
+};
+#define to_clk_si544(_hw) container_of(_hw, struct clk_si544, hw)
+
+/**
+ * struct clk_si544_muldiv - Multiplier/divider settings
+ * @fb_div_frac: fractional part of feedback divider (32 bits)
+ * @fb_div_int: integer part of feedback divider (11 bits)
+ * @hs_div: 1st divider, 5..2046, must be even when >33
+ * @ls_div_bits: 2nd divider, as 2^x, range 0..5
+ * If ls_div_bits is non-zero, hs_div must be even
+ */
+struct clk_si544_muldiv {
+ u32 fb_div_frac;
+ u16 fb_div_int;
+ u16 hs_div;
+ u8 ls_div_bits;
+};
+
+/* Enables or disables the output driver */
+static int si544_enable_output(struct clk_si544 *data, bool enable)
+{
+ return regmap_update_bits(data->regmap, SI544_REG_OE_STATE,
+ SI544_OE_STATE_ODC_OE, enable ? SI544_OE_STATE_ODC_OE : 0);
+}
+
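+/*
+ * Register SI544_REG_LS_HS_DIV holds HS_DIV bits [10:8] in its bits
+ * [2:0] and LS_DIV in bits [6:4]; the shifts and masks in
+ * si544_get_muldiv() and si544_set_muldiv() below reflect that layout.
+ */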
+/* Retrieve clock multiplier and dividers from hardware */
+static int si544_get_muldiv(struct clk_si544 *data,
+ struct clk_si544_muldiv *settings)
+{
+ int err;
+ u8 reg[6];
+
+ err = regmap_bulk_read(data->regmap, SI544_REG_HS_DIV, reg, 2);
+ if (err)
+ return err;
+
+ settings->ls_div_bits = (reg[1] >> 4) & 0x07;
+ settings->hs_div = (reg[1] & 0x07) << 8 | reg[0];
+
+ err = regmap_bulk_read(data->regmap, SI544_REG_FBDIV0, reg, 6);
+ if (err)
+ return err;
+
+ settings->fb_div_int = reg[4] | (reg[5] & 0x07) << 8;
+ settings->fb_div_frac = reg[0] | reg[1] << 8 | reg[2] << 16 |
+ reg[3] << 24;
+ return 0;
+}
+
+static int si544_set_muldiv(struct clk_si544 *data,
+ struct clk_si544_muldiv *settings)
+{
+ int err;
+ u8 reg[6];
+
+ reg[0] = settings->hs_div;
+ reg[1] = settings->hs_div >> 8 | settings->ls_div_bits << 4;
+
+ err = regmap_bulk_write(data->regmap, SI544_REG_HS_DIV, reg, 2);
+ if (err < 0)
+ return err;
+
+ reg[0] = settings->fb_div_frac;
+ reg[1] = settings->fb_div_frac >> 8;
+ reg[2] = settings->fb_div_frac >> 16;
+ reg[3] = settings->fb_div_frac >> 24;
+ reg[4] = settings->fb_div_int;
+ reg[5] = settings->fb_div_int >> 8;
+
+ /*
+ * Writing to SI544_REG_FBDIV40 triggers the clock change, so that
+ * must be written last
+ */
+ return regmap_bulk_write(data->regmap, SI544_REG_FBDIV0, reg, 6);
+}
+
+static bool is_valid_frequency(const struct clk_si544 *data,
+ unsigned long frequency)
+{
+ unsigned long max_freq = 0;
+
+ if (frequency < SI544_MIN_FREQ)
+ return false;
+
+ switch (data->speed_grade) {
+ case si544a:
+ max_freq = 1500000000;
+ break;
+ case si544b:
+ max_freq = 800000000;
+ break;
+ case si544c:
+ max_freq = 350000000;
+ break;
+ }
+
+ return frequency <= max_freq;
+}
+
+/* Calculate divider settings for a given frequency */
+static int si544_calc_muldiv(struct clk_si544_muldiv *settings,
+ unsigned long frequency)
+{
+ u64 vco;
+ u32 ls_freq;
+ u32 tmp;
+ u8 res;
+
+ /* Determine the minimum value of LS_DIV and resulting target freq. */
+ ls_freq = frequency;
+ settings->ls_div_bits = 0;
+
+ if (frequency >= MIN_HSDIV_FREQ) {
+ settings->ls_div_bits = 0;
+ } else {
+ res = 1;
+ tmp = 2 * HS_DIV_MAX;
+ while (tmp <= (HS_DIV_MAX * 32)) {
+ if (((u64)frequency * tmp) >= FVCO_MIN)
+ break;
+ ++res;
+ tmp <<= 1;
+ }
+ settings->ls_div_bits = res;
+ ls_freq = frequency << res;
+ }
+
+ /* Determine minimum HS_DIV by rounding up */
+ vco = FVCO_MIN + ls_freq - 1;
+ do_div(vco, ls_freq);
+ settings->hs_div = vco;
+
+ /* round up to even number when required */
+ if ((settings->hs_div & 1) &&
+ (settings->hs_div > HS_DIV_MAX_ODD || settings->ls_div_bits))
+ ++settings->hs_div;
+
+ /* Calculate VCO frequency (in 10..12GHz range) */
+ vco = (u64)ls_freq * settings->hs_div;
+
+ /* Calculate the integer part of the feedback divider */
+ tmp = do_div(vco, FXO);
+ settings->fb_div_int = vco;
+
+ /* And the fractional bits using the remainder */
+ vco = (u64)tmp << 32;
+ do_div(vco, FXO);
+ settings->fb_div_frac = vco;
+
+ return 0;
+}
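+
+/*
+ * For reference, the register fields map to the output frequency as:
+ *   fVCO = FXO * (fb_div_int + fb_div_frac / 2^32)
+ *   fOUT = fVCO / (hs_div * 2^ls_div_bits)
+ * This is the relation si544_calc_muldiv() above solves for and
+ * si544_calc_rate() below evaluates.
+ */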
+
+/* Calculate resulting frequency given the register settings */
+static unsigned long si544_calc_rate(struct clk_si544_muldiv *settings)
+{
+ u32 d = settings->hs_div * BIT(settings->ls_div_bits);
+ u64 vco;
+
+ /* Calculate VCO from the fractional part */
+ vco = (u64)settings->fb_div_frac * FXO;
+ vco += (FXO / 2);
+ vco >>= 32;
+
+ /* Add the integer part of the VCO frequency */
+ vco += (u64)settings->fb_div_int * FXO;
+
+ /* Apply divider to obtain the generated frequency */
+ do_div(vco, d);
+
+ return vco;
+}
+
+static unsigned long si544_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+ struct clk_si544_muldiv settings;
+ int err;
+
+ err = si544_get_muldiv(data, &settings);
+ if (err)
+ return 0;
+
+ return si544_calc_rate(&settings);
+}
+
+static long si544_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+ struct clk_si544_muldiv settings;
+ int err;
+
+ if (!is_valid_frequency(data, rate))
+ return -EINVAL;
+
+ err = si544_calc_muldiv(&settings, rate);
+ if (err)
+ return err;
+
+ return si544_calc_rate(&settings);
+}
+
+/*
+ * Update output frequency for "big" frequency changes
+ */
+static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+ struct clk_si544_muldiv settings;
+ int err;
+
+ if (!is_valid_frequency(data, rate))
+ return -EINVAL;
+
+ err = si544_calc_muldiv(&settings, rate);
+ if (err)
+ return err;
+
+ si544_enable_output(data, false);
+
+ /* Allow FCAL for this frequency update */
+ err = regmap_write(data->regmap, SI544_REG_FCAL_OVR, 0);
+ if (err < 0)
+ return err;
+
+ err = si544_set_muldiv(data, &settings);
+ if (err < 0)
+ return err; /* Undefined state now, best to leave disabled */
+
+ /* Trigger calibration */
+ err = regmap_write(data->regmap, SI544_REG_CONTROL,
+ SI544_CONTROL_MS_ICAL2);
+ if (err < 0)
+ return err;
+
+ /* Applying a new frequency can take up to 10ms */
+ usleep_range(10000, 12000);
+
+ si544_enable_output(data, true);
+
+ return err;
+}
+
+static const struct clk_ops si544_clk_ops = {
+ .recalc_rate = si544_recalc_rate,
+ .round_rate = si544_round_rate,
+ .set_rate = si544_set_rate,
+};
+
+static bool si544_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SI544_REG_CONTROL:
+ case SI544_REG_FCAL_OVR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config si544_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = SI544_REG_PAGE_SELECT,
+ .volatile_reg = si544_regmap_is_volatile,
+};
+
+static int si544_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct clk_si544 *data;
+ struct clk_init_data init;
+ int err;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ init.ops = &si544_clk_ops;
+ init.flags = 0;
+ init.num_parents = 0;
+ data->hw.init = &init;
+ data->i2c_client = client;
+ data->speed_grade = id->driver_data;
+
+ if (of_property_read_string(client->dev.of_node, "clock-output-names",
+ &init.name))
+ init.name = client->dev.of_node->name;
+
+ data->regmap = devm_regmap_init_i2c(client, &si544_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ i2c_set_clientdata(client, data);
+
+ /* Select page 0, just to be sure; there appear to be no other pages */
+ err = regmap_write(data->regmap, SI544_REG_PAGE_SELECT, 0);
+ if (err < 0)
+ return err;
+
+ err = devm_clk_hw_register(&client->dev, &data->hw);
+ if (err) {
+ dev_err(&client->dev, "clock registration failed\n");
+ return err;
+ }
+ err = devm_of_clk_add_hw_provider(&client->dev, of_clk_hw_simple_get,
+ &data->hw);
+ if (err) {
+ dev_err(&client->dev, "unable to add clk provider\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id si544_id[] = {
+ { "si544a", si544a },
+ { "si544b", si544b },
+ { "si544c", si544c },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si544_id);
+
+static const struct of_device_id clk_si544_of_match[] = {
+ { .compatible = "silabs,si544a" },
+ { .compatible = "silabs,si544b" },
+ { .compatible = "silabs,si544c" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, clk_si544_of_match);
+
+static struct i2c_driver si544_driver = {
+ .driver = {
+ .name = "si544",
+ .of_match_table = clk_si544_of_match,
+ },
+ .probe = si544_probe,
+ .id_table = si544_id,
+};
+module_i2c_driver(si544_driver);
+
+MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>");
+MODULE_DESCRIPTION("Si544 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index da44f8dc1d29..294850bdc195 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -282,6 +282,7 @@ static const struct stm32f4_gate_data stm32f746_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 0, "tim1", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 1, "tim8", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 7, "sdmmc2", "sdmux" },
{ STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" },
@@ -315,7 +316,7 @@ static const u64 stm32f46xx_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
static const u64 stm32f746_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
0x0000000000000003ull,
- 0x04f77f033e01c9ffull };
+ 0x04f77f833e01c9ffull };
static const u64 *stm32f4_gate_map;
@@ -521,7 +522,7 @@ static const struct stm32f4_pll_data stm32f429_pll[MAX_PLL_DIV] = {
};
static const struct stm32f4_pll_data stm32f469_pll[MAX_PLL_DIV] = {
- { PLL, 50, { "pll", "pll-q", NULL } },
+ { PLL, 50, { "pll", "pll-q", "pll-r" } },
{ PLL_I2S, 50, { "plli2s-p", "plli2s-q", "plli2s-r" } },
{ PLL_SAI, 50, { "pllsai-p", "pllsai-q", "pllsai-r" } },
};
@@ -1047,6 +1048,8 @@ static const char *rtc_parents[4] = {
"no-clock", "lse", "lsi", "hse-rtc"
};
+static const char *dsi_parent[2] = { NULL, "pll-r" };
+
static const char *lcd_parent[1] = { "pllsai-r-div" };
static const char *i2s_parents[2] = { "plli2s-r", NULL };
@@ -1156,6 +1159,12 @@ static const struct stm32_aux_clk stm32f469_aux_clk[] = {
NO_GATE, 0,
0
},
+ {
+ CLK_F469_DSI, "dsi", dsi_parent, ARRAY_SIZE(dsi_parent),
+ STM32F4_RCC_DCKCFGR, 29, 1,
+ STM32F4_RCC_APB2ENR, 27,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT
+ },
};
static const struct stm32_aux_clk stm32f746_aux_clk[] = {
@@ -1450,6 +1459,7 @@ static void __init stm32f4_rcc_init(struct device_node *np)
stm32f4_gate_map = data->gates_map;
hse_clk = of_clk_get_parent_name(np, 0);
+ dsi_parent[0] = hse_clk;
i2s_in_clk = of_clk_get_parent_name(np, 1);
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
new file mode 100644
index 000000000000..edd3cf451401
--- /dev/null
+++ b/drivers/clk/clk-stm32mp1.c
@@ -0,0 +1,2109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Olivier Bideau <olivier.bideau@st.com> for STMicroelectronics.
+ * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/stm32mp1-clks.h>
+
+static DEFINE_SPINLOCK(rlock);
+
+#define RCC_OCENSETR 0x0C
+#define RCC_HSICFGR 0x18
+#define RCC_RDLSICR 0x144
+#define RCC_PLL1CR 0x80
+#define RCC_PLL1CFGR1 0x84
+#define RCC_PLL1CFGR2 0x88
+#define RCC_PLL2CR 0x94
+#define RCC_PLL2CFGR1 0x98
+#define RCC_PLL2CFGR2 0x9C
+#define RCC_PLL3CR 0x880
+#define RCC_PLL3CFGR1 0x884
+#define RCC_PLL3CFGR2 0x888
+#define RCC_PLL4CR 0x894
+#define RCC_PLL4CFGR1 0x898
+#define RCC_PLL4CFGR2 0x89C
+#define RCC_APB1ENSETR 0xA00
+#define RCC_APB2ENSETR 0xA08
+#define RCC_APB3ENSETR 0xA10
+#define RCC_APB4ENSETR 0x200
+#define RCC_APB5ENSETR 0x208
+#define RCC_AHB2ENSETR 0xA18
+#define RCC_AHB3ENSETR 0xA20
+#define RCC_AHB4ENSETR 0xA28
+#define RCC_AHB5ENSETR 0x210
+#define RCC_AHB6ENSETR 0x218
+#define RCC_AHB6LPENSETR 0x318
+#define RCC_RCK12SELR 0x28
+#define RCC_RCK3SELR 0x820
+#define RCC_RCK4SELR 0x824
+#define RCC_MPCKSELR 0x20
+#define RCC_ASSCKSELR 0x24
+#define RCC_MSSCKSELR 0x48
+#define RCC_SPI6CKSELR 0xC4
+#define RCC_SDMMC12CKSELR 0x8F4
+#define RCC_SDMMC3CKSELR 0x8F8
+#define RCC_FMCCKSELR 0x904
+#define RCC_I2C46CKSELR 0xC0
+#define RCC_I2C12CKSELR 0x8C0
+#define RCC_I2C35CKSELR 0x8C4
+#define RCC_UART1CKSELR 0xC8
+#define RCC_QSPICKSELR 0x900
+#define RCC_ETHCKSELR 0x8FC
+#define RCC_RNG1CKSELR 0xCC
+#define RCC_RNG2CKSELR 0x920
+#define RCC_GPUCKSELR 0x938
+#define RCC_USBCKSELR 0x91C
+#define RCC_STGENCKSELR 0xD4
+#define RCC_SPDIFCKSELR 0x914
+#define RCC_SPI2S1CKSELR 0x8D8
+#define RCC_SPI2S23CKSELR 0x8DC
+#define RCC_SPI2S45CKSELR 0x8E0
+#define RCC_CECCKSELR 0x918
+#define RCC_LPTIM1CKSELR 0x934
+#define RCC_LPTIM23CKSELR 0x930
+#define RCC_LPTIM45CKSELR 0x92C
+#define RCC_UART24CKSELR 0x8E8
+#define RCC_UART35CKSELR 0x8EC
+#define RCC_UART6CKSELR 0x8E4
+#define RCC_UART78CKSELR 0x8F0
+#define RCC_FDCANCKSELR 0x90C
+#define RCC_SAI1CKSELR 0x8C8
+#define RCC_SAI2CKSELR 0x8CC
+#define RCC_SAI3CKSELR 0x8D0
+#define RCC_SAI4CKSELR 0x8D4
+#define RCC_ADCCKSELR 0x928
+#define RCC_MPCKDIVR 0x2C
+#define RCC_DSICKSELR 0x924
+#define RCC_CPERCKSELR 0xD0
+#define RCC_MCO1CFGR 0x800
+#define RCC_MCO2CFGR 0x804
+#define RCC_BDCR 0x140
+#define RCC_AXIDIVR 0x30
+#define RCC_MCUDIVR 0x830
+#define RCC_APB1DIVR 0x834
+#define RCC_APB2DIVR 0x838
+#define RCC_APB3DIVR 0x83C
+#define RCC_APB4DIVR 0x3C
+#define RCC_APB5DIVR 0x40
+#define RCC_TIMG1PRER 0x828
+#define RCC_TIMG2PRER 0x82C
+#define RCC_RTCDIVR 0x44
+#define RCC_DBGCFGR 0x80C
+
+#define RCC_CLR 0x4
+
+static const char * const ref12_parents[] = {
+ "ck_hsi", "ck_hse"
+};
+
+static const char * const ref3_parents[] = {
+ "ck_hsi", "ck_hse", "ck_csi"
+};
+
+static const char * const ref4_parents[] = {
+ "ck_hsi", "ck_hse", "ck_csi"
+};
+
+static const char * const cpu_src[] = {
+ "ck_hsi", "ck_hse", "pll1_p"
+};
+
+static const char * const axi_src[] = {
+ "ck_hsi", "ck_hse", "pll2_p", "pll3_p"
+};
+
+static const char * const per_src[] = {
+ "ck_hsi", "ck_csi", "ck_hse"
+};
+
+static const char * const mcu_src[] = {
+ "ck_hsi", "ck_hse", "ck_csi", "pll3_p"
+};
+
+static const char * const sdmmc12_src[] = {
+ "ck_axi", "pll3_r", "pll4_p", "ck_hsi"
+};
+
+static const char * const sdmmc3_src[] = {
+ "ck_mcu", "pll3_r", "pll4_p", "ck_hsi"
+};
+
+static const char * const fmc_src[] = {
+ "ck_axi", "pll3_r", "pll4_p", "ck_per"
+};
+
+static const char * const qspi_src[] = {
+ "ck_axi", "pll3_r", "pll4_p", "ck_per"
+};
+
+static const char * const eth_src[] = {
+ "pll4_p", "pll3_q"
+};
+
+static const char * const rng_src[] = {
+ "ck_csi", "pll4_r", "ck_lse", "ck_lsi"
+};
+
+static const char * const usbphy_src[] = {
+ "ck_hse", "pll4_r", "clk-hse-div2"
+};
+
+static const char * const usbo_src[] = {
+ "pll4_r", "ck_usbo_48m"
+};
+
+static const char * const stgen_src[] = {
+ "ck_hsi", "ck_hse"
+};
+
+static const char * const spdif_src[] = {
+ "pll4_p", "pll3_q", "ck_hsi"
+};
+
+static const char * const spi123_src[] = {
+ "pll4_p", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
+};
+
+static const char * const spi45_src[] = {
+ "pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
+};
+
+static const char * const spi6_src[] = {
+ "pclk5", "pll4_q", "ck_hsi", "ck_csi", "ck_hse", "pll3_q"
+};
+
+static const char * const cec_src[] = {
+ "ck_lse", "ck_lsi", "ck_csi"
+};
+
+static const char * const i2c12_src[] = {
+ "pclk1", "pll4_r", "ck_hsi", "ck_csi"
+};
+
+static const char * const i2c35_src[] = {
+ "pclk1", "pll4_r", "ck_hsi", "ck_csi"
+};
+
+static const char * const i2c46_src[] = {
+ "pclk5", "pll3_q", "ck_hsi", "ck_csi"
+};
+
+static const char * const lptim1_src[] = {
+ "pclk1", "pll4_p", "pll3_q", "ck_lse", "ck_lsi", "ck_per"
+};
+
+static const char * const lptim23_src[] = {
+ "pclk3", "pll4_q", "ck_per", "ck_lse", "ck_lsi"
+};
+
+static const char * const lptim45_src[] = {
+ "pclk3", "pll4_p", "pll3_q", "ck_lse", "ck_lsi", "ck_per"
+};
+
+static const char * const usart1_src[] = {
+ "pclk5", "pll3_q", "ck_hsi", "ck_csi", "pll4_q", "ck_hse"
+};
+
+static const char * const usart234578_src[] = {
+ "pclk1", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
+};
+
+static const char * const usart6_src[] = {
+ "pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
+};
+
+static const char * const fdcan_src[] = {
+ "ck_hse", "pll3_q", "pll4_q"
+};
+
+static const char * const sai_src[] = {
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per"
+};
+
+static const char * const sai2_src[] = {
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb"
+};
+
+static const char * const adc12_src[] = {
+ "pll4_q", "ck_per"
+};
+
+static const char * const dsi_src[] = {
+ "ck_dsi_phy", "pll4_p"
+};
+
+static const char * const rtc_src[] = {
+ "off", "ck_lse", "ck_lsi", "ck_hse_rtc"
+};
+
+static const char * const mco1_src[] = {
+ "ck_hsi", "ck_hse", "ck_csi", "ck_lsi", "ck_lse"
+};
+
+static const char * const mco2_src[] = {
+ "ck_mpu", "ck_axi", "ck_mcu", "pll4_p", "ck_hse", "ck_hsi"
+};
+
+static const char * const ck_trace_src[] = {
+ "ck_axi"
+};
+
+static const struct clk_div_table axi_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 4 }, { 5, 4 }, { 6, 4 }, { 7, 4 },
+ { 0 },
+};
+
+static const struct clk_div_table mcu_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
+ { 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 },
+ { 8, 512 }, { 9, 512 }, { 10, 512}, { 11, 512 },
+ { 12, 512 }, { 13, 512 }, { 14, 512}, { 15, 512 },
+ { 0 },
+};
+
+static const struct clk_div_table apb_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
+ { 4, 16 }, { 5, 16 }, { 6, 16 }, { 7, 16 },
+ { 0 },
+};
+
+static const struct clk_div_table ck_trace_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
+ { 4, 16 }, { 5, 16 }, { 6, 16 }, { 7, 16 },
+ { 0 },
+};
+
+#define MAX_MUX_CLK 2
+
+struct stm32_mmux {
+ u8 nbr_clk;
+ struct clk_hw *hws[MAX_MUX_CLK];
+};
+
+struct stm32_clk_mmux {
+ struct clk_mux mux;
+ struct stm32_mmux *mmux;
+};
+
+struct stm32_mgate {
+ u8 nbr_clk;
+ u32 flag;
+};
+
+struct stm32_clk_mgate {
+ struct clk_gate gate;
+ struct stm32_mgate *mgate;
+ u32 mask;
+};
+
+struct clock_config {
+ u32 id;
+ const char *name;
+ const char *parent_name;
+ const char * const *parent_names;
+ int num_parents;
+ unsigned long flags;
+ void *cfg;
+ struct clk_hw * (*func)(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg);
+};
+
+#define NO_ID ~0
+
+struct gate_cfg {
+ u32 reg_off;
+ u8 bit_idx;
+ u8 gate_flags;
+};
+
+struct fixed_factor_cfg {
+ unsigned int mult;
+ unsigned int div;
+};
+
+struct div_cfg {
+ u32 reg_off;
+ u8 shift;
+ u8 width;
+ u8 div_flags;
+ const struct clk_div_table *table;
+};
+
+struct mux_cfg {
+ u32 reg_off;
+ u8 shift;
+ u8 width;
+ u8 mux_flags;
+ u32 *table;
+};
+
+struct stm32_gate_cfg {
+ struct gate_cfg *gate;
+ struct stm32_mgate *mgate;
+ const struct clk_ops *ops;
+};
+
+struct stm32_div_cfg {
+ struct div_cfg *div;
+ const struct clk_ops *ops;
+};
+
+struct stm32_mux_cfg {
+ struct mux_cfg *mux;
+ struct stm32_mmux *mmux;
+ const struct clk_ops *ops;
+};
+
+/* STM32 Composite clock */
+struct stm32_composite_cfg {
+ const struct stm32_gate_cfg *gate;
+ const struct stm32_div_cfg *div;
+ const struct stm32_mux_cfg *mux;
+};
+
+static struct clk_hw *
+_clk_hw_register_gate(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct gate_cfg *gate_cfg = cfg->cfg;
+
+ return clk_hw_register_gate(dev,
+ cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ gate_cfg->reg_off + base,
+ gate_cfg->bit_idx,
+ gate_cfg->gate_flags,
+ lock);
+}
+
+static struct clk_hw *
+_clk_hw_register_fixed_factor(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct fixed_factor_cfg *ff_cfg = cfg->cfg;
+
+ return clk_hw_register_fixed_factor(dev, cfg->name, cfg->parent_name,
+ cfg->flags, ff_cfg->mult,
+ ff_cfg->div);
+}
+
+static struct clk_hw *
+_clk_hw_register_divider_table(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct div_cfg *div_cfg = cfg->cfg;
+
+ return clk_hw_register_divider_table(dev,
+ cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ div_cfg->reg_off + base,
+ div_cfg->shift,
+ div_cfg->width,
+ div_cfg->div_flags,
+ div_cfg->table,
+ lock);
+}
+
+static struct clk_hw *
+_clk_hw_register_mux(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct mux_cfg *mux_cfg = cfg->cfg;
+
+ return clk_hw_register_mux(dev, cfg->name, cfg->parent_names,
+ cfg->num_parents, cfg->flags,
+ mux_cfg->reg_off + base, mux_cfg->shift,
+ mux_cfg->width, mux_cfg->mux_flags, lock);
+}
+
+/* MP1 Gate clock with set & clear registers */
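+/*
+ * Enabling uses the generic gate op on the xxxENSETR register;
+ * disabling instead writes the bit to the companion clear register at
+ * offset RCC_CLR (+0x4), so no read-modify-write is needed.
+ */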
+
+static int mp1_gate_clk_enable(struct clk_hw *hw)
+{
+ if (!clk_gate_ops.is_enabled(hw))
+ clk_gate_ops.enable(hw);
+
+ return 0;
+}
+
+static void mp1_gate_clk_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags = 0;
+
+ if (clk_gate_ops.is_enabled(hw)) {
+ spin_lock_irqsave(gate->lock, flags);
+ writel_relaxed(BIT(gate->bit_idx), gate->reg + RCC_CLR);
+ spin_unlock_irqrestore(gate->lock, flags);
+ }
+}
+
+static const struct clk_ops mp1_gate_clk_ops = {
+ .enable = mp1_gate_clk_enable,
+ .disable = mp1_gate_clk_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
+static struct clk_hw *_get_stm32_mux(void __iomem *base,
+ const struct stm32_mux_cfg *cfg,
+ spinlock_t *lock)
+{
+ struct stm32_clk_mmux *mmux;
+ struct clk_mux *mux;
+ struct clk_hw *mux_hw;
+
+ if (cfg->mmux) {
+ mmux = kzalloc(sizeof(*mmux), GFP_KERNEL);
+ if (!mmux)
+ return ERR_PTR(-ENOMEM);
+
+ mmux->mux.reg = cfg->mux->reg_off + base;
+ mmux->mux.shift = cfg->mux->shift;
+ mmux->mux.mask = (1 << cfg->mux->width) - 1;
+ mmux->mux.flags = cfg->mux->mux_flags;
+ mmux->mux.table = cfg->mux->table;
+ mmux->mux.lock = lock;
+ mmux->mmux = cfg->mmux;
+ mux_hw = &mmux->mux.hw;
+ cfg->mmux->hws[cfg->mmux->nbr_clk++] = mux_hw;
+
+ } else {
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->reg = cfg->mux->reg_off + base;
+ mux->shift = cfg->mux->shift;
+ mux->mask = (1 << cfg->mux->width) - 1;
+ mux->flags = cfg->mux->mux_flags;
+ mux->table = cfg->mux->table;
+ mux->lock = lock;
+ mux_hw = &mux->hw;
+ }
+
+ return mux_hw;
+}
+
+static struct clk_hw *_get_stm32_div(void __iomem *base,
+ const struct stm32_div_cfg *cfg,
+ spinlock_t *lock)
+{
+ struct clk_divider *div;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ div->reg = cfg->div->reg_off + base;
+ div->shift = cfg->div->shift;
+ div->width = cfg->div->width;
+ div->flags = cfg->div->div_flags;
+ div->table = cfg->div->table;
+ div->lock = lock;
+
+ return &div->hw;
+}
+
+static struct clk_hw *
+_get_stm32_gate(void __iomem *base,
+ const struct stm32_gate_cfg *cfg, spinlock_t *lock)
+{
+ struct stm32_clk_mgate *mgate;
+ struct clk_gate *gate;
+ struct clk_hw *gate_hw;
+
+ if (cfg->mgate) {
+ mgate = kzalloc(sizeof(*mgate), GFP_KERNEL);
+ if (!mgate)
+ return ERR_PTR(-ENOMEM);
+
+ mgate->gate.reg = cfg->gate->reg_off + base;
+ mgate->gate.bit_idx = cfg->gate->bit_idx;
+ mgate->gate.flags = cfg->gate->gate_flags;
+ mgate->gate.lock = lock;
+ mgate->mask = BIT(cfg->mgate->nbr_clk++);
+
+ mgate->mgate = cfg->mgate;
+
+ gate_hw = &mgate->gate.hw;
+
+ } else {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->reg = cfg->gate->reg_off + base;
+ gate->bit_idx = cfg->gate->bit_idx;
+ gate->flags = cfg->gate->gate_flags;
+ gate->lock = lock;
+
+ gate_hw = &gate->hw;
+ }
+
+ return gate_hw;
+}
+
+static struct clk_hw *
+clk_stm32_register_gate_ops(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *base,
+ const struct stm32_gate_cfg *cfg,
+ spinlock_t *lock)
+{
+ struct clk_init_data init = { NULL };
+ struct clk_gate *gate;
+ struct clk_hw *hw;
+ int ret;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ init.ops = &clk_gate_ops;
+
+ if (cfg->ops)
+ init.ops = cfg->ops;
+
+ hw = _get_stm32_gate(base, cfg, lock);
+ if (IS_ERR(hw))
+ return ERR_PTR(-ENOMEM);
+
+ hw->init = &init;
+
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(gate);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+static struct clk_hw *
+clk_stm32_register_composite(struct device *dev,
+ const char *name, const char * const *parent_names,
+ int num_parents, void __iomem *base,
+ const struct stm32_composite_cfg *cfg,
+ unsigned long flags, spinlock_t *lock)
+{
+ const struct clk_ops *mux_ops, *div_ops, *gate_ops;
+ struct clk_hw *mux_hw, *div_hw, *gate_hw;
+
+ mux_hw = NULL;
+ div_hw = NULL;
+ gate_hw = NULL;
+ mux_ops = NULL;
+ div_ops = NULL;
+ gate_ops = NULL;
+
+ if (cfg->mux) {
+ mux_hw = _get_stm32_mux(base, cfg->mux, lock);
+
+ if (!IS_ERR(mux_hw)) {
+ mux_ops = &clk_mux_ops;
+
+ if (cfg->mux->ops)
+ mux_ops = cfg->mux->ops;
+ }
+ }
+
+ if (cfg->div) {
+ div_hw = _get_stm32_div(base, cfg->div, lock);
+
+ if (!IS_ERR(div_hw)) {
+ div_ops = &clk_divider_ops;
+
+ if (cfg->div->ops)
+ div_ops = cfg->div->ops;
+ }
+ }
+
+ if (cfg->gate) {
+ gate_hw = _get_stm32_gate(base, cfg->gate, lock);
+
+ if (!IS_ERR(gate_hw)) {
+ gate_ops = &clk_gate_ops;
+
+ if (cfg->gate->ops)
+ gate_ops = cfg->gate->ops;
+ }
+ }
+
+ return clk_hw_register_composite(dev, name, parent_names, num_parents,
+ mux_hw, mux_ops, div_hw, div_ops,
+ gate_hw, gate_ops, flags);
+}
+
+#define to_clk_mgate(_gate) container_of(_gate, struct stm32_clk_mgate, gate)
+
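+/*
+ * Several kernel clocks can share one hardware gate bit ("mgate"): each
+ * user marks itself in mgate->flag when enabled, and the gate bit is
+ * only cleared again once the last user has been disabled.
+ */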
+static int mp1_mgate_clk_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32_clk_mgate *clk_mgate = to_clk_mgate(gate);
+
+ clk_mgate->mgate->flag |= clk_mgate->mask;
+
+ mp1_gate_clk_enable(hw);
+
+ return 0;
+}
+
+static void mp1_mgate_clk_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32_clk_mgate *clk_mgate = to_clk_mgate(gate);
+
+ clk_mgate->mgate->flag &= ~clk_mgate->mask;
+
+ if (clk_mgate->mgate->flag == 0)
+ mp1_gate_clk_disable(hw);
+}
+
+static const struct clk_ops mp1_mgate_clk_ops = {
+ .enable = mp1_mgate_clk_enable,
+ .disable = mp1_mgate_clk_disable,
+ .is_enabled = clk_gate_is_enabled,
+
+};
+
+#define to_clk_mmux(_mux) container_of(_mux, struct stm32_clk_mmux, mux)
+
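+/*
+ * Kernel clocks sharing one mux register field ("mmux") are kept
+ * coherent: clk_mmux_set_parent() reparents all sibling clk_hw
+ * instances whenever one of them switches parent.
+ */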
+static u8 clk_mmux_get_parent(struct clk_hw *hw)
+{
+ return clk_mux_ops.get_parent(hw);
+}
+
+static int clk_mmux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ struct stm32_clk_mmux *clk_mmux = to_clk_mmux(mux);
+ struct clk_hw *hwp;
+ int ret, n;
+
+ ret = clk_mux_ops.set_parent(hw, index);
+ if (ret)
+ return ret;
+
+ hwp = clk_hw_get_parent(hw);
+
+ for (n = 0; n < clk_mmux->mmux->nbr_clk; n++)
+ if (clk_mmux->mmux->hws[n] != hw)
+ clk_hw_reparent(clk_mmux->mmux->hws[n], hwp);
+
+ return 0;
+}
+
+static const struct clk_ops clk_mmux_ops = {
+ .get_parent = clk_mmux_get_parent,
+ .set_parent = clk_mmux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+/* STM32 PLL */
+struct stm32_pll_obj {
+ /* lock pll enable/disable registers */
+ spinlock_t *lock;
+ void __iomem *reg;
+ struct clk_hw hw;
+};
+
+#define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
+
+#define PLL_ON BIT(0)
+#define PLL_RDY BIT(1)
+#define DIVN_MASK 0x1FF
+#define DIVM_MASK 0x3F
+#define DIVM_SHIFT 16
+#define DIVN_SHIFT 0
+#define FRAC_OFFSET 0xC
+#define FRAC_MASK 0x1FFF
+#define FRAC_SHIFT 3
+#define FRACLE BIT(16)
+
+static int __pll_is_enabled(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+
+ return readl_relaxed(clk_elem->reg) & PLL_ON;
+}
+
+#define TIMEOUT 5
+
+static int pll_enable(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ u32 reg;
+ unsigned long flags = 0;
+ unsigned int timeout = TIMEOUT;
+ int bit_status = 0;
+
+ spin_lock_irqsave(clk_elem->lock, flags);
+
+ if (__pll_is_enabled(hw))
+ goto unlock;
+
+ reg = readl_relaxed(clk_elem->reg);
+ reg |= PLL_ON;
+ writel_relaxed(reg, clk_elem->reg);
+
+ /* We can't use readl_poll_timeout() here: it could block if this
+ * clock is enabled before the clocksource has been switched, when
+ * only the jiffies counter is available; and jiffies are driven by
+ * interrupts, which the enable op keeps disabled. Hence the bounded
+ * udelay() loop below.
+ */
+ do {
+ bit_status = !(readl_relaxed(clk_elem->reg) & PLL_RDY);
+
+ if (bit_status)
+ udelay(120);
+
+ } while (bit_status && --timeout);
+
+unlock:
+ spin_unlock_irqrestore(clk_elem->lock, flags);
+
+ return bit_status;
+}
+
+static void pll_disable(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ u32 reg;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(clk_elem->lock, flags);
+
+ reg = readl_relaxed(clk_elem->reg);
+ reg &= ~PLL_ON;
+ writel_relaxed(reg, clk_elem->reg);
+
+ spin_unlock_irqrestore(clk_elem->lock, flags);
+}
+
+static u32 pll_frac_val(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ u32 reg, frac = 0;
+
+ reg = readl_relaxed(clk_elem->reg + FRAC_OFFSET);
+ if (reg & FRACLE)
+ frac = (reg >> FRAC_SHIFT) & FRAC_MASK;
+
+ return frac;
+}
+
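+/*
+ * As computed below, the PLL output rate is:
+ *   rate = parent_rate * (DIVN + 1 + FRAC / 2^13) / (DIVM + 1)
+ * where FRAC only contributes when the FRACLE latch bit is set.
+ */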
+static unsigned long pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ u32 reg;
+ u32 frac, divm, divn;
+ u64 rate, rate_frac = 0;
+
+ reg = readl_relaxed(clk_elem->reg + 4);
+
+ divm = ((reg >> DIVM_SHIFT) & DIVM_MASK) + 1;
+ divn = ((reg >> DIVN_SHIFT) & DIVN_MASK) + 1;
+ rate = (u64)parent_rate * divn;
+
+ do_div(rate, divm);
+
+ frac = pll_frac_val(hw);
+ if (frac) {
+ rate_frac = (u64)parent_rate * (u64)frac;
+ do_div(rate_frac, (divm * 8192));
+ }
+
+ return rate + rate_frac;
+}
+
+static int pll_is_enabled(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ unsigned long flags = 0;
+ int ret;
+
+ spin_lock_irqsave(clk_elem->lock, flags);
+ ret = __pll_is_enabled(hw);
+ spin_unlock_irqrestore(clk_elem->lock, flags);
+
+ return ret;
+}
+
+static const struct clk_ops pll_ops = {
+ .enable = pll_enable,
+ .disable = pll_disable,
+ .recalc_rate = pll_recalc_rate,
+ .is_enabled = pll_is_enabled,
+};
+
+static struct clk_hw *clk_register_pll(struct device *dev, const char *name,
+ const char *parent_name,
+ void __iomem *reg,
+ unsigned long flags,
+ spinlock_t *lock)
+{
+ struct stm32_pll_obj *element;
+ struct clk_init_data init;
+ struct clk_hw *hw;
+ int err;
+
+ element = kzalloc(sizeof(*element), GFP_KERNEL);
+ if (!element)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &pll_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ element->hw.init = &init;
+ element->reg = reg;
+ element->lock = lock;
+
+ hw = &element->hw;
+ err = clk_hw_register(dev, hw);
+
+ if (err) {
+ kfree(element);
+ return ERR_PTR(err);
+ }
+
+ return hw;
+}
+
+/* Kernel Timer */
+struct timer_cker {
+ /* lock the kernel output divider register */
+ spinlock_t *lock;
+ void __iomem *apbdiv;
+ void __iomem *timpre;
+ struct clk_hw hw;
+};
+
+#define to_timer_cker(_hw) container_of(_hw, struct timer_cker, hw)
+
+#define APB_DIV_MASK 0x07
+#define TIM_PRE_MASK 0x01
+
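+/*
+ * When the APB prescaler does not divide, the timer kernel clock follows
+ * the parent rate; otherwise it runs at 2x or 4x the parent rate as
+ * selected by the TIMPRE bit (see __bestmult() and
+ * timer_ker_recalc_rate() below).
+ */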
+static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct timer_cker *tim_ker = to_timer_cker(hw);
+ u32 prescaler;
+ unsigned int mult = 0;
+
+ prescaler = readl_relaxed(tim_ker->apbdiv) & APB_DIV_MASK;
+ if (prescaler < 2)
+ return 1;
+
+ mult = 2;
+
+ if (rate / parent_rate >= 4)
+ mult = 4;
+
+ return mult;
+}
+
+static long timer_ker_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long factor = __bestmult(hw, rate, *parent_rate);
+
+ return *parent_rate * factor;
+}
+
+static int timer_ker_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct timer_cker *tim_ker = to_timer_cker(hw);
+ unsigned long flags = 0;
+ unsigned long factor = __bestmult(hw, rate, parent_rate);
+ int ret = 0;
+
+ spin_lock_irqsave(tim_ker->lock, flags);
+
+ switch (factor) {
+ case 1:
+ break;
+ case 2:
+ writel_relaxed(0, tim_ker->timpre);
+ break;
+ case 4:
+ writel_relaxed(1, tim_ker->timpre);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(tim_ker->lock, flags);
+
+ return ret;
+}
+
+static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct timer_cker *tim_ker = to_timer_cker(hw);
+ u32 prescaler, timpre;
+ u32 mul;
+
+ prescaler = readl_relaxed(tim_ker->apbdiv) & APB_DIV_MASK;
+
+ timpre = readl_relaxed(tim_ker->timpre) & TIM_PRE_MASK;
+
+ if (!prescaler)
+ return parent_rate;
+
+ mul = (timpre + 1) * 2;
+
+ return parent_rate * mul;
+}
+
+static const struct clk_ops timer_ker_ops = {
+ .recalc_rate = timer_ker_recalc_rate,
+ .round_rate = timer_ker_round_rate,
+ .set_rate = timer_ker_set_rate,
+
+};
+
+static struct clk_hw *clk_register_cktim(struct device *dev, const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *apbdiv,
+ void __iomem *timpre,
+ spinlock_t *lock)
+{
+ struct timer_cker *tim_ker;
+ struct clk_init_data init;
+ struct clk_hw *hw;
+ int err;
+
+ tim_ker = kzalloc(sizeof(*tim_ker), GFP_KERNEL);
+ if (!tim_ker)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &timer_ker_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ tim_ker->hw.init = &init;
+ tim_ker->lock = lock;
+ tim_ker->apbdiv = apbdiv;
+ tim_ker->timpre = timpre;
+
+ hw = &tim_ker->hw;
+ err = clk_hw_register(dev, hw);
+
+ if (err) {
+ kfree(tim_ker);
+ return ERR_PTR(err);
+ }
+
+ return hw;
+}
+
+struct stm32_pll_cfg {
+ u32 offset;
+};
+
+static struct clk_hw *_clk_register_pll(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct stm32_pll_cfg *stm_pll_cfg = cfg->cfg;
+
+ return clk_register_pll(dev, cfg->name, cfg->parent_name,
+ base + stm_pll_cfg->offset, cfg->flags, lock);
+}
+
+struct stm32_cktim_cfg {
+ u32 offset_apbdiv;
+ u32 offset_timpre;
+};
+
+static struct clk_hw *_clk_register_cktim(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct stm32_cktim_cfg *cktim_cfg = cfg->cfg;
+
+ return clk_register_cktim(dev, cfg->name, cfg->parent_name, cfg->flags,
+ cktim_cfg->offset_apbdiv + base,
+ cktim_cfg->offset_timpre + base, lock);
+}
+
+static struct clk_hw *
+_clk_stm32_register_gate(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ return clk_stm32_register_gate_ops(dev,
+ cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ base,
+ cfg->cfg,
+ lock);
+}
+
+static struct clk_hw *
+_clk_stm32_register_composite(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ return clk_stm32_register_composite(dev, cfg->name, cfg->parent_names,
+ cfg->num_parents, base, cfg->cfg,
+ cfg->flags, lock);
+}
+
+#define GATE(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = &(struct gate_cfg) {\
+ .reg_off = _offset,\
+ .bit_idx = _bit_idx,\
+ .gate_flags = _gate_flags,\
+ },\
+ .func = _clk_hw_register_gate,\
+}
+
+#define FIXED_FACTOR(_id, _name, _parent, _flags, _mult, _div)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = &(struct fixed_factor_cfg) {\
+ .mult = _mult,\
+ .div = _div,\
+ },\
+ .func = _clk_hw_register_fixed_factor,\
+}
+
+#define DIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
+ _div_flags, _div_table)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = &(struct div_cfg) {\
+ .reg_off = _offset,\
+ .shift = _shift,\
+ .width = _width,\
+ .div_flags = _div_flags,\
+ .table = _div_table,\
+ },\
+ .func = _clk_hw_register_divider_table,\
+}
+
+#define DIV(_id, _name, _parent, _flags, _offset, _shift, _width, _div_flags)\
+ DIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
+ _div_flags, NULL)
+
+#define MUX(_id, _name, _parents, _flags, _offset, _shift, _width, _mux_flags)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_names = _parents,\
+ .num_parents = ARRAY_SIZE(_parents),\
+ .flags = _flags,\
+ .cfg = &(struct mux_cfg) {\
+ .reg_off = _offset,\
+ .shift = _shift,\
+ .width = _width,\
+ .mux_flags = _mux_flags,\
+ },\
+ .func = _clk_hw_register_mux,\
+}
+
+#define PLL(_id, _name, _parent, _flags, _offset)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = &(struct stm32_pll_cfg) {\
+ .offset = _offset,\
+ },\
+ .func = _clk_register_pll,\
+}
+
+#define STM32_CKTIM(_name, _parent, _flags, _offset_apbdiv, _offset_timpre)\
+{\
+ .id = NO_ID,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = &(struct stm32_cktim_cfg) {\
+ .offset_apbdiv = _offset_apbdiv,\
+ .offset_timpre = _offset_timpre,\
+ },\
+ .func = _clk_register_cktim,\
+}
+
+#define STM32_TIM(_id, _name, _parent, _offset_set, _bit_idx)\
+ GATE_MP1(_id, _name, _parent, CLK_SET_RATE_PARENT,\
+ _offset_set, _bit_idx, 0)
+
+/* STM32 GATE */
+#define STM32_GATE(_id, _name, _parent, _flags, _gate)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = (struct stm32_gate_cfg *) {_gate},\
+ .func = _clk_stm32_register_gate,\
+}
+
+#define _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags, _mgate, _ops)\
+ (&(struct stm32_gate_cfg) {\
+ &(struct gate_cfg) {\
+ .reg_off = _gate_offset,\
+ .bit_idx = _gate_bit_idx,\
+ .gate_flags = _gate_flags,\
+ },\
+ .mgate = _mgate,\
+ .ops = _ops,\
+ })
+
+#define _STM32_MGATE(_mgate)\
+ (&per_gate_cfg[_mgate])
+
+#define _GATE(_gate_offset, _gate_bit_idx, _gate_flags)\
+ _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
+ NULL, NULL)\
+
+#define _GATE_MP1(_gate_offset, _gate_bit_idx, _gate_flags)\
+ _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
+ NULL, &mp1_gate_clk_ops)\
+
+#define _MGATE_MP1(_mgate)\
+ .gate = &per_gate_cfg[_mgate]
+
+#define GATE_MP1(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
+ STM32_GATE(_id, _name, _parent, _flags,\
+ _GATE_MP1(_offset, _bit_idx, _gate_flags))
+
+#define MGATE_MP1(_id, _name, _parent, _flags, _mgate)\
+ STM32_GATE(_id, _name, _parent, _flags,\
+ _STM32_MGATE(_mgate))
+
+#define _STM32_DIV(_div_offset, _div_shift, _div_width,\
+ _div_flags, _div_table, _ops)\
+ .div = &(struct stm32_div_cfg) {\
+ &(struct div_cfg) {\
+ .reg_off = _div_offset,\
+ .shift = _div_shift,\
+ .width = _div_width,\
+ .div_flags = _div_flags,\
+ .table = _div_table,\
+ },\
+ .ops = _ops,\
+ }
+
+#define _DIV(_div_offset, _div_shift, _div_width, _div_flags, _div_table)\
+ _STM32_DIV(_div_offset, _div_shift, _div_width,\
+ _div_flags, _div_table, NULL)\
+
+#define _STM32_MUX(_offset, _shift, _width, _mux_flags, _mmux, _ops)\
+ .mux = &(struct stm32_mux_cfg) {\
+ &(struct mux_cfg) {\
+ .reg_off = _offset,\
+ .shift = _shift,\
+ .width = _width,\
+ .mux_flags = _mux_flags,\
+ .table = NULL,\
+ },\
+ .mmux = _mmux,\
+ .ops = _ops,\
+ }
+
+#define _MUX(_offset, _shift, _width, _mux_flags)\
+ _STM32_MUX(_offset, _shift, _width, _mux_flags, NULL, NULL)\
+
+#define _MMUX(_mmux) .mux = &ker_mux_cfg[_mmux]
+
+#define PARENT(_parent) ((const char *[]) { _parent})
+
+#define _NO_MUX .mux = NULL
+#define _NO_DIV .div = NULL
+#define _NO_GATE .gate = NULL
+
+#define COMPOSITE(_id, _name, _parents, _flags, _gate, _mux, _div)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_names = _parents,\
+ .num_parents = ARRAY_SIZE(_parents),\
+ .flags = _flags,\
+ .cfg = &(struct stm32_composite_cfg) {\
+ _gate,\
+ _mux,\
+ _div,\
+ },\
+ .func = _clk_stm32_register_composite,\
+}
+
+#define PCLK(_id, _name, _parent, _flags, _mgate)\
+ MGATE_MP1(_id, _name, _parent, _flags, _mgate)
+
+#define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\
+ COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE | _flags,\
+ _MGATE_MP1(_mgate),\
+ _MMUX(_mmux),\
+ _NO_DIV)
+
+enum {
+ G_SAI1,
+ G_SAI2,
+ G_SAI3,
+ G_SAI4,
+ G_SPI1,
+ G_SPI2,
+ G_SPI3,
+ G_SPI4,
+ G_SPI5,
+ G_SPI6,
+ G_SPDIF,
+ G_I2C1,
+ G_I2C2,
+ G_I2C3,
+ G_I2C4,
+ G_I2C5,
+ G_I2C6,
+ G_USART2,
+ G_UART4,
+ G_USART3,
+ G_UART5,
+ G_USART1,
+ G_USART6,
+ G_UART7,
+ G_UART8,
+ G_LPTIM1,
+ G_LPTIM2,
+ G_LPTIM3,
+ G_LPTIM4,
+ G_LPTIM5,
+ G_LTDC,
+ G_DSI,
+ G_QSPI,
+ G_FMC,
+ G_SDMMC1,
+ G_SDMMC2,
+ G_SDMMC3,
+ G_USBO,
+ G_USBPHY,
+ G_RNG1,
+ G_RNG2,
+ G_FDCAN,
+ G_DAC12,
+ G_CEC,
+ G_ADC12,
+ G_GPU,
+ G_STGEN,
+ G_DFSDM,
+ G_ADFSDM,
+ G_TIM2,
+ G_TIM3,
+ G_TIM4,
+ G_TIM5,
+ G_TIM6,
+ G_TIM7,
+ G_TIM12,
+ G_TIM13,
+ G_TIM14,
+ G_MDIO,
+ G_TIM1,
+ G_TIM8,
+ G_TIM15,
+ G_TIM16,
+ G_TIM17,
+ G_SYSCFG,
+ G_VREF,
+ G_TMPSENS,
+ G_PMBCTRL,
+ G_HDP,
+ G_IWDG2,
+ G_STGENRO,
+ G_DMA1,
+ G_DMA2,
+ G_DMAMUX,
+ G_DCMI,
+ G_CRYP2,
+ G_HASH2,
+ G_CRC2,
+ G_HSEM,
+ G_IPCC,
+ G_GPIOA,
+ G_GPIOB,
+ G_GPIOC,
+ G_GPIOD,
+ G_GPIOE,
+ G_GPIOF,
+ G_GPIOG,
+ G_GPIOH,
+ G_GPIOI,
+ G_GPIOJ,
+ G_GPIOK,
+ G_MDMA,
+ G_ETHCK,
+ G_ETHTX,
+ G_ETHRX,
+ G_ETHMAC,
+ G_CRC1,
+ G_USBH,
+ G_ETHSTP,
+ G_RTCAPB,
+ G_TZC1,
+ G_TZC2,
+ G_TZPC,
+ G_IWDG1,
+ G_BSEC,
+ G_GPIOZ,
+ G_CRYP1,
+ G_HASH1,
+ G_BKPSRAM,
+
+ G_LAST
+};
+
+static struct stm32_mgate mp1_mgate[G_LAST];
+
+#define _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
+ _mgate, _ops)\
+ [_id] = {\
+ &(struct gate_cfg) {\
+ .reg_off = _gate_offset,\
+ .bit_idx = _gate_bit_idx,\
+ .gate_flags = _gate_flags,\
+ },\
+ .mgate = _mgate,\
+ .ops = _ops,\
+ }
+
+#define K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
+ _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
+ NULL, &mp1_gate_clk_ops)
+
+#define K_MGATE(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
+ _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
+ &mp1_mgate[_id], &mp1_mgate_clk_ops)
+
+/* Peripheral gates */
+static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
+ /* Multi gates */
+ K_GATE(G_MDIO, RCC_APB1ENSETR, 31, 0),
+ K_MGATE(G_DAC12, RCC_APB1ENSETR, 29, 0),
+ K_MGATE(G_CEC, RCC_APB1ENSETR, 27, 0),
+ K_MGATE(G_SPDIF, RCC_APB1ENSETR, 26, 0),
+ K_MGATE(G_I2C5, RCC_APB1ENSETR, 24, 0),
+ K_MGATE(G_I2C3, RCC_APB1ENSETR, 23, 0),
+ K_MGATE(G_I2C2, RCC_APB1ENSETR, 22, 0),
+ K_MGATE(G_I2C1, RCC_APB1ENSETR, 21, 0),
+ K_MGATE(G_UART8, RCC_APB1ENSETR, 19, 0),
+ K_MGATE(G_UART7, RCC_APB1ENSETR, 18, 0),
+ K_MGATE(G_UART5, RCC_APB1ENSETR, 17, 0),
+ K_MGATE(G_UART4, RCC_APB1ENSETR, 16, 0),
+ K_MGATE(G_USART3, RCC_APB1ENSETR, 15, 0),
+ K_MGATE(G_USART2, RCC_APB1ENSETR, 14, 0),
+ K_MGATE(G_SPI3, RCC_APB1ENSETR, 12, 0),
+ K_MGATE(G_SPI2, RCC_APB1ENSETR, 11, 0),
+ K_MGATE(G_LPTIM1, RCC_APB1ENSETR, 9, 0),
+ K_GATE(G_TIM14, RCC_APB1ENSETR, 8, 0),
+ K_GATE(G_TIM13, RCC_APB1ENSETR, 7, 0),
+ K_GATE(G_TIM12, RCC_APB1ENSETR, 6, 0),
+ K_GATE(G_TIM7, RCC_APB1ENSETR, 5, 0),
+ K_GATE(G_TIM6, RCC_APB1ENSETR, 4, 0),
+ K_GATE(G_TIM5, RCC_APB1ENSETR, 3, 0),
+ K_GATE(G_TIM4, RCC_APB1ENSETR, 2, 0),
+ K_GATE(G_TIM3, RCC_APB1ENSETR, 1, 0),
+ K_GATE(G_TIM2, RCC_APB1ENSETR, 0, 0),
+
+ K_MGATE(G_FDCAN, RCC_APB2ENSETR, 24, 0),
+ K_GATE(G_ADFSDM, RCC_APB2ENSETR, 21, 0),
+ K_GATE(G_DFSDM, RCC_APB2ENSETR, 20, 0),
+ K_MGATE(G_SAI3, RCC_APB2ENSETR, 18, 0),
+ K_MGATE(G_SAI2, RCC_APB2ENSETR, 17, 0),
+ K_MGATE(G_SAI1, RCC_APB2ENSETR, 16, 0),
+ K_MGATE(G_USART6, RCC_APB2ENSETR, 13, 0),
+ K_MGATE(G_SPI5, RCC_APB2ENSETR, 10, 0),
+ K_MGATE(G_SPI4, RCC_APB2ENSETR, 9, 0),
+ K_MGATE(G_SPI1, RCC_APB2ENSETR, 8, 0),
+ K_GATE(G_TIM17, RCC_APB2ENSETR, 4, 0),
+ K_GATE(G_TIM16, RCC_APB2ENSETR, 3, 0),
+ K_GATE(G_TIM15, RCC_APB2ENSETR, 2, 0),
+ K_GATE(G_TIM8, RCC_APB2ENSETR, 1, 0),
+ K_GATE(G_TIM1, RCC_APB2ENSETR, 0, 0),
+
+ K_GATE(G_HDP, RCC_APB3ENSETR, 20, 0),
+ K_GATE(G_PMBCTRL, RCC_APB3ENSETR, 17, 0),
+ K_GATE(G_TMPSENS, RCC_APB3ENSETR, 16, 0),
+ K_GATE(G_VREF, RCC_APB3ENSETR, 13, 0),
+ K_GATE(G_SYSCFG, RCC_APB3ENSETR, 11, 0),
+ K_MGATE(G_SAI4, RCC_APB3ENSETR, 8, 0),
+ K_MGATE(G_LPTIM5, RCC_APB3ENSETR, 3, 0),
+ K_MGATE(G_LPTIM4, RCC_APB3ENSETR, 2, 0),
+ K_MGATE(G_LPTIM3, RCC_APB3ENSETR, 1, 0),
+ K_MGATE(G_LPTIM2, RCC_APB3ENSETR, 0, 0),
+
+ K_GATE(G_STGENRO, RCC_APB4ENSETR, 20, 0),
+ K_MGATE(G_USBPHY, RCC_APB4ENSETR, 16, 0),
+ K_GATE(G_IWDG2, RCC_APB4ENSETR, 15, 0),
+ K_MGATE(G_DSI, RCC_APB4ENSETR, 4, 0),
+ K_MGATE(G_LTDC, RCC_APB4ENSETR, 0, 0),
+
+ K_GATE(G_STGEN, RCC_APB5ENSETR, 20, 0),
+ K_GATE(G_BSEC, RCC_APB5ENSETR, 16, 0),
+ K_GATE(G_IWDG1, RCC_APB5ENSETR, 15, 0),
+ K_GATE(G_TZPC, RCC_APB5ENSETR, 13, 0),
+ K_GATE(G_TZC2, RCC_APB5ENSETR, 12, 0),
+ K_GATE(G_TZC1, RCC_APB5ENSETR, 11, 0),
+ K_GATE(G_RTCAPB, RCC_APB5ENSETR, 8, 0),
+ K_MGATE(G_USART1, RCC_APB5ENSETR, 4, 0),
+ K_MGATE(G_I2C6, RCC_APB5ENSETR, 3, 0),
+ K_MGATE(G_I2C4, RCC_APB5ENSETR, 2, 0),
+ K_MGATE(G_SPI6, RCC_APB5ENSETR, 0, 0),
+
+ K_MGATE(G_SDMMC3, RCC_AHB2ENSETR, 16, 0),
+ K_MGATE(G_USBO, RCC_AHB2ENSETR, 8, 0),
+ K_MGATE(G_ADC12, RCC_AHB2ENSETR, 5, 0),
+ K_GATE(G_DMAMUX, RCC_AHB2ENSETR, 2, 0),
+ K_GATE(G_DMA2, RCC_AHB2ENSETR, 1, 0),
+ K_GATE(G_DMA1, RCC_AHB2ENSETR, 0, 0),
+
+ K_GATE(G_IPCC, RCC_AHB3ENSETR, 12, 0),
+ K_GATE(G_HSEM, RCC_AHB3ENSETR, 11, 0),
+ K_GATE(G_CRC2, RCC_AHB3ENSETR, 7, 0),
+ K_MGATE(G_RNG2, RCC_AHB3ENSETR, 6, 0),
+ K_GATE(G_HASH2, RCC_AHB3ENSETR, 5, 0),
+ K_GATE(G_CRYP2, RCC_AHB3ENSETR, 4, 0),
+ K_GATE(G_DCMI, RCC_AHB3ENSETR, 0, 0),
+
+ K_GATE(G_GPIOK, RCC_AHB4ENSETR, 10, 0),
+ K_GATE(G_GPIOJ, RCC_AHB4ENSETR, 9, 0),
+ K_GATE(G_GPIOI, RCC_AHB4ENSETR, 8, 0),
+ K_GATE(G_GPIOH, RCC_AHB4ENSETR, 7, 0),
+ K_GATE(G_GPIOG, RCC_AHB4ENSETR, 6, 0),
+ K_GATE(G_GPIOF, RCC_AHB4ENSETR, 5, 0),
+ K_GATE(G_GPIOE, RCC_AHB4ENSETR, 4, 0),
+ K_GATE(G_GPIOD, RCC_AHB4ENSETR, 3, 0),
+ K_GATE(G_GPIOC, RCC_AHB4ENSETR, 2, 0),
+ K_GATE(G_GPIOB, RCC_AHB4ENSETR, 1, 0),
+ K_GATE(G_GPIOA, RCC_AHB4ENSETR, 0, 0),
+
+ K_GATE(G_BKPSRAM, RCC_AHB5ENSETR, 8, 0),
+ K_MGATE(G_RNG1, RCC_AHB5ENSETR, 6, 0),
+ K_GATE(G_HASH1, RCC_AHB5ENSETR, 5, 0),
+ K_GATE(G_CRYP1, RCC_AHB5ENSETR, 4, 0),
+ K_GATE(G_GPIOZ, RCC_AHB5ENSETR, 0, 0),
+
+ K_GATE(G_USBH, RCC_AHB6ENSETR, 24, 0),
+ K_GATE(G_CRC1, RCC_AHB6ENSETR, 20, 0),
+ K_MGATE(G_SDMMC2, RCC_AHB6ENSETR, 17, 0),
+ K_MGATE(G_SDMMC1, RCC_AHB6ENSETR, 16, 0),
+ K_MGATE(G_QSPI, RCC_AHB6ENSETR, 14, 0),
+ K_MGATE(G_FMC, RCC_AHB6ENSETR, 12, 0),
+ K_GATE(G_ETHMAC, RCC_AHB6ENSETR, 10, 0),
+ K_GATE(G_ETHRX, RCC_AHB6ENSETR, 9, 0),
+ K_GATE(G_ETHTX, RCC_AHB6ENSETR, 8, 0),
+ K_GATE(G_ETHCK, RCC_AHB6ENSETR, 7, 0),
+ K_MGATE(G_GPU, RCC_AHB6ENSETR, 5, 0),
+ K_GATE(G_MDMA, RCC_AHB6ENSETR, 0, 0),
+ K_GATE(G_ETHSTP, RCC_AHB6LPENSETR, 11, 0),
+};
+
+enum {
+ M_SDMMC12,
+ M_SDMMC3,
+ M_FMC,
+ M_QSPI,
+ M_RNG1,
+ M_RNG2,
+ M_USBPHY,
+ M_USBO,
+ M_STGEN,
+ M_SPDIF,
+ M_SPI1,
+ M_SPI23,
+ M_SPI45,
+ M_SPI6,
+ M_CEC,
+ M_I2C12,
+ M_I2C35,
+ M_I2C46,
+ M_LPTIM1,
+ M_LPTIM23,
+ M_LPTIM45,
+ M_USART1,
+ M_UART24,
+ M_UART35,
+ M_USART6,
+ M_UART78,
+ M_SAI1,
+ M_SAI2,
+ M_SAI3,
+ M_SAI4,
+ M_DSI,
+ M_FDCAN,
+ M_ADC12,
+ M_ETHCK,
+ M_CKPER,
+ M_LAST
+};
+
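+/*
+ * Shared-mux bookkeeping: several kernel clocks can point at the same
+ * mux entry (e.g. spi2_k and spi3_k both use M_SPI23), so changing the
+ * parent of one clock also re-parents the others sharing the selector.
+ */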
+static struct stm32_mmux ker_mux[M_LAST];
+
+#define _K_MUX(_id, _offset, _shift, _width, _mux_flags, _mmux, _ops)\
+ [_id] = {\
+ &(struct mux_cfg) {\
+ .reg_off = _offset,\
+ .shift = _shift,\
+ .width = _width,\
+ .mux_flags = _mux_flags,\
+ .table = NULL,\
+ },\
+ .mmux = _mmux,\
+ .ops = _ops,\
+ }
+
+#define K_MUX(_id, _offset, _shift, _width, _mux_flags)\
+ _K_MUX(_id, _offset, _shift, _width, _mux_flags,\
+ NULL, NULL)
+
+#define K_MMUX(_id, _offset, _shift, _width, _mux_flags)\
+ _K_MUX(_id, _offset, _shift, _width, _mux_flags,\
+ &ker_mux[_id], &clk_mmux_ops)
+
+static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
+ /* Kernel multi mux */
+ K_MMUX(M_SDMMC12, RCC_SDMMC12CKSELR, 0, 3, 0),
+ K_MMUX(M_SPI23, RCC_SPI2S23CKSELR, 0, 3, 0),
+ K_MMUX(M_SPI45, RCC_SPI2S45CKSELR, 0, 3, 0),
+ K_MMUX(M_I2C12, RCC_I2C12CKSELR, 0, 3, 0),
+ K_MMUX(M_I2C35, RCC_I2C35CKSELR, 0, 3, 0),
+ K_MMUX(M_LPTIM23, RCC_LPTIM23CKSELR, 0, 3, 0),
+ K_MMUX(M_LPTIM45, RCC_LPTIM45CKSELR, 0, 3, 0),
+ K_MMUX(M_UART24, RCC_UART24CKSELR, 0, 3, 0),
+ K_MMUX(M_UART35, RCC_UART35CKSELR, 0, 3, 0),
+ K_MMUX(M_UART78, RCC_UART78CKSELR, 0, 3, 0),
+ K_MMUX(M_SAI1, RCC_SAI1CKSELR, 0, 3, 0),
+ K_MMUX(M_ETHCK, RCC_ETHCKSELR, 0, 2, 0),
+ K_MMUX(M_I2C46, RCC_I2C46CKSELR, 0, 3, 0),
+
+ /* Kernel simple mux */
+ K_MUX(M_RNG2, RCC_RNG2CKSELR, 0, 2, 0),
+ K_MUX(M_SDMMC3, RCC_SDMMC3CKSELR, 0, 3, 0),
+ K_MUX(M_FMC, RCC_FMCCKSELR, 0, 2, 0),
+ K_MUX(M_QSPI, RCC_QSPICKSELR, 0, 2, 0),
+ K_MUX(M_USBPHY, RCC_USBCKSELR, 0, 2, 0),
+ K_MUX(M_USBO, RCC_USBCKSELR, 4, 1, 0),
+ K_MUX(M_SPDIF, RCC_SPDIFCKSELR, 0, 2, 0),
+ K_MUX(M_SPI1, RCC_SPI2S1CKSELR, 0, 3, 0),
+ K_MUX(M_CEC, RCC_CECCKSELR, 0, 2, 0),
+ K_MUX(M_LPTIM1, RCC_LPTIM1CKSELR, 0, 3, 0),
+ K_MUX(M_USART6, RCC_UART6CKSELR, 0, 3, 0),
+ K_MUX(M_FDCAN, RCC_FDCANCKSELR, 0, 2, 0),
+ K_MUX(M_SAI2, RCC_SAI2CKSELR, 0, 3, 0),
+ K_MUX(M_SAI3, RCC_SAI3CKSELR, 0, 3, 0),
+ K_MUX(M_SAI4, RCC_SAI4CKSELR, 0, 3, 0),
+ K_MUX(M_ADC12, RCC_ADCCKSELR, 0, 2, 0),
+ K_MUX(M_DSI, RCC_DSICKSELR, 0, 1, 0),
+ K_MUX(M_CKPER, RCC_CPERCKSELR, 0, 2, 0),
+ K_MUX(M_RNG1, RCC_RNG1CKSELR, 0, 2, 0),
+ K_MUX(M_STGEN, RCC_STGENCKSELR, 0, 2, 0),
+ K_MUX(M_USART1, RCC_UART1CKSELR, 0, 3, 0),
+ K_MUX(M_SPI6, RCC_SPI6CKSELR, 0, 3, 0),
+};
+
+static const struct clock_config stm32mp1_clock_cfg[] = {
+ /* Oscillator divider */
+ DIV(NO_ID, "clk-hsi-div", "clk-hsi", 0, RCC_HSICFGR, 0, 2,
+ CLK_DIVIDER_READ_ONLY),
+
+ /* External / Internal Oscillators */
+ GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0),
+ GATE_MP1(CK_CSI, "ck_csi", "clk-csi", 0, RCC_OCENSETR, 4, 0),
+ GATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0),
+ GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0),
+ GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0),
+
+ FIXED_FACTOR(CK_HSE_DIV2, "clk-hse-div2", "ck_hse", 0, 1, 2),
+
+ /* ref clock pll */
+ MUX(NO_ID, "ref1", ref12_parents, CLK_OPS_PARENT_ENABLE, RCC_RCK12SELR,
+ 0, 2, CLK_MUX_READ_ONLY),
+
+ MUX(NO_ID, "ref3", ref3_parents, CLK_OPS_PARENT_ENABLE, RCC_RCK3SELR,
+ 0, 2, CLK_MUX_READ_ONLY),
+
+ MUX(NO_ID, "ref4", ref4_parents, CLK_OPS_PARENT_ENABLE, RCC_RCK4SELR,
+ 0, 2, CLK_MUX_READ_ONLY),
+
+ /* PLLs */
+ PLL(PLL1, "pll1", "ref1", CLK_IGNORE_UNUSED, RCC_PLL1CR),
+ PLL(PLL2, "pll2", "ref1", CLK_IGNORE_UNUSED, RCC_PLL2CR),
+ PLL(PLL3, "pll3", "ref3", CLK_IGNORE_UNUSED, RCC_PLL3CR),
+ PLL(PLL4, "pll4", "ref4", CLK_IGNORE_UNUSED, RCC_PLL4CR),
+
+ /* ODF */
+ COMPOSITE(PLL1_P, "pll1_p", PARENT("pll1"), 0,
+ _GATE(RCC_PLL1CR, 4, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL1CFGR2, 0, 7, 0, NULL)),
+
+ COMPOSITE(PLL2_P, "pll2_p", PARENT("pll2"), 0,
+ _GATE(RCC_PLL2CR, 4, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL2CFGR2, 0, 7, 0, NULL)),
+
+ COMPOSITE(PLL2_Q, "pll2_q", PARENT("pll2"), 0,
+ _GATE(RCC_PLL2CR, 5, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL2CFGR2, 8, 7, 0, NULL)),
+
+ COMPOSITE(PLL2_R, "pll2_r", PARENT("pll2"), CLK_IS_CRITICAL,
+ _GATE(RCC_PLL2CR, 6, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL2CFGR2, 16, 7, 0, NULL)),
+
+ COMPOSITE(PLL3_P, "pll3_p", PARENT("pll3"), 0,
+ _GATE(RCC_PLL3CR, 4, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL3CFGR2, 0, 7, 0, NULL)),
+
+ COMPOSITE(PLL3_Q, "pll3_q", PARENT("pll3"), 0,
+ _GATE(RCC_PLL3CR, 5, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL3CFGR2, 8, 7, 0, NULL)),
+
+ COMPOSITE(PLL3_R, "pll3_r", PARENT("pll3"), 0,
+ _GATE(RCC_PLL3CR, 6, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL3CFGR2, 16, 7, 0, NULL)),
+
+ COMPOSITE(PLL4_P, "pll4_p", PARENT("pll4"), 0,
+ _GATE(RCC_PLL4CR, 4, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL4CFGR2, 0, 7, 0, NULL)),
+
+ COMPOSITE(PLL4_Q, "pll4_q", PARENT("pll4"), 0,
+ _GATE(RCC_PLL4CR, 5, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL4CFGR2, 8, 7, 0, NULL)),
+
+ COMPOSITE(PLL4_R, "pll4_r", PARENT("pll4"), 0,
+ _GATE(RCC_PLL4CR, 6, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL4CFGR2, 16, 7, 0, NULL)),
+
+ /* MUX system clocks */
+ MUX(CK_PER, "ck_per", per_src, CLK_OPS_PARENT_ENABLE,
+ RCC_CPERCKSELR, 0, 2, 0),
+
+ MUX(CK_MPU, "ck_mpu", cpu_src, CLK_OPS_PARENT_ENABLE |
+ CLK_IS_CRITICAL, RCC_MPCKSELR, 0, 2, 0),
+
+ COMPOSITE(CK_AXI, "ck_axi", axi_src, CLK_IS_CRITICAL |
+ CLK_OPS_PARENT_ENABLE,
+ _NO_GATE,
+ _MUX(RCC_ASSCKSELR, 0, 2, 0),
+ _DIV(RCC_AXIDIVR, 0, 3, 0, axi_div_table)),
+
+ COMPOSITE(CK_MCU, "ck_mcu", mcu_src, CLK_IS_CRITICAL |
+ CLK_OPS_PARENT_ENABLE,
+ _NO_GATE,
+ _MUX(RCC_MSSCKSELR, 0, 2, 0),
+ _DIV(RCC_MCUDIVR, 0, 4, 0, mcu_div_table)),
+
+ DIV_TABLE(NO_ID, "pclk1", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB1DIVR, 0,
+ 3, CLK_DIVIDER_READ_ONLY, apb_div_table),
+
+ DIV_TABLE(NO_ID, "pclk2", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB2DIVR, 0,
+ 3, CLK_DIVIDER_READ_ONLY, apb_div_table),
+
+ DIV_TABLE(NO_ID, "pclk3", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB3DIVR, 0,
+ 3, CLK_DIVIDER_READ_ONLY, apb_div_table),
+
+ DIV_TABLE(NO_ID, "pclk4", "ck_axi", CLK_IGNORE_UNUSED, RCC_APB4DIVR, 0,
+ 3, CLK_DIVIDER_READ_ONLY, apb_div_table),
+
+ DIV_TABLE(NO_ID, "pclk5", "ck_axi", CLK_IGNORE_UNUSED, RCC_APB5DIVR, 0,
+ 3, CLK_DIVIDER_READ_ONLY, apb_div_table),
+
+ /* Kernel Timers */
+ STM32_CKTIM("ck1_tim", "pclk1", 0, RCC_APB1DIVR, RCC_TIMG1PRER),
+ STM32_CKTIM("ck2_tim", "pclk2", 0, RCC_APB2DIVR, RCC_TIMG2PRER),
+
+ STM32_TIM(TIM2_K, "tim2_k", "ck1_tim", RCC_APB1ENSETR, 0),
+ STM32_TIM(TIM3_K, "tim3_k", "ck1_tim", RCC_APB1ENSETR, 1),
+ STM32_TIM(TIM4_K, "tim4_k", "ck1_tim", RCC_APB1ENSETR, 2),
+ STM32_TIM(TIM5_K, "tim5_k", "ck1_tim", RCC_APB1ENSETR, 3),
+ STM32_TIM(TIM6_K, "tim6_k", "ck1_tim", RCC_APB1ENSETR, 4),
+ STM32_TIM(TIM7_K, "tim7_k", "ck1_tim", RCC_APB1ENSETR, 5),
+ STM32_TIM(TIM12_K, "tim12_k", "ck1_tim", RCC_APB1ENSETR, 6),
+ STM32_TIM(TIM13_K, "tim13_k", "ck1_tim", RCC_APB1ENSETR, 7),
+ STM32_TIM(TIM14_K, "tim14_k", "ck1_tim", RCC_APB1ENSETR, 8),
+ STM32_TIM(TIM1_K, "tim1_k", "ck2_tim", RCC_APB2ENSETR, 0),
+ STM32_TIM(TIM8_K, "tim8_k", "ck2_tim", RCC_APB2ENSETR, 1),
+ STM32_TIM(TIM15_K, "tim15_k", "ck2_tim", RCC_APB2ENSETR, 2),
+ STM32_TIM(TIM16_K, "tim16_k", "ck2_tim", RCC_APB2ENSETR, 3),
+ STM32_TIM(TIM17_K, "tim17_k", "ck2_tim", RCC_APB2ENSETR, 4),
+
+ /* Peripheral clocks */
+ PCLK(TIM2, "tim2", "pclk1", CLK_IGNORE_UNUSED, G_TIM2),
+ PCLK(TIM3, "tim3", "pclk1", CLK_IGNORE_UNUSED, G_TIM3),
+ PCLK(TIM4, "tim4", "pclk1", CLK_IGNORE_UNUSED, G_TIM4),
+ PCLK(TIM5, "tim5", "pclk1", CLK_IGNORE_UNUSED, G_TIM5),
+ PCLK(TIM6, "tim6", "pclk1", CLK_IGNORE_UNUSED, G_TIM6),
+ PCLK(TIM7, "tim7", "pclk1", CLK_IGNORE_UNUSED, G_TIM7),
+ PCLK(TIM12, "tim12", "pclk1", CLK_IGNORE_UNUSED, G_TIM12),
+ PCLK(TIM13, "tim13", "pclk1", CLK_IGNORE_UNUSED, G_TIM13),
+ PCLK(TIM14, "tim14", "pclk1", CLK_IGNORE_UNUSED, G_TIM14),
+ PCLK(LPTIM1, "lptim1", "pclk1", 0, G_LPTIM1),
+ PCLK(SPI2, "spi2", "pclk1", 0, G_SPI2),
+ PCLK(SPI3, "spi3", "pclk1", 0, G_SPI3),
+ PCLK(USART2, "usart2", "pclk1", 0, G_USART2),
+ PCLK(USART3, "usart3", "pclk1", 0, G_USART3),
+ PCLK(UART4, "uart4", "pclk1", 0, G_UART4),
+ PCLK(UART5, "uart5", "pclk1", 0, G_UART5),
+ PCLK(UART7, "uart7", "pclk1", 0, G_UART7),
+ PCLK(UART8, "uart8", "pclk1", 0, G_UART8),
+ PCLK(I2C1, "i2c1", "pclk1", 0, G_I2C1),
+ PCLK(I2C2, "i2c2", "pclk1", 0, G_I2C2),
+ PCLK(I2C3, "i2c3", "pclk1", 0, G_I2C3),
+ PCLK(I2C5, "i2c5", "pclk1", 0, G_I2C5),
+ PCLK(SPDIF, "spdif", "pclk1", 0, G_SPDIF),
+ PCLK(CEC, "cec", "pclk1", 0, G_CEC),
+ PCLK(DAC12, "dac12", "pclk1", 0, G_DAC12),
+ PCLK(MDIO, "mdio", "pclk1", 0, G_MDIO),
+ PCLK(TIM1, "tim1", "pclk2", CLK_IGNORE_UNUSED, G_TIM1),
+ PCLK(TIM8, "tim8", "pclk2", CLK_IGNORE_UNUSED, G_TIM8),
+ PCLK(TIM15, "tim15", "pclk2", CLK_IGNORE_UNUSED, G_TIM15),
+ PCLK(TIM16, "tim16", "pclk2", CLK_IGNORE_UNUSED, G_TIM16),
+ PCLK(TIM17, "tim17", "pclk2", CLK_IGNORE_UNUSED, G_TIM17),
+ PCLK(SPI1, "spi1", "pclk2", 0, G_SPI1),
+ PCLK(SPI4, "spi4", "pclk2", 0, G_SPI4),
+ PCLK(SPI5, "spi5", "pclk2", 0, G_SPI5),
+ PCLK(USART6, "usart6", "pclk2", 0, G_USART6),
+ PCLK(SAI1, "sai1", "pclk2", 0, G_SAI1),
+ PCLK(SAI2, "sai2", "pclk2", 0, G_SAI2),
+ PCLK(SAI3, "sai3", "pclk2", 0, G_SAI3),
+ PCLK(DFSDM, "dfsdm", "pclk2", 0, G_DFSDM),
+ PCLK(FDCAN, "fdcan", "pclk2", 0, G_FDCAN),
+ PCLK(LPTIM2, "lptim2", "pclk3", 0, G_LPTIM2),
+ PCLK(LPTIM3, "lptim3", "pclk3", 0, G_LPTIM3),
+ PCLK(LPTIM4, "lptim4", "pclk3", 0, G_LPTIM4),
+ PCLK(LPTIM5, "lptim5", "pclk3", 0, G_LPTIM5),
+ PCLK(SAI4, "sai4", "pclk3", 0, G_SAI4),
+ PCLK(SYSCFG, "syscfg", "pclk3", 0, G_SYSCFG),
+ PCLK(VREF, "vref", "pclk3", 0, G_VREF),
+ PCLK(TMPSENS, "tmpsens", "pclk3", 0, G_TMPSENS),
+ PCLK(PMBCTRL, "pmbctrl", "pclk3", 0, G_PMBCTRL),
+ PCLK(HDP, "hdp", "pclk3", 0, G_HDP),
+ PCLK(LTDC, "ltdc", "pclk4", 0, G_LTDC),
+ PCLK(DSI, "dsi", "pclk4", 0, G_DSI),
+ PCLK(IWDG2, "iwdg2", "pclk4", 0, G_IWDG2),
+ PCLK(USBPHY, "usbphy", "pclk4", 0, G_USBPHY),
+ PCLK(STGENRO, "stgenro", "pclk4", 0, G_STGENRO),
+ PCLK(SPI6, "spi6", "pclk5", 0, G_SPI6),
+ PCLK(I2C4, "i2c4", "pclk5", 0, G_I2C4),
+ PCLK(I2C6, "i2c6", "pclk5", 0, G_I2C6),
+ PCLK(USART1, "usart1", "pclk5", 0, G_USART1),
+ PCLK(RTCAPB, "rtcapb", "pclk5", CLK_IGNORE_UNUSED |
+ CLK_IS_CRITICAL, G_RTCAPB),
+ PCLK(TZC1, "tzc1", "ck_axi", CLK_IGNORE_UNUSED, G_TZC1),
+ PCLK(TZC2, "tzc2", "ck_axi", CLK_IGNORE_UNUSED, G_TZC2),
+ PCLK(TZPC, "tzpc", "pclk5", CLK_IGNORE_UNUSED, G_TZPC),
+ PCLK(IWDG1, "iwdg1", "pclk5", 0, G_IWDG1),
+ PCLK(BSEC, "bsec", "pclk5", CLK_IGNORE_UNUSED, G_BSEC),
+ PCLK(STGEN, "stgen", "pclk5", CLK_IGNORE_UNUSED, G_STGEN),
+ PCLK(DMA1, "dma1", "ck_mcu", 0, G_DMA1),
+ PCLK(DMA2, "dma2", "ck_mcu", 0, G_DMA2),
+ PCLK(DMAMUX, "dmamux", "ck_mcu", 0, G_DMAMUX),
+ PCLK(ADC12, "adc12", "ck_mcu", 0, G_ADC12),
+ PCLK(USBO, "usbo", "ck_mcu", 0, G_USBO),
+ PCLK(SDMMC3, "sdmmc3", "ck_mcu", 0, G_SDMMC3),
+ PCLK(DCMI, "dcmi", "ck_mcu", 0, G_DCMI),
+ PCLK(CRYP2, "cryp2", "ck_mcu", 0, G_CRYP2),
+ PCLK(HASH2, "hash2", "ck_mcu", 0, G_HASH2),
+ PCLK(RNG2, "rng2", "ck_mcu", 0, G_RNG2),
+ PCLK(CRC2, "crc2", "ck_mcu", 0, G_CRC2),
+ PCLK(HSEM, "hsem", "ck_mcu", 0, G_HSEM),
+ PCLK(IPCC, "ipcc", "ck_mcu", 0, G_IPCC),
+ PCLK(GPIOA, "gpioa", "ck_mcu", 0, G_GPIOA),
+ PCLK(GPIOB, "gpiob", "ck_mcu", 0, G_GPIOB),
+ PCLK(GPIOC, "gpioc", "ck_mcu", 0, G_GPIOC),
+ PCLK(GPIOD, "gpiod", "ck_mcu", 0, G_GPIOD),
+ PCLK(GPIOE, "gpioe", "ck_mcu", 0, G_GPIOE),
+ PCLK(GPIOF, "gpiof", "ck_mcu", 0, G_GPIOF),
+ PCLK(GPIOG, "gpiog", "ck_mcu", 0, G_GPIOG),
+ PCLK(GPIOH, "gpioh", "ck_mcu", 0, G_GPIOH),
+ PCLK(GPIOI, "gpioi", "ck_mcu", 0, G_GPIOI),
+ PCLK(GPIOJ, "gpioj", "ck_mcu", 0, G_GPIOJ),
+ PCLK(GPIOK, "gpiok", "ck_mcu", 0, G_GPIOK),
+ PCLK(GPIOZ, "gpioz", "ck_axi", CLK_IGNORE_UNUSED, G_GPIOZ),
+ PCLK(CRYP1, "cryp1", "ck_axi", CLK_IGNORE_UNUSED, G_CRYP1),
+ PCLK(HASH1, "hash1", "ck_axi", CLK_IGNORE_UNUSED, G_HASH1),
+ PCLK(RNG1, "rng1", "ck_axi", 0, G_RNG1),
+ PCLK(BKPSRAM, "bkpsram", "ck_axi", CLK_IGNORE_UNUSED, G_BKPSRAM),
+ PCLK(MDMA, "mdma", "ck_axi", 0, G_MDMA),
+ PCLK(GPU, "gpu", "ck_axi", 0, G_GPU),
+ PCLK(ETHTX, "ethtx", "ck_axi", 0, G_ETHTX),
+ PCLK(ETHRX, "ethrx", "ck_axi", 0, G_ETHRX),
+ PCLK(ETHMAC, "ethmac", "ck_axi", 0, G_ETHMAC),
+ PCLK(FMC, "fmc", "ck_axi", CLK_IGNORE_UNUSED, G_FMC),
+ PCLK(QSPI, "qspi", "ck_axi", CLK_IGNORE_UNUSED, G_QSPI),
+ PCLK(SDMMC1, "sdmmc1", "ck_axi", 0, G_SDMMC1),
+ PCLK(SDMMC2, "sdmmc2", "ck_axi", 0, G_SDMMC2),
+ PCLK(CRC1, "crc1", "ck_axi", 0, G_CRC1),
+ PCLK(USBH, "usbh", "ck_axi", 0, G_USBH),
+ PCLK(ETHSTP, "ethstp", "ck_axi", 0, G_ETHSTP),
+
+ /* Kernel clocks */
+ KCLK(SDMMC1_K, "sdmmc1_k", sdmmc12_src, 0, G_SDMMC1, M_SDMMC12),
+ KCLK(SDMMC2_K, "sdmmc2_k", sdmmc12_src, 0, G_SDMMC2, M_SDMMC12),
+ KCLK(SDMMC3_K, "sdmmc3_k", sdmmc3_src, 0, G_SDMMC3, M_SDMMC3),
+ KCLK(FMC_K, "fmc_k", fmc_src, 0, G_FMC, M_FMC),
+ KCLK(QSPI_K, "qspi_k", qspi_src, 0, G_QSPI, M_QSPI),
+ KCLK(RNG1_K, "rng1_k", rng_src, 0, G_RNG1, M_RNG1),
+ KCLK(RNG2_K, "rng2_k", rng_src, 0, G_RNG2, M_RNG2),
+ KCLK(USBPHY_K, "usbphy_k", usbphy_src, 0, G_USBPHY, M_USBPHY),
+ KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IS_CRITICAL, G_STGEN, M_STGEN),
+ KCLK(SPDIF_K, "spdif_k", spdif_src, 0, G_SPDIF, M_SPDIF),
+ KCLK(SPI1_K, "spi1_k", spi123_src, 0, G_SPI1, M_SPI1),
+ KCLK(SPI2_K, "spi2_k", spi123_src, 0, G_SPI2, M_SPI23),
+ KCLK(SPI3_K, "spi3_k", spi123_src, 0, G_SPI3, M_SPI23),
+ KCLK(SPI4_K, "spi4_k", spi45_src, 0, G_SPI4, M_SPI45),
+ KCLK(SPI5_K, "spi5_k", spi45_src, 0, G_SPI5, M_SPI45),
+ KCLK(SPI6_K, "spi6_k", spi6_src, 0, G_SPI6, M_SPI6),
+ KCLK(CEC_K, "cec_k", cec_src, 0, G_CEC, M_CEC),
+ KCLK(I2C1_K, "i2c1_k", i2c12_src, 0, G_I2C1, M_I2C12),
+ KCLK(I2C2_K, "i2c2_k", i2c12_src, 0, G_I2C2, M_I2C12),
+ KCLK(I2C3_K, "i2c3_k", i2c35_src, 0, G_I2C3, M_I2C35),
+ KCLK(I2C5_K, "i2c5_k", i2c35_src, 0, G_I2C5, M_I2C35),
+ KCLK(I2C4_K, "i2c4_k", i2c46_src, 0, G_I2C4, M_I2C46),
+ KCLK(I2C6_K, "i2c6_k", i2c46_src, 0, G_I2C6, M_I2C46),
+ KCLK(LPTIM1_K, "lptim1_k", lptim1_src, 0, G_LPTIM1, M_LPTIM1),
+ KCLK(LPTIM2_K, "lptim2_k", lptim23_src, 0, G_LPTIM2, M_LPTIM23),
+ KCLK(LPTIM3_K, "lptim3_k", lptim23_src, 0, G_LPTIM3, M_LPTIM23),
+ KCLK(LPTIM4_K, "lptim4_k", lptim45_src, 0, G_LPTIM4, M_LPTIM45),
+ KCLK(LPTIM5_K, "lptim5_k", lptim45_src, 0, G_LPTIM5, M_LPTIM45),
+ KCLK(USART1_K, "usart1_k", usart1_src, 0, G_USART1, M_USART1),
+ KCLK(USART2_K, "usart2_k", usart234578_src, 0, G_USART2, M_UART24),
+ KCLK(USART3_K, "usart3_k", usart234578_src, 0, G_USART3, M_UART35),
+ KCLK(UART4_K, "uart4_k", usart234578_src, 0, G_UART4, M_UART24),
+ KCLK(UART5_K, "uart5_k", usart234578_src, 0, G_UART5, M_UART35),
+ KCLK(USART6_K, "uart6_k", usart6_src, 0, G_USART6, M_USART6),
+ KCLK(UART7_K, "uart7_k", usart234578_src, 0, G_UART7, M_UART78),
+ KCLK(UART8_K, "uart8_k", usart234578_src, 0, G_UART8, M_UART78),
+ KCLK(FDCAN_K, "fdcan_k", fdcan_src, 0, G_FDCAN, M_FDCAN),
+ KCLK(SAI1_K, "sai1_k", sai_src, 0, G_SAI1, M_SAI1),
+ KCLK(SAI2_K, "sai2_k", sai2_src, 0, G_SAI2, M_SAI2),
+ KCLK(SAI3_K, "sai3_k", sai_src, 0, G_SAI3, M_SAI3),
+ KCLK(SAI4_K, "sai4_k", sai_src, 0, G_SAI4, M_SAI4),
+ KCLK(ADC12_K, "adc12_k", adc12_src, 0, G_ADC12, M_ADC12),
+ KCLK(DSI_K, "dsi_k", dsi_src, 0, G_DSI, M_DSI),
+ KCLK(ADFSDM_K, "adfsdm_k", sai_src, 0, G_ADFSDM, M_SAI1),
+ KCLK(USBO_K, "usbo_k", usbo_src, 0, G_USBO, M_USBO),
+ KCLK(ETHCK_K, "ethck_k", eth_src, 0, G_ETHCK, M_ETHCK),
+
+ /* Particular kernel clocks (no mux or no gate) */
+ MGATE_MP1(DFSDM_K, "dfsdm_k", "ck_mcu", 0, G_DFSDM),
+ MGATE_MP1(DSI_PX, "dsi_px", "pll4_q", CLK_SET_RATE_PARENT, G_DSI),
+ MGATE_MP1(LTDC_PX, "ltdc_px", "pll4_q", CLK_SET_RATE_PARENT, G_LTDC),
+ MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU),
+ MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12),
+
+ COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE,
+ _NO_GATE,
+ _MMUX(M_ETHCK),
+ _DIV(RCC_ETHCKSELR, 4, 4, CLK_DIVIDER_ALLOW_ZERO, NULL)),
+
+ /* RTC clock */
+ DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 7,
+ CLK_DIVIDER_ALLOW_ZERO),
+
+ COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE |
+ CLK_SET_RATE_PARENT,
+ _GATE(RCC_BDCR, 20, 0),
+ _MUX(RCC_BDCR, 16, 2, 0),
+ _NO_DIV),
+
+ /* MCO clocks */
+ COMPOSITE(CK_MCO1, "ck_mco1", mco1_src, CLK_OPS_PARENT_ENABLE |
+ CLK_SET_RATE_NO_REPARENT,
+ _GATE(RCC_MCO1CFGR, 12, 0),
+ _MUX(RCC_MCO1CFGR, 0, 3, 0),
+ _DIV(RCC_MCO1CFGR, 4, 4, 0, NULL)),
+
+ COMPOSITE(CK_MCO2, "ck_mco2", mco2_src, CLK_OPS_PARENT_ENABLE |
+ CLK_SET_RATE_NO_REPARENT,
+ _GATE(RCC_MCO2CFGR, 12, 0),
+ _MUX(RCC_MCO2CFGR, 0, 3, 0),
+ _DIV(RCC_MCO2CFGR, 4, 4, 0, NULL)),
+
+ /* Debug clocks */
+ GATE(CK_DBG, "ck_sys_dbg", "ck_axi", 0, RCC_DBGCFGR, 8, 0),
+
+ COMPOSITE(CK_TRACE, "ck_trace", ck_trace_src, CLK_OPS_PARENT_ENABLE,
+ _GATE(RCC_DBGCFGR, 9, 0),
+ _NO_MUX,
+ _DIV(RCC_DBGCFGR, 0, 3, 0, ck_trace_div_table)),
+};
+
+struct stm32_clock_match_data {
+ const struct clock_config *cfg;
+ unsigned int num;
+ unsigned int maxbinding;
+};
+
+static struct stm32_clock_match_data stm32mp1_data = {
+ .cfg = stm32mp1_clock_cfg,
+ .num = ARRAY_SIZE(stm32mp1_clock_cfg),
+ .maxbinding = STM32MP1_LAST_CLK,
+};
+
+static const struct of_device_id stm32mp1_match_data[] = {
+ {
+ .compatible = "st,stm32mp1-rcc",
+ .data = &stm32mp1_data,
+ },
+ { }
+};
+
+static int stm32_register_hw_clk(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct clk_hw **hws;
+ struct clk_hw *hw = ERR_PTR(-ENOENT);
+
+ hws = clk_data->hws;
+
+ if (cfg->func)
+ hw = (*cfg->func)(dev, clk_data, base, lock, cfg);
+
+ if (IS_ERR(hw)) {
+ pr_err("Unable to register %s\n", cfg->name);
+ return PTR_ERR(hw);
+ }
+
+ if (cfg->id != NO_ID)
+ hws[cfg->id] = hw;
+
+ return 0;
+}
+
+static int stm32_rcc_init(struct device_node *np,
+ void __iomem *base,
+ const struct of_device_id *match_data)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **hws;
+ const struct of_device_id *match;
+ const struct stm32_clock_match_data *data;
+ int err, n, max_binding;
+
+ match = of_match_node(match_data, np);
+ if (!match) {
+ pr_err("%s: match data not found\n", __func__);
+ return -ENODEV;
+ }
+
+ data = match->data;
+
+ max_binding = data->maxbinding;
+
+ clk_data = kzalloc(sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * max_binding,
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = max_binding;
+
+ hws = clk_data->hws;
+
+ for (n = 0; n < max_binding; n++)
+ hws[n] = ERR_PTR(-ENOENT);
+
+ for (n = 0; n < data->num; n++) {
+ err = stm32_register_hw_clk(NULL, clk_data, base, &rlock,
+ &data->cfg[n]);
+ if (err) {
+ pr_err("%s: can't register %s\n", __func__,
+ data->cfg[n].name);
+
+ kfree(clk_data);
+
+ return err;
+ }
+ }
+
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+}
+
+static void stm32mp1_rcc_init(struct device_node *np)
+{
+ void __iomem *base;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%s: unable to map resource", np->name);
+ of_node_put(np);
+ return;
+ }
+
+ if (stm32_rcc_init(np, base, stm32mp1_match_data)) {
+ iounmap(base);
+ of_node_put(np);
+ }
+}
+
+CLK_OF_DECLARE_DRIVER(stm32mp1_rcc, "st,stm32mp1-rcc", stm32mp1_rcc_init);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 076d4244d672..7af555f0e60c 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -426,9 +426,9 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now,
return now <= rate && now > best;
}
-static int
-clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
- unsigned long flags)
+int clk_mux_determine_rate_flags(struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long flags)
{
struct clk_core *core = hw->core, *parent, *best_parent = NULL;
int i, num_parents, ret;
@@ -488,6 +488,7 @@ out:
return 0;
}
+EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
struct clk *__clk_lookup(const char *name)
{
@@ -2375,6 +2376,9 @@ static int clk_core_get_phase(struct clk_core *core)
int ret;
clk_prepare_lock();
+ /* Always try to update cached phase if possible */
+ if (core->ops->get_phase)
+ core->phase = core->ops->get_phase(core->hw);
ret = core->phase;
clk_prepare_unlock();
@@ -2491,19 +2495,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
return 0;
}
-
-
-static int clk_summary_open(struct inode *inode, struct file *file)
-{
- return single_open(file, clk_summary_show, inode->i_private);
-}
-
-static const struct file_operations clk_summary_fops = {
- .open = clk_summary_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(clk_summary);
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
@@ -2537,7 +2529,7 @@ static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
seq_putc(s, '}');
}
-static int clk_dump(struct seq_file *s, void *data)
+static int clk_dump_show(struct seq_file *s, void *data)
{
struct clk_core *c;
bool first_node = true;
@@ -2560,19 +2552,7 @@ static int clk_dump(struct seq_file *s, void *data)
seq_puts(s, "}\n");
return 0;
}
-
-
-static int clk_dump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, clk_dump, inode->i_private);
-}
-
-static const struct file_operations clk_dump_fops = {
- .open = clk_dump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(clk_dump);
static const struct {
unsigned long flag;
@@ -2594,7 +2574,7 @@ static const struct {
#undef ENTRY
};
-static int clk_flags_dump(struct seq_file *s, void *data)
+static int clk_flags_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
unsigned long flags = core->flags;
@@ -2613,20 +2593,9 @@ static int clk_flags_dump(struct seq_file *s, void *data)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(clk_flags);
-static int clk_flags_open(struct inode *inode, struct file *file)
-{
- return single_open(file, clk_flags_dump, inode->i_private);
-}
-
-static const struct file_operations clk_flags_fops = {
- .open = clk_flags_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int possible_parents_dump(struct seq_file *s, void *data)
+static int possible_parents_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
int i;
@@ -2638,18 +2607,7 @@ static int possible_parents_dump(struct seq_file *s, void *data)
return 0;
}
-
-static int possible_parents_open(struct inode *inode, struct file *file)
-{
- return single_open(file, possible_parents_dump, inode->i_private);
-}
-
-static const struct file_operations possible_parents_fops = {
- .open = possible_parents_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(possible_parents);
static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
@@ -2933,6 +2891,17 @@ static int __clk_core_init(struct clk_core *core)
}
/*
+ * optional platform-specific magic
+ *
+ * The .init callback is not used by any of the basic clock types, but
+ * exists for weird hardware that must perform initialization magic.
+ * Please consider other ways of solving initialization problems before
+ * using this callback, as its use is discouraged.
+ */
+ if (core->ops->init)
+ core->ops->init(core->hw);
+
+ /*
* Set clk's accuracy. The preferred method is to use
* .recalc_accuracy. For simple clocks and lazy developers the default
* fallback is to use the parent's accuracy. If a clock doesn't have a
@@ -3009,17 +2978,6 @@ static int __clk_core_init(struct clk_core *core)
}
}
- /*
- * optional platform-specific magic
- *
- * The .init callback is not used by any of the basic clock types, but
- * exists for weird hardware that must perform initialization magic.
- * Please consider other ways of solving initialization problems before
- * using this callback, as its use is discouraged.
- */
- if (core->ops->init)
- core->ops->init(core->hw);
-
kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
diff --git a/drivers/clk/davinci/Makefile b/drivers/clk/davinci/Makefile
new file mode 100644
index 000000000000..11178b79b483
--- /dev/null
+++ b/drivers/clk/davinci/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ifeq ($(CONFIG_COMMON_CLK), y)
+obj-$(CONFIG_ARCH_DAVINCI_DA8XX) += da8xx-cfgchip.o
+
+obj-y += pll.o
+obj-$(CONFIG_ARCH_DAVINCI_DA830) += pll-da830.o
+obj-$(CONFIG_ARCH_DAVINCI_DA850) += pll-da850.o
+obj-$(CONFIG_ARCH_DAVINCI_DM355) += pll-dm355.o
+obj-$(CONFIG_ARCH_DAVINCI_DM365) += pll-dm365.o
+obj-$(CONFIG_ARCH_DAVINCI_DM644x) += pll-dm644x.o
+obj-$(CONFIG_ARCH_DAVINCI_DM646x) += pll-dm646x.o
+
+obj-y += psc.o
+obj-$(CONFIG_ARCH_DAVINCI_DA830) += psc-da830.o
+obj-$(CONFIG_ARCH_DAVINCI_DA850) += psc-da850.o
+obj-$(CONFIG_ARCH_DAVINCI_DM355) += psc-dm355.o
+obj-$(CONFIG_ARCH_DAVINCI_DM365) += psc-dm365.o
+obj-$(CONFIG_ARCH_DAVINCI_DM644x) += psc-dm644x.o
+obj-$(CONFIG_ARCH_DAVINCI_DM646x) += psc-dm646x.o
+endif
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
new file mode 100644
index 000000000000..c971111d2601
--- /dev/null
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -0,0 +1,790 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock driver for DA8xx/AM17xx/AM18xx/OMAP-L13x CFGCHIP
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/mfd/da8xx-cfgchip.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_data/clk-da8xx-cfgchip.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* --- Gate clocks --- */
+
+#define DA8XX_GATE_CLOCK_IS_DIV4P5 BIT(1)
+
+struct da8xx_cfgchip_gate_clk_info {
+ const char *name;
+ u32 cfgchip;
+ u32 bit;
+ u32 flags;
+};
+
+struct da8xx_cfgchip_gate_clk {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u32 reg;
+ u32 mask;
+};
+
+#define to_da8xx_cfgchip_gate_clk(_hw) \
+ container_of((_hw), struct da8xx_cfgchip_gate_clk, hw)
+
+static int da8xx_cfgchip_gate_clk_enable(struct clk_hw *hw)
+{
+ struct da8xx_cfgchip_gate_clk *clk = to_da8xx_cfgchip_gate_clk(hw);
+
+ return regmap_write_bits(clk->regmap, clk->reg, clk->mask, clk->mask);
+}
+
+static void da8xx_cfgchip_gate_clk_disable(struct clk_hw *hw)
+{
+ struct da8xx_cfgchip_gate_clk *clk = to_da8xx_cfgchip_gate_clk(hw);
+
+ regmap_write_bits(clk->regmap, clk->reg, clk->mask, 0);
+}
+
+static int da8xx_cfgchip_gate_clk_is_enabled(struct clk_hw *hw)
+{
+ struct da8xx_cfgchip_gate_clk *clk = to_da8xx_cfgchip_gate_clk(hw);
+ unsigned int val;
+
+ regmap_read(clk->regmap, clk->reg, &val);
+
+ return !!(val & clk->mask);
+}
+
+static unsigned long da8xx_cfgchip_div4p5_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ /* this clock divides by 4.5 */
+ return parent_rate * 2 / 9;
+}
+
+static const struct clk_ops da8xx_cfgchip_gate_clk_ops = {
+ .enable = da8xx_cfgchip_gate_clk_enable,
+ .disable = da8xx_cfgchip_gate_clk_disable,
+ .is_enabled = da8xx_cfgchip_gate_clk_is_enabled,
+};
+
+static const struct clk_ops da8xx_cfgchip_div4p5_clk_ops = {
+ .enable = da8xx_cfgchip_gate_clk_enable,
+ .disable = da8xx_cfgchip_gate_clk_disable,
+ .is_enabled = da8xx_cfgchip_gate_clk_is_enabled,
+ .recalc_rate = da8xx_cfgchip_div4p5_recalc_rate,
+};
+
+static struct da8xx_cfgchip_gate_clk * __init
+da8xx_cfgchip_gate_clk_register(struct device *dev,
+ const struct da8xx_cfgchip_gate_clk_info *info,
+ struct regmap *regmap)
+{
+ struct clk *parent;
+ const char *parent_name;
+ struct da8xx_cfgchip_gate_clk *gate;
+ struct clk_init_data init;
+ int ret;
+
+ parent = devm_clk_get(dev, NULL);
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = info->name;
+ if (info->flags & DA8XX_GATE_CLOCK_IS_DIV4P5)
+ init.ops = &da8xx_cfgchip_div4p5_clk_ops;
+ else
+ init.ops = &da8xx_cfgchip_gate_clk_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ gate->hw.init = &init;
+ gate->regmap = regmap;
+ gate->reg = info->cfgchip;
+ gate->mask = info->bit;
+
+ ret = devm_clk_hw_register(dev, &gate->hw);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return gate;
+}
+
+static const struct da8xx_cfgchip_gate_clk_info da8xx_tbclksync_info __initconst = {
+ .name = "ehrpwm_tbclk",
+ .cfgchip = CFGCHIP(1),
+ .bit = CFGCHIP1_TBCLKSYNC,
+};
+
+static int __init da8xx_cfgchip_register_tbclk(struct device *dev,
+ struct regmap *regmap)
+{
+ struct da8xx_cfgchip_gate_clk *gate;
+
+ gate = da8xx_cfgchip_gate_clk_register(dev, &da8xx_tbclksync_info,
+ regmap);
+ if (IS_ERR(gate))
+ return PTR_ERR(gate);
+
+ clk_hw_register_clkdev(&gate->hw, "tbclk", "ehrpwm.0");
+ clk_hw_register_clkdev(&gate->hw, "tbclk", "ehrpwm.1");
+
+ return 0;
+}
+
+static const struct da8xx_cfgchip_gate_clk_info da8xx_div4p5ena_info __initconst = {
+ .name = "div4.5",
+ .cfgchip = CFGCHIP(3),
+ .bit = CFGCHIP3_DIV45PENA,
+ .flags = DA8XX_GATE_CLOCK_IS_DIV4P5,
+};
+
+static int __init da8xx_cfgchip_register_div4p5(struct device *dev,
+ struct regmap *regmap)
+{
+ struct da8xx_cfgchip_gate_clk *gate;
+
+ gate = da8xx_cfgchip_gate_clk_register(dev, &da8xx_div4p5ena_info, regmap);
+ if (IS_ERR(gate))
+ return PTR_ERR(gate);
+
+ return 0;
+}
+
+static int __init
+of_da8xx_cfgchip_gate_clk_init(struct device *dev,
+ const struct da8xx_cfgchip_gate_clk_info *info,
+ struct regmap *regmap)
+{
+ struct da8xx_cfgchip_gate_clk *gate;
+
+ gate = da8xx_cfgchip_gate_clk_register(dev, info, regmap);
+ if (IS_ERR(gate))
+ return PTR_ERR(gate);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, gate);
+}
+
+static int __init of_da8xx_tbclksync_init(struct device *dev,
+ struct regmap *regmap)
+{
+ return of_da8xx_cfgchip_gate_clk_init(dev, &da8xx_tbclksync_info, regmap);
+}
+
+static int __init of_da8xx_div4p5ena_init(struct device *dev,
+ struct regmap *regmap)
+{
+ return of_da8xx_cfgchip_gate_clk_init(dev, &da8xx_div4p5ena_info, regmap);
+}
+
+/* --- MUX clocks --- */
+
+struct da8xx_cfgchip_mux_clk_info {
+ const char *name;
+ const char *parent0;
+ const char *parent1;
+ u32 cfgchip;
+ u32 bit;
+};
+
+struct da8xx_cfgchip_mux_clk {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u32 reg;
+ u32 mask;
+};
+
+#define to_da8xx_cfgchip_mux_clk(_hw) \
+ container_of((_hw), struct da8xx_cfgchip_mux_clk, hw)
+
+static int da8xx_cfgchip_mux_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct da8xx_cfgchip_mux_clk *clk = to_da8xx_cfgchip_mux_clk(hw);
+ unsigned int val = index ? clk->mask : 0;
+
+ return regmap_write_bits(clk->regmap, clk->reg, clk->mask, val);
+}
+
+static u8 da8xx_cfgchip_mux_clk_get_parent(struct clk_hw *hw)
+{
+ struct da8xx_cfgchip_mux_clk *clk = to_da8xx_cfgchip_mux_clk(hw);
+ unsigned int val;
+
+ regmap_read(clk->regmap, clk->reg, &val);
+
+ return (val & clk->mask) ? 1 : 0;
+}
+
+static const struct clk_ops da8xx_cfgchip_mux_clk_ops = {
+ .set_parent = da8xx_cfgchip_mux_clk_set_parent,
+ .get_parent = da8xx_cfgchip_mux_clk_get_parent,
+};
+
+static struct da8xx_cfgchip_mux_clk * __init
+da8xx_cfgchip_mux_clk_register(struct device *dev,
+ const struct da8xx_cfgchip_mux_clk_info *info,
+ struct regmap *regmap)
+{
+ const char * const parent_names[] = { info->parent0, info->parent1 };
+ struct da8xx_cfgchip_mux_clk *mux;
+ struct clk_init_data init;
+ int ret;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = info->name;
+ init.ops = &da8xx_cfgchip_mux_clk_ops;
+ init.parent_names = parent_names;
+ init.num_parents = 2;
+ init.flags = 0;
+
+ mux->hw.init = &init;
+ mux->regmap = regmap;
+ mux->reg = info->cfgchip;
+ mux->mask = info->bit;
+
+ ret = devm_clk_hw_register(dev, &mux->hw);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return mux;
+}
+
+static const struct da8xx_cfgchip_mux_clk_info da850_async1_info __initconst = {
+ .name = "async1",
+ .parent0 = "pll0_sysclk3",
+ .parent1 = "div4.5",
+ .cfgchip = CFGCHIP(3),
+ .bit = CFGCHIP3_EMA_CLKSRC,
+};
+
+static int __init da8xx_cfgchip_register_async1(struct device *dev,
+ struct regmap *regmap)
+{
+ struct da8xx_cfgchip_mux_clk *mux;
+
+ mux = da8xx_cfgchip_mux_clk_register(dev, &da850_async1_info, regmap);
+ if (IS_ERR(mux))
+ return PTR_ERR(mux);
+
+ clk_hw_register_clkdev(&mux->hw, "async1", "da850-psc0");
+
+ return 0;
+}
+
+static const struct da8xx_cfgchip_mux_clk_info da850_async3_info __initconst = {
+ .name = "async3",
+ .parent0 = "pll0_sysclk2",
+ .parent1 = "pll1_sysclk2",
+ .cfgchip = CFGCHIP(3),
+ .bit = CFGCHIP3_ASYNC3_CLKSRC,
+};
+
+static int __init da850_cfgchip_register_async3(struct device *dev,
+ struct regmap *regmap)
+{
+ struct da8xx_cfgchip_mux_clk *mux;
+ struct clk_hw *parent;
+
+ mux = da8xx_cfgchip_mux_clk_register(dev, &da850_async3_info, regmap);
+ if (IS_ERR(mux))
+ return PTR_ERR(mux);
+
+ clk_hw_register_clkdev(&mux->hw, "async3", "da850-psc1");
+
+ /* pll1_sysclk2 is not affected by CPU scaling, so use it for async3 */
+ parent = clk_hw_get_parent_by_index(&mux->hw, 1);
+ if (parent)
+ clk_set_parent(mux->hw.clk, parent->clk);
+ else
+ dev_warn(dev, "Failed to find async3 parent clock\n");
+
+ return 0;
+}
+
+static int __init
+of_da8xx_cfgchip_init_mux_clock(struct device *dev,
+ const struct da8xx_cfgchip_mux_clk_info *info,
+ struct regmap *regmap)
+{
+ struct da8xx_cfgchip_mux_clk *mux;
+
+ mux = da8xx_cfgchip_mux_clk_register(dev, info, regmap);
+ if (IS_ERR(mux))
+ return PTR_ERR(mux);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &mux->hw);
+}
+
+static int __init of_da850_async1_init(struct device *dev, struct regmap *regmap)
+{
+ return of_da8xx_cfgchip_init_mux_clock(dev, &da850_async1_info, regmap);
+}
+
+static int __init of_da850_async3_init(struct device *dev, struct regmap *regmap)
+{
+ return of_da8xx_cfgchip_init_mux_clock(dev, &da850_async3_info, regmap);
+}
+
+/* --- USB 2.0 PHY clock --- */
+
+struct da8xx_usb0_clk48 {
+ struct clk_hw hw;
+ struct clk *fck;
+ struct regmap *regmap;
+};
+
+#define to_da8xx_usb0_clk48(_hw) \
+ container_of((_hw), struct da8xx_usb0_clk48, hw)
+
+static int da8xx_usb0_clk48_prepare(struct clk_hw *hw)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+
+ /* The USB 2.0 PSC clock is only needed temporarily during the USB 2.0
+ * PHY clock enable, but since clk_prepare() can't be called in an
+ * atomic context (i.e. in clk_enable()), we have to prepare it here.
+ */
+ return clk_prepare(usb0->fck);
+}
+
+static void da8xx_usb0_clk48_unprepare(struct clk_hw *hw)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+
+ clk_unprepare(usb0->fck);
+}
+
+static int da8xx_usb0_clk48_enable(struct clk_hw *hw)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+ unsigned int mask, val;
+ int ret;
+
+ /* Locking the USB 2.0 PLL requires that the USB 2.0 PSC is enabled
+ * temporarily. It can be turned back off once the PLL is locked.
+ */
+ clk_enable(usb0->fck);
+
+ /* Turn on the USB 2.0 PHY, but just the PLL, and not OTG. The USB 1.1
+ * PHY may use the USB 2.0 PLL clock without USB 2.0 OTG being used.
+ */
+ mask = CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN | CFGCHIP2_PHY_PLLON;
+ val = CFGCHIP2_PHY_PLLON;
+
+ regmap_write_bits(usb0->regmap, CFGCHIP(2), mask, val);
+ ret = regmap_read_poll_timeout(usb0->regmap, CFGCHIP(2), val,
+ val & CFGCHIP2_PHYCLKGD, 0, 500000);
+
+ clk_disable(usb0->fck);
+
+ return ret;
+}
+
+static void da8xx_usb0_clk48_disable(struct clk_hw *hw)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+ unsigned int val;
+
+ val = CFGCHIP2_PHYPWRDN;
+ regmap_write_bits(usb0->regmap, CFGCHIP(2), val, val);
+}
+
+static int da8xx_usb0_clk48_is_enabled(struct clk_hw *hw)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+ unsigned int val;
+
+ regmap_read(usb0->regmap, CFGCHIP(2), &val);
+
+ return !!(val & CFGCHIP2_PHYCLKGD);
+}
+
+static unsigned long da8xx_usb0_clk48_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+ unsigned int mask, val;
+
+ /* The parent clock rate must be one of the following */
+ mask = CFGCHIP2_REFFREQ_MASK;
+ switch (parent_rate) {
+ case 12000000:
+ val = CFGCHIP2_REFFREQ_12MHZ;
+ break;
+ case 13000000:
+ val = CFGCHIP2_REFFREQ_13MHZ;
+ break;
+ case 19200000:
+ val = CFGCHIP2_REFFREQ_19_2MHZ;
+ break;
+ case 20000000:
+ val = CFGCHIP2_REFFREQ_20MHZ;
+ break;
+ case 24000000:
+ val = CFGCHIP2_REFFREQ_24MHZ;
+ break;
+ case 26000000:
+ val = CFGCHIP2_REFFREQ_26MHZ;
+ break;
+ case 38400000:
+ val = CFGCHIP2_REFFREQ_38_4MHZ;
+ break;
+ case 40000000:
+ val = CFGCHIP2_REFFREQ_40MHZ;
+ break;
+ case 48000000:
+ val = CFGCHIP2_REFFREQ_48MHZ;
+ break;
+ default:
+ return 0;
+ }
+
+ regmap_write_bits(usb0->regmap, CFGCHIP(2), mask, val);
+
+ /* USB 2.0 PLL always supplies 48MHz */
+ return 48000000;
+}
+
+static long da8xx_usb0_clk48_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return 48000000;
+}
+
+static int da8xx_usb0_clk48_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+
+ return regmap_write_bits(usb0->regmap, CFGCHIP(2),
+ CFGCHIP2_USB2PHYCLKMUX,
+ index ? CFGCHIP2_USB2PHYCLKMUX : 0);
+}
+
+static u8 da8xx_usb0_clk48_get_parent(struct clk_hw *hw)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+ unsigned int val;
+
+ regmap_read(usb0->regmap, CFGCHIP(2), &val);
+
+ return (val & CFGCHIP2_USB2PHYCLKMUX) ? 1 : 0;
+}
+
+static const struct clk_ops da8xx_usb0_clk48_ops = {
+ .prepare = da8xx_usb0_clk48_prepare,
+ .unprepare = da8xx_usb0_clk48_unprepare,
+ .enable = da8xx_usb0_clk48_enable,
+ .disable = da8xx_usb0_clk48_disable,
+ .is_enabled = da8xx_usb0_clk48_is_enabled,
+ .recalc_rate = da8xx_usb0_clk48_recalc_rate,
+ .round_rate = da8xx_usb0_clk48_round_rate,
+ .set_parent = da8xx_usb0_clk48_set_parent,
+ .get_parent = da8xx_usb0_clk48_get_parent,
+};
+
+static struct da8xx_usb0_clk48 *
+da8xx_cfgchip_register_usb0_clk48(struct device *dev,
+ struct regmap *regmap)
+{
+ const char * const parent_names[] = { "usb_refclkin", "pll0_auxclk" };
+ struct clk *fck_clk;
+ struct da8xx_usb0_clk48 *usb0;
+ struct clk_init_data init;
+ int ret;
+
+ fck_clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(fck_clk)) {
+ if (PTR_ERR(fck_clk) != -EPROBE_DEFER)
+ dev_err(dev, "Missing fck clock\n");
+ return ERR_CAST(fck_clk);
+ }
+
+ usb0 = devm_kzalloc(dev, sizeof(*usb0), GFP_KERNEL);
+ if (!usb0)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = "usb0_clk48";
+ init.ops = &da8xx_usb0_clk48_ops;
+ init.parent_names = parent_names;
+ init.num_parents = 2;
+
+ usb0->hw.init = &init;
+ usb0->fck = fck_clk;
+ usb0->regmap = regmap;
+
+ ret = devm_clk_hw_register(dev, &usb0->hw);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return usb0;
+}
+
+/* --- USB 1.1 PHY clock --- */
+
+struct da8xx_usb1_clk48 {
+ struct clk_hw hw;
+ struct regmap *regmap;
+};
+
+#define to_da8xx_usb1_clk48(_hw) \
+ container_of((_hw), struct da8xx_usb1_clk48, hw)
+
+static int da8xx_usb1_clk48_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct da8xx_usb1_clk48 *usb1 = to_da8xx_usb1_clk48(hw);
+
+ return regmap_write_bits(usb1->regmap, CFGCHIP(2),
+ CFGCHIP2_USB1PHYCLKMUX,
+ index ? CFGCHIP2_USB1PHYCLKMUX : 0);
+}
+
+static u8 da8xx_usb1_clk48_get_parent(struct clk_hw *hw)
+{
+ struct da8xx_usb1_clk48 *usb1 = to_da8xx_usb1_clk48(hw);
+ unsigned int val;
+
+ regmap_read(usb1->regmap, CFGCHIP(2), &val);
+
+ return (val & CFGCHIP2_USB1PHYCLKMUX) ? 1 : 0;
+}
+
+static const struct clk_ops da8xx_usb1_clk48_ops = {
+ .set_parent = da8xx_usb1_clk48_set_parent,
+ .get_parent = da8xx_usb1_clk48_get_parent,
+};
+
+/**
+ * da8xx_cfgchip_register_usb1_clk48 - Register a new USB 1.1 PHY clock
+ * @dev: The CFGCHIP device
+ * @regmap: The CFGCHIP regmap
+ */
+static struct da8xx_usb1_clk48 *
+da8xx_cfgchip_register_usb1_clk48(struct device *dev,
+ struct regmap *regmap)
+{
+ const char * const parent_names[] = { "usb0_clk48", "usb_refclkin" };
+ struct da8xx_usb1_clk48 *usb1;
+ struct clk_init_data init;
+ int ret;
+
+ usb1 = devm_kzalloc(dev, sizeof(*usb1), GFP_KERNEL);
+ if (!usb1)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = "usb1_clk48";
+ init.ops = &da8xx_usb1_clk48_ops;
+ init.parent_names = parent_names;
+ init.num_parents = 2;
+
+ usb1->hw.init = &init;
+ usb1->regmap = regmap;
+
+ ret = devm_clk_hw_register(dev, &usb1->hw);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return usb1;
+}
+
+static int da8xx_cfgchip_register_usb_phy_clk(struct device *dev,
+ struct regmap *regmap)
+{
+ struct da8xx_usb0_clk48 *usb0;
+ struct da8xx_usb1_clk48 *usb1;
+ struct clk_hw *parent;
+
+ usb0 = da8xx_cfgchip_register_usb0_clk48(dev, regmap);
+ if (IS_ERR(usb0))
+ return PTR_ERR(usb0);
+
+ /*
+ * All existing boards use pll0_auxclk as the parent and new boards
+ * should use device tree, so hard-coding the value (1) here.
+ */
+ parent = clk_hw_get_parent_by_index(&usb0->hw, 1);
+ if (parent)
+ clk_set_parent(usb0->hw.clk, parent->clk);
+ else
+ dev_warn(dev, "Failed to find usb0 parent clock\n");
+
+ usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
+ if (IS_ERR(usb1))
+ return PTR_ERR(usb1);
+
+ /*
+ * All existing boards use usb0_clk48 as the parent and new boards
+ * should use device tree, so hard-coding the value (0) here.
+ */
+ parent = clk_hw_get_parent_by_index(&usb1->hw, 0);
+ if (parent)
+ clk_set_parent(usb1->hw.clk, parent->clk);
+ else
+ dev_warn(dev, "Failed to find usb1 parent clock\n");
+
+ clk_hw_register_clkdev(&usb0->hw, "usb0_clk48", "da8xx-usb-phy");
+ clk_hw_register_clkdev(&usb1->hw, "usb1_clk48", "da8xx-usb-phy");
+
+ return 0;
+}
+
+static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct da8xx_usb0_clk48 *usb0;
+ struct da8xx_usb1_clk48 *usb1;
+
+ clk_data = devm_kzalloc(dev, sizeof(*clk_data) + 2 *
+ sizeof(*clk_data->hws), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = 2;
+
+ usb0 = da8xx_cfgchip_register_usb0_clk48(dev, regmap);
+ if (IS_ERR(usb0)) {
+ if (PTR_ERR(usb0) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(dev, "Failed to register usb0_clk48 (%ld)\n",
+ PTR_ERR(usb0));
+
+ clk_data->hws[0] = ERR_PTR(-ENOENT);
+ } else {
+ clk_data->hws[0] = &usb0->hw;
+ }
+
+ usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
+ if (IS_ERR(usb1)) {
+ if (PTR_ERR(usb1) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n",
+ PTR_ERR(usb1));
+
+ clk_data->hws[1] = ERR_PTR(-ENOENT);
+ } else {
+ clk_data->hws[1] = &usb1->hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+}
+
+/* --- platform device --- */
+
+static const struct of_device_id da8xx_cfgchip_of_match[] = {
+ {
+ .compatible = "ti,da830-tbclksync",
+ .data = of_da8xx_tbclksync_init,
+ },
+ {
+ .compatible = "ti,da830-div4p5ena",
+ .data = of_da8xx_div4p5ena_init,
+ },
+ {
+ .compatible = "ti,da850-async1-clksrc",
+ .data = of_da850_async1_init,
+ },
+ {
+ .compatible = "ti,da850-async3-clksrc",
+ .data = of_da850_async3_init,
+ },
+ {
+ .compatible = "ti,da830-usb-phy-clocks",
+ .data = of_da8xx_usb_phy_clk_init,
+ },
+ { }
+};
+
+static const struct platform_device_id da8xx_cfgchip_id_table[] = {
+ {
+ .name = "da830-tbclksync",
+ .driver_data = (kernel_ulong_t)da8xx_cfgchip_register_tbclk,
+ },
+ {
+ .name = "da830-div4p5ena",
+ .driver_data = (kernel_ulong_t)da8xx_cfgchip_register_div4p5,
+ },
+ {
+ .name = "da850-async1-clksrc",
+ .driver_data = (kernel_ulong_t)da8xx_cfgchip_register_async1,
+ },
+ {
+ .name = "da850-async3-clksrc",
+ .driver_data = (kernel_ulong_t)da850_cfgchip_register_async3,
+ },
+ {
+ .name = "da830-usb-phy-clks",
+ .driver_data = (kernel_ulong_t)da8xx_cfgchip_register_usb_phy_clk,
+ },
+ { }
+};
+
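+/*
+ * Each OF match entry and platform_device_id above carries one of the
+ * registration helpers; probe() resolves the CFGCHIP regmap and calls it.
+ */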
+typedef int (*da8xx_cfgchip_init)(struct device *dev, struct regmap *regmap);
+
+static int da8xx_cfgchip_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct da8xx_cfgchip_clk_platform_data *pdata = dev->platform_data;
+ const struct of_device_id *of_id;
+ da8xx_cfgchip_init clk_init = NULL;
+ struct regmap *regmap = NULL;
+
+ of_id = of_match_device(da8xx_cfgchip_of_match, dev);
+ if (of_id) {
+ struct device_node *parent;
+
+ clk_init = of_id->data;
+ parent = of_get_parent(dev->of_node);
+ regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ } else if (pdev->id_entry && pdata) {
+ clk_init = (void *)pdev->id_entry->driver_data;
+ regmap = pdata->cfgchip;
+ }
+
+ if (!clk_init) {
+ dev_err(dev, "unable to find driver data\n");
+ return -EINVAL;
+ }
+
+ if (IS_ERR_OR_NULL(regmap)) {
+ dev_err(dev, "no regmap for CFGCHIP syscon\n");
+ return regmap ? PTR_ERR(regmap) : -ENOENT;
+ }
+
+ return clk_init(dev, regmap);
+}
+
+static struct platform_driver da8xx_cfgchip_driver = {
+ .probe = da8xx_cfgchip_probe,
+ .driver = {
+ .name = "da8xx-cfgchip-clk",
+ .of_match_table = da8xx_cfgchip_of_match,
+ },
+ .id_table = da8xx_cfgchip_id_table,
+};
+
+static int __init da8xx_cfgchip_driver_init(void)
+{
+ return platform_driver_register(&da8xx_cfgchip_driver);
+}
+
+/* has to be postcore_initcall because PSC devices depend on the async3 clock */
+postcore_initcall(da8xx_cfgchip_driver_init);
diff --git a/drivers/clk/davinci/pll-da830.c b/drivers/clk/davinci/pll-da830.c
new file mode 100644
index 000000000000..929a3d3a9adb
--- /dev/null
+++ b/drivers/clk/davinci/pll-da830.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock descriptions for TI DA830/OMAP-L137/AM17XX
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clkdev.h>
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+static const struct davinci_pll_clk_info da830_pll_info = {
+ .name = "pll0",
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 4,
+ .pllm_max = 32,
+ .pllout_min_rate = 300000000,
+ .pllout_max_rate = 600000000,
+ .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_HAS_POSTDIV,
+};
+
+/*
+ * NB: Technically, the clocks flagged as SYSCLK_FIXED_DIV are "fixed ratio",
+ * meaning that we could change the divider as long as we keep the correct
+ * ratio between all of the clocks, but we don't support that because there is
+ * currently not a need for it.
+ */
+
+SYSCLK(2, pll0_sysclk2, pll0_pllen, 5, SYSCLK_FIXED_DIV);
+SYSCLK(3, pll0_sysclk3, pll0_pllen, 5, 0);
+SYSCLK(4, pll0_sysclk4, pll0_pllen, 5, SYSCLK_FIXED_DIV);
+SYSCLK(5, pll0_sysclk5, pll0_pllen, 5, 0);
+SYSCLK(6, pll0_sysclk6, pll0_pllen, 5, SYSCLK_FIXED_DIV);
+SYSCLK(7, pll0_sysclk7, pll0_pllen, 5, 0);
+
+int da830_pll_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &da830_pll_info, "ref_clk", base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk2, base);
+ clk_register_clkdev(clk, "pll0_sysclk2", "da830-psc0");
+ clk_register_clkdev(clk, "pll0_sysclk2", "da830-psc1");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk3, base);
+ clk_register_clkdev(clk, "pll0_sysclk3", "da830-psc0");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk4, base);
+ clk_register_clkdev(clk, "pll0_sysclk4", "da830-psc0");
+ clk_register_clkdev(clk, "pll0_sysclk4", "da830-psc1");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk5, base);
+ clk_register_clkdev(clk, "pll0_sysclk5", "da830-psc1");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk6, base);
+ clk_register_clkdev(clk, "pll0_sysclk6", "da830-psc0");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk7, base);
+
+ clk = davinci_pll_auxclk_register(dev, "pll0_auxclk", base);
+ clk_register_clkdev(clk, NULL, "i2c_davinci.1");
+ clk_register_clkdev(clk, "timer0", NULL);
+ clk_register_clkdev(clk, NULL, "davinci-wdt");
+
+ return 0;
+}
diff --git a/drivers/clk/davinci/pll-da850.c b/drivers/clk/davinci/pll-da850.c
new file mode 100644
index 000000000000..2a038b7908cc
--- /dev/null
+++ b/drivers/clk/davinci/pll-da850.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock descriptions for TI DA850/OMAP-L138/AM18XX
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/da8xx-cfgchip.h>
+#include <linux/of.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+#define OCSEL_OCSRC_OSCIN 0x14
+#define OCSEL_OCSRC_PLL0_SYSCLK(n) (0x16 + (n))
+#define OCSEL_OCSRC_PLL1_OBSCLK 0x1e
+#define OCSEL_OCSRC_PLL1_SYSCLK(n) (0x16 + (n))
+
+static const struct davinci_pll_clk_info da850_pll0_info = {
+ .name = "pll0",
+ .unlock_reg = CFGCHIP(0),
+ .unlock_mask = CFGCHIP0_PLL_MASTER_LOCK,
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 4,
+ .pllm_max = 32,
+ .pllout_min_rate = 300000000,
+ .pllout_max_rate = 600000000,
+ .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_HAS_POSTDIV |
+ PLL_HAS_EXTCLKSRC,
+};
+
+/*
+ * NB: Technically, the clocks flagged as SYSCLK_FIXED_DIV are "fixed ratio",
+ * meaning that we could change the divider as long as we keep the correct
+ * ratio between all of the clocks, but we don't support that because there is
+ * currently not a need for it.
+ */
+
+SYSCLK(1, pll0_sysclk1, pll0_pllen, 5, SYSCLK_FIXED_DIV);
+SYSCLK(2, pll0_sysclk2, pll0_pllen, 5, SYSCLK_FIXED_DIV);
+SYSCLK(3, pll0_sysclk3, pll0_pllen, 5, 0);
+SYSCLK(4, pll0_sysclk4, pll0_pllen, 5, SYSCLK_FIXED_DIV);
+SYSCLK(5, pll0_sysclk5, pll0_pllen, 5, 0);
+SYSCLK(6, pll0_sysclk6, pll0_pllen, 5, SYSCLK_ARM_RATE | SYSCLK_FIXED_DIV);
+SYSCLK(7, pll0_sysclk7, pll0_pllen, 5, 0);
+
+static const char * const da850_pll0_obsclk_parent_names[] = {
+ "oscin",
+ "pll0_sysclk1",
+ "pll0_sysclk2",
+ "pll0_sysclk3",
+ "pll0_sysclk4",
+ "pll0_sysclk5",
+ "pll0_sysclk6",
+ "pll0_sysclk7",
+ "pll1_obsclk",
+};
+
+static u32 da850_pll0_obsclk_table[] = {
+ OCSEL_OCSRC_OSCIN,
+ OCSEL_OCSRC_PLL0_SYSCLK(1),
+ OCSEL_OCSRC_PLL0_SYSCLK(2),
+ OCSEL_OCSRC_PLL0_SYSCLK(3),
+ OCSEL_OCSRC_PLL0_SYSCLK(4),
+ OCSEL_OCSRC_PLL0_SYSCLK(5),
+ OCSEL_OCSRC_PLL0_SYSCLK(6),
+ OCSEL_OCSRC_PLL0_SYSCLK(7),
+ OCSEL_OCSRC_PLL1_OBSCLK,
+};
+
+static const struct davinci_pll_obsclk_info da850_pll0_obsclk_info = {
+ .name = "pll0_obsclk",
+ .parent_names = da850_pll0_obsclk_parent_names,
+ .num_parents = ARRAY_SIZE(da850_pll0_obsclk_parent_names),
+ .table = da850_pll0_obsclk_table,
+ .ocsrc_mask = GENMASK(4, 0),
+};
+
+int da850_pll0_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &da850_pll0_info, "ref_clk", base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk1, base);
+ clk_register_clkdev(clk, "pll0_sysclk1", "da850-psc0");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk2, base);
+ clk_register_clkdev(clk, "pll0_sysclk2", "da850-psc0");
+ clk_register_clkdev(clk, "pll0_sysclk2", "da850-psc1");
+ clk_register_clkdev(clk, "pll0_sysclk2", "da850-async3-clksrc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk3, base);
+ clk_register_clkdev(clk, "pll0_sysclk3", "da850-async1-clksrc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk4, base);
+ clk_register_clkdev(clk, "pll0_sysclk4", "da850-psc0");
+ clk_register_clkdev(clk, "pll0_sysclk4", "da850-psc1");
+
+ davinci_pll_sysclk_register(dev, &pll0_sysclk5, base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk6, base);
+ clk_register_clkdev(clk, "pll0_sysclk6", "da850-psc0");
+
+ davinci_pll_sysclk_register(dev, &pll0_sysclk7, base);
+
+ davinci_pll_auxclk_register(dev, "pll0_auxclk", base);
+
+ clk = clk_register_fixed_factor(dev, "async2", "pll0_auxclk",
+ CLK_IS_CRITICAL, 1, 1);
+
+ clk_register_clkdev(clk, NULL, "i2c_davinci.1");
+ clk_register_clkdev(clk, "timer0", NULL);
+ clk_register_clkdev(clk, NULL, "davinci-wdt");
+
+ davinci_pll_obsclk_register(dev, &da850_pll0_obsclk_info, base);
+
+ return 0;
+}
+
+static const struct davinci_pll_sysclk_info *da850_pll0_sysclk_info[] = {
+ &pll0_sysclk1,
+ &pll0_sysclk2,
+ &pll0_sysclk3,
+ &pll0_sysclk4,
+ &pll0_sysclk5,
+ &pll0_sysclk6,
+ &pll0_sysclk7,
+ NULL
+};
+
+int of_da850_pll0_init(struct device *dev, void __iomem *base)
+{
+ return of_davinci_pll_init(dev, &da850_pll0_info,
+ &da850_pll0_obsclk_info,
+ da850_pll0_sysclk_info, 7, base);
+}
+
+static const struct davinci_pll_clk_info da850_pll1_info = {
+ .name = "pll1",
+ .unlock_reg = CFGCHIP(3),
+ .unlock_mask = CFGCHIP3_PLL1_MASTER_LOCK,
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 4,
+ .pllm_max = 32,
+ .pllout_min_rate = 300000000,
+ .pllout_max_rate = 600000000,
+ .flags = PLL_HAS_POSTDIV,
+};
+
+SYSCLK(1, pll1_sysclk1, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(2, pll1_sysclk2, pll1_pllen, 5, 0);
+SYSCLK(3, pll1_sysclk3, pll1_pllen, 5, 0);
+
+static const char * const da850_pll1_obsclk_parent_names[] = {
+ "oscin",
+ "pll1_sysclk1",
+ "pll1_sysclk2",
+ "pll1_sysclk3",
+};
+
+static u32 da850_pll1_obsclk_table[] = {
+ OCSEL_OCSRC_OSCIN,
+ OCSEL_OCSRC_PLL1_SYSCLK(1),
+ OCSEL_OCSRC_PLL1_SYSCLK(2),
+ OCSEL_OCSRC_PLL1_SYSCLK(3),
+};
+
+static const struct davinci_pll_obsclk_info da850_pll1_obsclk_info = {
+ .name = "pll1_obsclk",
+ .parent_names = da850_pll1_obsclk_parent_names,
+ .num_parents = ARRAY_SIZE(da850_pll1_obsclk_parent_names),
+ .table = da850_pll1_obsclk_table,
+ .ocsrc_mask = GENMASK(4, 0),
+};
+
+int da850_pll1_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &da850_pll1_info, "oscin", base);
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
+ clk_register_clkdev(clk, "pll1_sysclk2", "da850-async3-clksrc");
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
+
+ davinci_pll_obsclk_register(dev, &da850_pll1_obsclk_info, base);
+
+ return 0;
+}
+
+static const struct davinci_pll_sysclk_info *da850_pll1_sysclk_info[] = {
+ &pll1_sysclk1,
+ &pll1_sysclk2,
+ &pll1_sysclk3,
+ NULL
+};
+
+int of_da850_pll1_init(struct device *dev, void __iomem *base)
+{
+ return of_davinci_pll_init(dev, &da850_pll1_info,
+ &da850_pll1_obsclk_info,
+ da850_pll1_sysclk_info, 3, base);
+}
diff --git a/drivers/clk/davinci/pll-dm355.c b/drivers/clk/davinci/pll-dm355.c
new file mode 100644
index 000000000000..5345f8286c50
--- /dev/null
+++ b/drivers/clk/davinci/pll-dm355.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock descriptions for TI DM355
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+static const struct davinci_pll_clk_info dm355_pll1_info = {
+ .name = "pll1",
+ .pllm_mask = GENMASK(7, 0),
+ .pllm_min = 92,
+ .pllm_max = 184,
+ .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_PREDIV_ALWAYS_ENABLED |
+ PLL_PREDIV_FIXED8 | PLL_HAS_POSTDIV |
+ PLL_POSTDIV_ALWAYS_ENABLED | PLL_POSTDIV_FIXED_DIV,
+};
+
+SYSCLK(1, pll1_sysclk1, pll1, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
+SYSCLK(2, pll1_sysclk2, pll1, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
+SYSCLK(3, pll1_sysclk3, pll1, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(4, pll1_sysclk4, pll1, 5, SYSCLK_ALWAYS_ENABLED);
+
+int dm355_pll1_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &dm355_pll1_info, "ref_clk", base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
+ clk_register_clkdev(clk, "pll1_sysclk1", "dm355-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
+ clk_register_clkdev(clk, "pll1_sysclk2", "dm355-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
+ clk_register_clkdev(clk, "pll1_sysclk3", "dm355-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk4, base);
+ clk_register_clkdev(clk, "pll1_sysclk4", "dm355-psc");
+
+ clk = davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
+ clk_register_clkdev(clk, "pll1_auxclk", "dm355-psc");
+
+ davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
+
+ return 0;
+}
+
+static const struct davinci_pll_clk_info dm355_pll2_info = {
+ .name = "pll2",
+ .pllm_mask = GENMASK(7, 0),
+ .pllm_min = 92,
+ .pllm_max = 184,
+ .flags = PLL_HAS_PREDIV | PLL_PREDIV_ALWAYS_ENABLED | PLL_HAS_POSTDIV |
+ PLL_POSTDIV_ALWAYS_ENABLED | PLL_POSTDIV_FIXED_DIV,
+};
+
+SYSCLK(1, pll2_sysclk1, pll2, 5, SYSCLK_FIXED_DIV);
+SYSCLK(2, pll2_sysclk2, pll2, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
+
+int dm355_pll2_init(struct device *dev, void __iomem *base)
+{
+ davinci_pll_clk_register(dev, &dm355_pll2_info, "oscin", base);
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk2, base);
+
+ davinci_pll_sysclkbp_clk_register(dev, "pll2_sysclkbp", base);
+
+ return 0;
+}
diff --git a/drivers/clk/davinci/pll-dm365.c b/drivers/clk/davinci/pll-dm365.c
new file mode 100644
index 000000000000..5f8d9f42d0f3
--- /dev/null
+++ b/drivers/clk/davinci/pll-dm365.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock descriptions for TI DM365
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+#define OCSEL_OCSRC_ENABLE 0
+
+static const struct davinci_pll_clk_info dm365_pll1_info = {
+ .name = "pll1",
+ .pllm_mask = GENMASK(9, 0),
+ .pllm_min = 1,
+ .pllm_max = 1023,
+ .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_HAS_POSTDIV |
+ PLL_POSTDIV_ALWAYS_ENABLED | PLL_PLLM_2X,
+};
+
+SYSCLK(1, pll1_sysclk1, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(2, pll1_sysclk2, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(3, pll1_sysclk3, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(4, pll1_sysclk4, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(5, pll1_sysclk5, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(6, pll1_sysclk6, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(7, pll1_sysclk7, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(8, pll1_sysclk8, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(9, pll1_sysclk9, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+
+/*
+ * This is a bit of a hack to make OCSEL[OCSRC] on DM365 look like OCSEL[OCSRC]
+ * on DA850. On DM365, OCSEL[OCSRC] is just an enable/disable bit instead of a
+ * multiplexer. By modeling it as a single parent mux clock, the clock code will
+ * still do the right thing in this case.
+ */
+static const char * const dm365_pll_obsclk_parent_names[] = {
+ "oscin",
+};
+
+static u32 dm365_pll_obsclk_table[] = {
+ OCSEL_OCSRC_ENABLE,
+};
+
+static const struct davinci_pll_obsclk_info dm365_pll1_obsclk_info = {
+ .name = "pll1_obsclk",
+ .parent_names = dm365_pll_obsclk_parent_names,
+ .num_parents = ARRAY_SIZE(dm365_pll_obsclk_parent_names),
+ .table = dm365_pll_obsclk_table,
+ .ocsrc_mask = BIT(4),
+};
+
+int dm365_pll1_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &dm365_pll1_info, "ref_clk", base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
+ clk_register_clkdev(clk, "pll1_sysclk1", "dm365-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
+ clk_register_clkdev(clk, "pll1_sysclk2", "dm365-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
+ clk_register_clkdev(clk, "pll1_sysclk3", "dm365-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk4, base);
+ clk_register_clkdev(clk, "pll1_sysclk4", "dm365-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk5, base);
+ clk_register_clkdev(clk, "pll1_sysclk5", "dm365-psc");
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk6, base);
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk7, base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk8, base);
+ clk_register_clkdev(clk, "pll1_sysclk8", "dm365-psc");
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk9, base);
+
+ clk = davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
+ clk_register_clkdev(clk, "pll1_auxclk", "dm355-psc");
+
+ davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
+
+ davinci_pll_obsclk_register(dev, &dm365_pll1_obsclk_info, base);
+
+ return 0;
+}
+
+static const struct davinci_pll_clk_info dm365_pll2_info = {
+ .name = "pll2",
+ .pllm_mask = GENMASK(9, 0),
+ .pllm_min = 1,
+ .pllm_max = 1023,
+ .flags = PLL_HAS_PREDIV | PLL_HAS_POSTDIV | PLL_POSTDIV_ALWAYS_ENABLED |
+ PLL_PLLM_2X,
+};
+
+SYSCLK(1, pll2_sysclk1, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(2, pll2_sysclk2, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(3, pll2_sysclk3, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(4, pll2_sysclk4, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(5, pll2_sysclk5, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+
+static const struct davinci_pll_obsclk_info dm365_pll2_obsclk_info = {
+ .name = "pll2_obsclk",
+ .parent_names = dm365_pll_obsclk_parent_names,
+ .num_parents = ARRAY_SIZE(dm365_pll_obsclk_parent_names),
+ .table = dm365_pll_obsclk_table,
+ .ocsrc_mask = BIT(4),
+};
+
+int dm365_pll2_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &dm365_pll2_info, "oscin", base);
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll2_sysclk2, base);
+ clk_register_clkdev(clk, "pll1_sysclk2", "dm365-psc");
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk3, base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll2_sysclk4, base);
+ clk_register_clkdev(clk, "pll1_sysclk4", "dm365-psc");
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk5, base);
+
+ davinci_pll_auxclk_register(dev, "pll2_auxclk", base);
+
+ davinci_pll_obsclk_register(dev, &dm365_pll2_obsclk_info, base);
+
+ return 0;
+}
diff --git a/drivers/clk/davinci/pll-dm644x.c b/drivers/clk/davinci/pll-dm644x.c
new file mode 100644
index 000000000000..69bf785377cf
--- /dev/null
+++ b/drivers/clk/davinci/pll-dm644x.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock descriptions for TI DM644X
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+static const struct davinci_pll_clk_info dm644x_pll1_info = {
+ .name = "pll1",
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 1,
+ .pllm_max = 32,
+ .pllout_min_rate = 400000000,
+ .pllout_max_rate = 600000000, /* 810MHz @ 1.3V, -810 only */
+ .flags = PLL_HAS_CLKMODE | PLL_HAS_POSTDIV,
+};
+
+SYSCLK(1, pll1_sysclk1, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+SYSCLK(2, pll1_sysclk2, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+SYSCLK(3, pll1_sysclk3, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+SYSCLK(5, pll1_sysclk5, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+
+int dm644x_pll1_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &dm644x_pll1_info, "ref_clk", base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
+ clk_register_clkdev(clk, "pll1_sysclk1", "dm644x-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
+ clk_register_clkdev(clk, "pll1_sysclk2", "dm644x-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
+ clk_register_clkdev(clk, "pll1_sysclk3", "dm644x-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk5, base);
+ clk_register_clkdev(clk, "pll1_sysclk5", "dm644x-psc");
+
+ clk = davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
+ clk_register_clkdev(clk, "pll1_auxclk", "dm644x-psc");
+
+ davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
+
+ return 0;
+}
+
+static const struct davinci_pll_clk_info dm644x_pll2_info = {
+ .name = "pll2",
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 1,
+ .pllm_max = 32,
+ .pllout_min_rate = 400000000,
+ .pllout_max_rate = 900000000,
+ .flags = PLL_HAS_POSTDIV | PLL_POSTDIV_FIXED_DIV,
+};
+
+SYSCLK(1, pll2_sysclk1, pll2_pllen, 4, 0);
+SYSCLK(2, pll2_sysclk2, pll2_pllen, 4, 0);
+
+int dm644x_pll2_init(struct device *dev, void __iomem *base)
+{
+ davinci_pll_clk_register(dev, &dm644x_pll2_info, "oscin", base);
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk2, base);
+
+ davinci_pll_sysclkbp_clk_register(dev, "pll2_sysclkbp", base);
+
+ return 0;
+}
diff --git a/drivers/clk/davinci/pll-dm646x.c b/drivers/clk/davinci/pll-dm646x.c
new file mode 100644
index 000000000000..a61cc3256418
--- /dev/null
+++ b/drivers/clk/davinci/pll-dm646x.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock descriptions for TI DM646X
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+static const struct davinci_pll_clk_info dm646x_pll1_info = {
+ .name = "pll1",
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 14,
+ .pllm_max = 32,
+ .flags = PLL_HAS_CLKMODE,
+};
+
+SYSCLK(1, pll1_sysclk1, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+SYSCLK(2, pll1_sysclk2, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+SYSCLK(3, pll1_sysclk3, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+SYSCLK(4, pll1_sysclk4, pll1_pllen, 4, 0);
+SYSCLK(5, pll1_sysclk5, pll1_pllen, 4, 0);
+SYSCLK(6, pll1_sysclk6, pll1_pllen, 4, 0);
+SYSCLK(8, pll1_sysclk8, pll1_pllen, 4, 0);
+SYSCLK(9, pll1_sysclk9, pll1_pllen, 4, 0);
+
+int dm646x_pll1_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &dm646x_pll1_info, "ref_clk", base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
+ clk_register_clkdev(clk, "pll1_sysclk1", "dm646x-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
+ clk_register_clkdev(clk, "pll1_sysclk2", "dm646x-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
+ clk_register_clkdev(clk, "pll1_sysclk3", "dm646x-psc");
+ clk_register_clkdev(clk, NULL, "davinci-wdt");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk4, base);
+ clk_register_clkdev(clk, "pll1_sysclk4", "dm646x-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk5, base);
+ clk_register_clkdev(clk, "pll1_sysclk5", "dm646x-psc");
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk6, base);
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk8, base);
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk9, base);
+
+ davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
+
+ davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
+
+ return 0;
+}
+
+static const struct davinci_pll_clk_info dm646x_pll2_info = {
+ .name = "pll2",
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 14,
+ .pllm_max = 32,
+ .flags = 0,
+};
+
+SYSCLK(1, pll2_sysclk1, pll2_pllen, 4, 0);
+
+int dm646x_pll2_init(struct device *dev, void __iomem *base)
+{
+ davinci_pll_clk_register(dev, &dm646x_pll2_info, "oscin", base);
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
+
+ return 0;
+}
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
new file mode 100644
index 000000000000..23a24c944f1d
--- /dev/null
+++ b/drivers/clk/davinci/pll.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock driver for TI Davinci SoCs
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ *
+ * Based on arch/arm/mach-davinci/clock.c
+ * Copyright (C) 2006-2007 Texas Instruments.
+ * Copyright (C) 2008-2009 Deep Root Systems, LLC
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/notifier.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_data/clk-davinci-pll.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+#define MAX_NAME_SIZE 20
+#define OSCIN_CLK_NAME "oscin"
+
+#define REVID 0x000
+#define PLLCTL 0x100
+#define OCSEL 0x104
+#define PLLSECCTL 0x108
+#define PLLM 0x110
+#define PREDIV 0x114
+#define PLLDIV1 0x118
+#define PLLDIV2 0x11c
+#define PLLDIV3 0x120
+#define OSCDIV 0x124
+#define POSTDIV 0x128
+#define BPDIV 0x12c
+#define PLLCMD 0x138
+#define PLLSTAT 0x13c
+#define ALNCTL 0x140
+#define DCHANGE 0x144
+#define CKEN 0x148
+#define CKSTAT 0x14c
+#define SYSTAT 0x150
+#define PLLDIV4 0x160
+#define PLLDIV5 0x164
+#define PLLDIV6 0x168
+#define PLLDIV7 0x16c
+#define PLLDIV8 0x170
+#define PLLDIV9 0x174
+
+#define PLLCTL_PLLEN BIT(0)
+#define PLLCTL_PLLPWRDN BIT(1)
+#define PLLCTL_PLLRST BIT(3)
+#define PLLCTL_PLLDIS BIT(4)
+#define PLLCTL_PLLENSRC BIT(5)
+#define PLLCTL_CLKMODE BIT(8)
+
+/* shared by most *DIV registers */
+#define DIV_RATIO_SHIFT 0
+#define DIV_RATIO_WIDTH 5
+#define DIV_ENABLE_SHIFT 15
+
+#define PLLCMD_GOSET BIT(0)
+#define PLLSTAT_GOSTAT BIT(0)
+
+#define CKEN_OBSCLK_SHIFT 1
+#define CKEN_AUXEN_SHIFT 0
+
+/*
+ * OMAP-L138 system reference guide recommends a wait for 4 OSCIN/CLKIN
+ * cycles to ensure that the PLLC has switched to bypass mode. Delay of 1us
+ * ensures we are good for all > 4MHz OSCIN/CLKIN inputs. Typically the input
+ * is ~25MHz. Units are microseconds.
+ */
+#define PLL_BYPASS_TIME 1
+
+/* From OMAP-L138 datasheet table 6-4. Units are microseconds */
+#define PLL_RESET_TIME 1
+
+/*
+ * From OMAP-L138 datasheet table 6-4; assuming prediv = 1, sqrt(pllm) = 4
+ * Units are micro seconds.
+ */
+#define PLL_LOCK_TIME 20
+
+/**
+ * struct davinci_pll_clk - Main PLL clock (aka PLLOUT)
+ * @hw: clk_hw for the pll
+ * @base: Base memory address
+ * @pllm_min: The minimum allowable PLLM[PLLM] value
+ * @pllm_max: The maximum allowable PLLM[PLLM] value
+ * @pllm_mask: Bitmask for PLLM[PLLM] value
+ */
+struct davinci_pll_clk {
+ struct clk_hw hw;
+ void __iomem *base;
+ u32 pllm_min;
+ u32 pllm_max;
+ u32 pllm_mask;
+};
+
+#define to_davinci_pll_clk(_hw) \
+ container_of((_hw), struct davinci_pll_clk, hw)
+
+static unsigned long davinci_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct davinci_pll_clk *pll = to_davinci_pll_clk(hw);
+ unsigned long rate = parent_rate;
+ u32 mult;
+
+ mult = readl(pll->base + PLLM) & pll->pllm_mask;
+ rate *= mult + 1;
+
+ return rate;
+}
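+
+/*
+ * A worked example of the computation above, using a purely hypothetical
+ * 24 MHz reference (not taken from any datasheet): with PLLM[PLLM] = 24,
+ *
+ *	rate = 24 MHz * (24 + 1) = 600 MHz
+ *
+ * i.e. a register value of N encodes a multiplier of N + 1.
+ */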
+
+static int davinci_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct davinci_pll_clk *pll = to_davinci_pll_clk(hw);
+ struct clk_hw *parent = req->best_parent_hw;
+ unsigned long parent_rate = req->best_parent_rate;
+ unsigned long rate = req->rate;
+ unsigned long best_rate, r;
+ u32 mult;
+
+ /* there is a limited range of valid outputs (see datasheet) */
+ if (rate < req->min_rate)
+ return -EINVAL;
+
+ rate = min(rate, req->max_rate);
+ mult = rate / parent_rate;
+ best_rate = parent_rate * mult;
+
+ /* easy case when there is no PREDIV */
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ if (best_rate < req->min_rate)
+ return -EINVAL;
+
+ if (mult < pll->pllm_min || mult > pll->pllm_max)
+ return -EINVAL;
+
+ req->rate = best_rate;
+
+ return 0;
+ }
+
+ /* see if the PREDIV clock can help us */
+ best_rate = 0;
+
+ for (mult = pll->pllm_min; mult <= pll->pllm_max; mult++) {
+ parent_rate = clk_hw_round_rate(parent, rate / mult);
+ r = parent_rate * mult;
+ if (r < req->min_rate)
+ continue;
+ if (r > rate || r > req->max_rate)
+ break;
+ if (r > best_rate) {
+ best_rate = r;
+ req->rate = best_rate;
+ req->best_parent_rate = parent_rate;
+ if (best_rate == rate)
+ break;
+ }
+ }
+
+ return 0;
+}
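+
+/*
+ * A sketch of the PREDIV search above with hypothetical numbers: with a
+ * 25 MHz oscin feeding the PREDIV and a request for 300 MHz, the loop
+ * eventually tries mult = 12, asks the PREDIV for 300 MHz / 12 = 25 MHz
+ * (which it can deliver with a divider of 1), gets r = 25 MHz * 12 =
+ * 300 MHz, and stops early through the best_rate == rate check.
+ */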
+
+static int davinci_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct davinci_pll_clk *pll = to_davinci_pll_clk(hw);
+ u32 mult;
+
+ mult = rate / parent_rate;
+ writel(mult - 1, pll->base + PLLM);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int davinci_pll_debug_init(struct clk_hw *hw, struct dentry *dentry);
+#else
+#define davinci_pll_debug_init NULL
+#endif
+
+static const struct clk_ops davinci_pll_ops = {
+ .recalc_rate = davinci_pll_recalc_rate,
+ .determine_rate = davinci_pll_determine_rate,
+ .set_rate = davinci_pll_set_rate,
+ .debug_init = davinci_pll_debug_init,
+};
+
+/* PLLM works differently on DM365 */
+static unsigned long dm365_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct davinci_pll_clk *pll = to_davinci_pll_clk(hw);
+ unsigned long rate = parent_rate;
+ u32 mult;
+
+ mult = readl(pll->base + PLLM) & pll->pllm_mask;
+ rate *= mult * 2;
+
+ return rate;
+}
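+
+/*
+ * A hypothetical example of the DM365 encoding (input rate chosen purely
+ * for illustration): a 24 MHz reference with PLLM[PLLM] = 10 gives
+ *
+ *	rate = 24 MHz * 10 * 2 = 480 MHz
+ *
+ * i.e. the register holds half the effective multiplier rather than
+ * multiplier - 1 as on the other SoCs.
+ */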
+
+static const struct clk_ops dm365_pll_ops = {
+ .recalc_rate = dm365_pll_recalc_rate,
+ .debug_init = davinci_pll_debug_init,
+};
+
+/**
+ * davinci_pll_div_register - common *DIV clock implementation
+ * @name: the clock name
+ * @parent_name: the parent clock name
+ * @reg: the *DIV register
+ * @fixed: if true, the divider is a fixed value
+ * @flags: bitmap of CLK_* flags from clk-provider.h
+ */
+static struct clk *davinci_pll_div_register(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ void __iomem *reg,
+ bool fixed, u32 flags)
+{
+ const char * const *parent_names = parent_name ? &parent_name : NULL;
+ int num_parents = parent_name ? 1 : 0;
+ const struct clk_ops *divider_ops = &clk_divider_ops;
+ struct clk_gate *gate;
+ struct clk_divider *divider;
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->reg = reg;
+ gate->bit_idx = DIV_ENABLE_SHIFT;
+
+ divider = devm_kzalloc(dev, sizeof(*divider), GFP_KERNEL);
+ if (!divider)
+ return ERR_PTR(-ENOMEM);
+
+ divider->reg = reg;
+ divider->shift = DIV_RATIO_SHIFT;
+ divider->width = DIV_RATIO_WIDTH;
+
+ if (fixed) {
+ divider->flags |= CLK_DIVIDER_READ_ONLY;
+ divider_ops = &clk_divider_ro_ops;
+ }
+
+ return clk_register_composite(dev, name, parent_names, num_parents,
+ NULL, NULL, &divider->hw, divider_ops,
+ &gate->hw, &clk_gate_ops, flags);
+}
+
+struct davinci_pllen_clk {
+ struct clk_hw hw;
+ void __iomem *base;
+};
+
+#define to_davinci_pllen_clk(_hw) \
+ container_of((_hw), struct davinci_pllen_clk, hw)
+
+static const struct clk_ops davinci_pllen_ops = {
+ /* this clock just uses the clock notification feature */
+};
+
+/*
+ * The PLL has to be switched into bypass mode while we are changing the rate,
+ * so we do that on the PLLEN clock since it is the end of the line. This will
+ * switch to bypass before any of the parent clocks (PREDIV, PLL, POSTDIV) are
+ * changed and will switch back to the PLL after the changes have been made.
+ */
+static int davinci_pllen_rate_change(struct notifier_block *nb,
+ unsigned long flags, void *data)
+{
+ struct clk_notifier_data *cnd = data;
+ struct clk_hw *hw = __clk_get_hw(cnd->clk);
+ struct davinci_pllen_clk *pll = to_davinci_pllen_clk(hw);
+ u32 ctrl;
+
+ ctrl = readl(pll->base + PLLCTL);
+
+ if (flags == PRE_RATE_CHANGE) {
+ /* Switch the PLL to bypass mode */
+ ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
+ writel(ctrl, pll->base + PLLCTL);
+
+ udelay(PLL_BYPASS_TIME);
+
+ /* Reset and enable PLL */
+ ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
+ writel(ctrl, pll->base + PLLCTL);
+ } else {
+ udelay(PLL_RESET_TIME);
+
+ /* Bring PLL out of reset */
+ ctrl |= PLLCTL_PLLRST;
+ writel(ctrl, pll->base + PLLCTL);
+
+ udelay(PLL_LOCK_TIME);
+
+ /* Remove PLL from bypass mode */
+ ctrl |= PLLCTL_PLLEN;
+ writel(ctrl, pll->base + PLLCTL);
+ }
+
+ return NOTIFY_OK;
+}
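+
+/*
+ * A minimal usage sketch (the clkdev name "arm" and the rate are only
+ * examples, not guaranteed to exist on every board): a rate change that
+ * propagates up to the PLLEN clock via CLK_SET_RATE_PARENT is bracketed
+ * by this notifier, so a consumer only sees a single call:
+ *
+ *	struct clk *arm = clk_get(NULL, "arm");
+ *
+ *	clk_set_rate(arm, 300000000);	// PRE: enter bypass, dividers and
+ *					// PLLM are then rewritten, POST:
+ *					// wait for lock and leave bypass
+ *	clk_put(arm);
+ */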
+
+static struct davinci_pll_platform_data *davinci_pll_get_pdata(struct device *dev)
+{
+ struct davinci_pll_platform_data *pdata = dev_get_platdata(dev);
+
+ /*
+ * Platform data is optional, so allocate a new struct if one was not
+ * provided. For device tree, this will always be the case.
+ */
+ if (!pdata)
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ /* for device tree, we need to fill in the struct */
+ if (dev->of_node)
+ pdata->cfgchip =
+ syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
+
+ return pdata;
+}
+
+static struct notifier_block davinci_pllen_notifier = {
+ .notifier_call = davinci_pllen_rate_change,
+};
+
+/**
+ * davinci_pll_clk_register - Register a PLL clock
+ * @info: The device-specific clock info
+ * @parent_name: The parent clock name
+ * @base: The PLL's memory region
+ *
+ * This creates a series of clocks that represent the PLL.
+ *
+ * OSCIN > [PREDIV >] PLL > [POSTDIV >] PLLEN
+ *
+ * - OSCIN is the parent clock (on secondary PLL, may come from primary PLL)
+ * - PREDIV and POSTDIV are optional (depends on the PLL controller)
+ * - PLL is the PLL output (aka PLLOUT)
+ * - PLLEN is the bypass multiplexer
+ *
+ * Returns: The PLLOUT clock or a negative error code.
+ */
+struct clk *davinci_pll_clk_register(struct device *dev,
+ const struct davinci_pll_clk_info *info,
+ const char *parent_name,
+ void __iomem *base)
+{
+ struct davinci_pll_platform_data *pdata;
+ char prediv_name[MAX_NAME_SIZE];
+ char pllout_name[MAX_NAME_SIZE];
+ char postdiv_name[MAX_NAME_SIZE];
+ char pllen_name[MAX_NAME_SIZE];
+ struct clk_init_data init;
+ struct davinci_pll_clk *pllout;
+ struct davinci_pllen_clk *pllen;
+ struct clk *pllout_clk, *clk;
+
+ pdata = davinci_pll_get_pdata(dev);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ if (info->flags & PLL_HAS_CLKMODE) {
+ /*
+ * If a PLL has PLLCTL[CLKMODE], then it is the primary PLL.
+ * We register a clock named "oscin" that serves as the internal
+ * "input clock" domain shared by both PLLs (if there are 2)
+ * and will be the parent clock to the AUXCLK, SYSCLKBP and
+ * OBSCLK domains. NB: The various TRMs use "OSCIN" to mean
+ * a number of different things. In this driver we use it to
+ * mean the signal after the PLLCTL[CLKMODE] switch.
+ */
+ clk = clk_register_fixed_factor(dev, OSCIN_CLK_NAME,
+ parent_name, 0, 1, 1);
+ if (IS_ERR(clk))
+ return clk;
+
+ parent_name = OSCIN_CLK_NAME;
+ }
+
+ if (info->flags & PLL_HAS_PREDIV) {
+ bool fixed = info->flags & PLL_PREDIV_FIXED_DIV;
+ u32 flags = 0;
+
+ snprintf(prediv_name, MAX_NAME_SIZE, "%s_prediv", info->name);
+
+ if (info->flags & PLL_PREDIV_ALWAYS_ENABLED)
+ flags |= CLK_IS_CRITICAL;
+
+ /* Some? DM355 chips don't correctly report the PREDIV value */
+ if (info->flags & PLL_PREDIV_FIXED8)
+ clk = clk_register_fixed_factor(dev, prediv_name,
+ parent_name, flags, 1, 8);
+ else
+ clk = davinci_pll_div_register(dev, prediv_name,
+ parent_name, base + PREDIV, fixed, flags);
+ if (IS_ERR(clk))
+ return clk;
+
+ parent_name = prediv_name;
+ }
+
+ /* Unlock writing to PLL registers */
+ if (info->unlock_reg) {
+ if (IS_ERR_OR_NULL(pdata->cfgchip))
+ dev_warn(dev, "Failed to get CFGCHIP (%ld)\n",
+ PTR_ERR(pdata->cfgchip));
+ else
+ regmap_write_bits(pdata->cfgchip, info->unlock_reg,
+ info->unlock_mask, 0);
+ }
+
+ pllout = devm_kzalloc(dev, sizeof(*pllout), GFP_KERNEL);
+ if (!pllout)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(pllout_name, MAX_NAME_SIZE, "%s_pllout", info->name);
+
+ init.name = pllout_name;
+ if (info->flags & PLL_PLLM_2X)
+ init.ops = &dm365_pll_ops;
+ else
+ init.ops = &davinci_pll_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ if (info->flags & PLL_HAS_PREDIV)
+ init.flags |= CLK_SET_RATE_PARENT;
+
+ pllout->hw.init = &init;
+ pllout->base = base;
+ pllout->pllm_mask = info->pllm_mask;
+ pllout->pllm_min = info->pllm_min;
+ pllout->pllm_max = info->pllm_max;
+
+ pllout_clk = devm_clk_register(dev, &pllout->hw);
+ if (IS_ERR(pllout_clk))
+ return pllout_clk;
+
+ clk_hw_set_rate_range(&pllout->hw, info->pllout_min_rate,
+ info->pllout_max_rate);
+
+ parent_name = pllout_name;
+
+ if (info->flags & PLL_HAS_POSTDIV) {
+ bool fixed = info->flags & PLL_POSTDIV_FIXED_DIV;
+ u32 flags = CLK_SET_RATE_PARENT;
+
+ snprintf(postdiv_name, MAX_NAME_SIZE, "%s_postdiv", info->name);
+
+ if (info->flags & PLL_POSTDIV_ALWAYS_ENABLED)
+ flags |= CLK_IS_CRITICAL;
+
+ clk = davinci_pll_div_register(dev, postdiv_name, parent_name,
+ base + POSTDIV, fixed, flags);
+ if (IS_ERR(clk))
+ return clk;
+
+ parent_name = postdiv_name;
+ }
+
+ pllen = devm_kzalloc(dev, sizeof(*pllen), GFP_KERNEL);
+ if (!pllen)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(pllen_name, MAX_NAME_SIZE, "%s_pllen", info->name);
+
+ init.name = pllen_name;
+ init.ops = &davinci_pllen_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_PARENT;
+
+ pllen->hw.init = &init;
+ pllen->base = base;
+
+ clk = devm_clk_register(dev, &pllen->hw);
+ if (IS_ERR(clk))
+ return clk;
+
+ clk_notifier_register(clk, &davinci_pllen_notifier);
+
+ return pllout_clk;
+}
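+
+/*
+ * For a hypothetical PLL named "pll0" with PLL_HAS_CLKMODE, PLL_HAS_PREDIV
+ * and PLL_HAS_POSTDIV all set, the registration above produces the
+ * following chain (names follow the snprintf() patterns used here):
+ *
+ *	oscin -> pll0_prediv -> pll0_pllout -> pll0_postdiv -> pll0_pllen
+ *
+ * The function returns pll0_pllout; pll0_pllen is consumed by name as the
+ * parent of the SYSCLKn clocks registered separately.
+ */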
+
+/**
+ * davinci_pll_auxclk_register - Register bypass clock (AUXCLK)
+ * @name: The clock name
+ * @base: The PLL memory region
+ */
+struct clk *davinci_pll_auxclk_register(struct device *dev,
+ const char *name,
+ void __iomem *base)
+{
+ return clk_register_gate(dev, name, OSCIN_CLK_NAME, 0, base + CKEN,
+ CKEN_AUXEN_SHIFT, 0, NULL);
+}
+
+/**
+ * davinci_pll_sysclkbp_clk_register - Register bypass divider clock (SYSCLKBP)
+ * @name: The clock name
+ * @base: The PLL memory region
+ */
+struct clk *davinci_pll_sysclkbp_clk_register(struct device *dev,
+ const char *name,
+ void __iomem *base)
+{
+ return clk_register_divider(dev, name, OSCIN_CLK_NAME, 0, base + BPDIV,
+ DIV_RATIO_SHIFT, DIV_RATIO_WIDTH,
+ CLK_DIVIDER_READ_ONLY, NULL);
+}
+
+/**
+ * davinci_pll_obsclk_register - Register oscillator divider clock (OBSCLK)
+ * @info: The clock info
+ * @base: The PLL memory region
+ */
+struct clk *
+davinci_pll_obsclk_register(struct device *dev,
+ const struct davinci_pll_obsclk_info *info,
+ void __iomem *base)
+{
+ struct clk_mux *mux;
+ struct clk_gate *gate;
+ struct clk_divider *divider;
+ u32 oscdiv;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->reg = base + OCSEL;
+ mux->table = info->table;
+ mux->mask = info->ocsrc_mask;
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->reg = base + CKEN;
+ gate->bit_idx = CKEN_OBSCLK_SHIFT;
+
+ divider = devm_kzalloc(dev, sizeof(*divider), GFP_KERNEL);
+ if (!divider)
+ return ERR_PTR(-ENOMEM);
+
+ divider->reg = base + OSCDIV;
+ divider->shift = DIV_RATIO_SHIFT;
+ divider->width = DIV_RATIO_WIDTH;
+
+ /* make sure the divider is enabled in case the bootloader disabled it */
+ oscdiv = readl(base + OSCDIV);
+ oscdiv |= BIT(DIV_ENABLE_SHIFT);
+ writel(oscdiv, base + OSCDIV);
+
+ return clk_register_composite(dev, info->name, info->parent_names,
+ info->num_parents,
+ &mux->hw, &clk_mux_ops,
+ &divider->hw, &clk_divider_ops,
+ &gate->hw, &clk_gate_ops, 0);
+}
+
+/* The PLL SYSCLKn clocks have a mechanism for synchronizing rate changes. */
+static int davinci_pll_sysclk_rate_change(struct notifier_block *nb,
+ unsigned long flags, void *data)
+{
+ struct clk_notifier_data *cnd = data;
+ struct clk_hw *hw = __clk_get_hw(clk_get_parent(cnd->clk));
+ struct davinci_pllen_clk *pll = to_davinci_pllen_clk(hw);
+ u32 pllcmd, pllstat;
+
+ switch (flags) {
+ case POST_RATE_CHANGE:
+ /* apply the changes */
+ pllcmd = readl(pll->base + PLLCMD);
+ pllcmd |= PLLCMD_GOSET;
+ writel(pllcmd, pll->base + PLLCMD);
+ /* fallthrough */
+ case PRE_RATE_CHANGE:
+ /* Wait for outstanding changes to take effect */
+ do {
+ pllstat = readl(pll->base + PLLSTAT);
+ } while (pllstat & PLLSTAT_GOSTAT);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block davinci_pll_sysclk_notifier = {
+ .notifier_call = davinci_pll_sysclk_rate_change,
+};
+
+/**
+ * davinci_pll_sysclk_register - Register divider clocks (SYSCLKn)
+ * @info: The clock info
+ * @base: The PLL memory region
+ */
+struct clk *
+davinci_pll_sysclk_register(struct device *dev,
+ const struct davinci_pll_sysclk_info *info,
+ void __iomem *base)
+{
+ const struct clk_ops *divider_ops = &clk_divider_ops;
+ struct clk_gate *gate;
+ struct clk_divider *divider;
+ struct clk *clk;
+ u32 reg;
+ u32 flags = 0;
+
+ /* PLLDIVn registers are not entirely consecutive */
+ if (info->id < 4)
+ reg = PLLDIV1 + 4 * (info->id - 1);
+ else
+ reg = PLLDIV4 + 4 * (info->id - 4);
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->reg = base + reg;
+ gate->bit_idx = DIV_ENABLE_SHIFT;
+
+ divider = devm_kzalloc(dev, sizeof(*divider), GFP_KERNEL);
+ if (!divider)
+ return ERR_PTR(-ENOMEM);
+
+ divider->reg = base + reg;
+ divider->shift = DIV_RATIO_SHIFT;
+ divider->width = info->ratio_width;
+ divider->flags = 0;
+
+ if (info->flags & SYSCLK_FIXED_DIV) {
+ divider->flags |= CLK_DIVIDER_READ_ONLY;
+ divider_ops = &clk_divider_ro_ops;
+ }
+
+ /* Only the ARM clock can change the parent PLL rate */
+ if (info->flags & SYSCLK_ARM_RATE)
+ flags |= CLK_SET_RATE_PARENT;
+
+ if (info->flags & SYSCLK_ALWAYS_ENABLED)
+ flags |= CLK_IS_CRITICAL;
+
+ clk = clk_register_composite(dev, info->name, &info->parent_name, 1,
+ NULL, NULL, &divider->hw, divider_ops,
+ &gate->hw, &clk_gate_ops, flags);
+ if (IS_ERR(clk))
+ return clk;
+
+ clk_notifier_register(clk, &davinci_pll_sysclk_notifier);
+
+ return clk;
+}
+
+int of_davinci_pll_init(struct device *dev,
+ const struct davinci_pll_clk_info *info,
+ const struct davinci_pll_obsclk_info *obsclk_info,
+ const struct davinci_pll_sysclk_info **div_info,
+ u8 max_sysclk_id,
+ void __iomem *base)
+{
+ struct device_node *node = dev->of_node;
+ struct device_node *child;
+ const char *parent_name;
+ struct clk *clk;
+
+ if (info->flags & PLL_HAS_CLKMODE)
+ parent_name = of_clk_get_parent_name(node, 0);
+ else
+ parent_name = OSCIN_CLK_NAME;
+
+ clk = davinci_pll_clk_register(dev, info, parent_name, base);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to register %s\n", info->name);
+ return PTR_ERR(clk);
+ }
+
+ child = of_get_child_by_name(node, "pllout");
+ if (of_device_is_available(child))
+ of_clk_add_provider(child, of_clk_src_simple_get, clk);
+ of_node_put(child);
+
+ child = of_get_child_by_name(node, "sysclk");
+ if (of_device_is_available(child)) {
+ struct clk_onecell_data *clk_data;
+ struct clk **clks;
+ int n_clks = max_sysclk_id + 1;
+ int i;
+
+ clk_data = devm_kzalloc(dev, sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clks = devm_kmalloc_array(dev, n_clks, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ clk_data->clks = clks;
+ clk_data->clk_num = n_clks;
+
+ for (i = 0; i < n_clks; i++)
+ clks[i] = ERR_PTR(-ENOENT);
+
+ for (; *div_info; div_info++) {
+ clk = davinci_pll_sysclk_register(dev, *div_info, base);
+ if (IS_ERR(clk))
+ dev_warn(dev, "failed to register %s (%ld)\n",
+ (*div_info)->name, PTR_ERR(clk));
+ else
+ clks[(*div_info)->id] = clk;
+ }
+ of_clk_add_provider(child, of_clk_src_onecell_get, clk_data);
+ }
+ of_node_put(child);
+
+ child = of_get_child_by_name(node, "auxclk");
+ if (of_device_is_available(child)) {
+ char child_name[MAX_NAME_SIZE];
+
+ snprintf(child_name, MAX_NAME_SIZE, "%s_auxclk", info->name);
+
+ clk = davinci_pll_auxclk_register(dev, child_name, base);
+ if (IS_ERR(clk))
+ dev_warn(dev, "failed to register %s (%ld)\n",
+ child_name, PTR_ERR(clk));
+ else
+ of_clk_add_provider(child, of_clk_src_simple_get, clk);
+ }
+ of_node_put(child);
+
+ child = of_get_child_by_name(node, "obsclk");
+ if (of_device_is_available(child)) {
+ if (obsclk_info)
+ clk = davinci_pll_obsclk_register(dev, obsclk_info, base);
+ else
+ clk = ERR_PTR(-EINVAL);
+
+ if (IS_ERR(clk))
+ dev_warn(dev, "failed to register obsclk (%ld)\n",
+ PTR_ERR(clk));
+ else
+ of_clk_add_provider(child, of_clk_src_simple_get, clk);
+ }
+ of_node_put(child);
+
+ return 0;
+}
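+
+/*
+ * of_davinci_pll_init() looks for optional child nodes named "pllout",
+ * "sysclk", "auxclk" and "obsclk" and turns each enabled child into a
+ * clock provider ("sysclk" uses one clock cell, the others use none).
+ * A rough, non-normative sketch of the expected layout (addresses and
+ * other properties omitted):
+ *
+ *	pll0: clock-controller {
+ *		compatible = "ti,da850-pll0";
+ *		sysclk {
+ *			#clock-cells = <1>;
+ *		};
+ *		auxclk {
+ *			#clock-cells = <0>;
+ *		};
+ *	};
+ */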
+
+static const struct of_device_id davinci_pll_of_match[] = {
+ { .compatible = "ti,da850-pll0", .data = of_da850_pll0_init },
+ { .compatible = "ti,da850-pll1", .data = of_da850_pll1_init },
+ { }
+};
+
+static const struct platform_device_id davinci_pll_id_table[] = {
+ { .name = "da830-pll", .driver_data = (kernel_ulong_t)da830_pll_init },
+ { .name = "da850-pll0", .driver_data = (kernel_ulong_t)da850_pll0_init },
+ { .name = "da850-pll1", .driver_data = (kernel_ulong_t)da850_pll1_init },
+ { .name = "dm355-pll1", .driver_data = (kernel_ulong_t)dm355_pll1_init },
+ { .name = "dm355-pll2", .driver_data = (kernel_ulong_t)dm355_pll2_init },
+ { .name = "dm365-pll1", .driver_data = (kernel_ulong_t)dm365_pll1_init },
+ { .name = "dm365-pll2", .driver_data = (kernel_ulong_t)dm365_pll2_init },
+ { .name = "dm644x-pll1", .driver_data = (kernel_ulong_t)dm644x_pll1_init },
+ { .name = "dm644x-pll2", .driver_data = (kernel_ulong_t)dm644x_pll2_init },
+ { .name = "dm646x-pll1", .driver_data = (kernel_ulong_t)dm646x_pll1_init },
+ { .name = "dm646x-pll2", .driver_data = (kernel_ulong_t)dm646x_pll2_init },
+ { }
+};
+
+typedef int (*davinci_pll_init)(struct device *dev, void __iomem *base);
+
+static int davinci_pll_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ davinci_pll_init pll_init = NULL;
+ struct resource *res;
+ void __iomem *base;
+
+ of_id = of_match_device(davinci_pll_of_match, dev);
+ if (of_id)
+ pll_init = of_id->data;
+ else if (pdev->id_entry)
+ pll_init = (void *)pdev->id_entry->driver_data;
+
+ if (!pll_init) {
+ dev_err(dev, "unable to find driver data\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ return pll_init(dev, base);
+}
+
+static struct platform_driver davinci_pll_driver = {
+ .probe = davinci_pll_probe,
+ .driver = {
+ .name = "davinci-pll-clk",
+ .of_match_table = davinci_pll_of_match,
+ },
+ .id_table = davinci_pll_id_table,
+};
+
+static int __init davinci_pll_driver_init(void)
+{
+ return platform_driver_register(&davinci_pll_driver);
+}
+
+/* has to be postcore_initcall because PSC devices depend on PLL parent clocks */
+postcore_initcall(davinci_pll_driver_init);
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+#define DEBUG_REG(n) \
+{ \
+ .name = #n, \
+ .offset = n, \
+}
+
+static const struct debugfs_reg32 davinci_pll_regs[] = {
+ DEBUG_REG(REVID),
+ DEBUG_REG(PLLCTL),
+ DEBUG_REG(OCSEL),
+ DEBUG_REG(PLLSECCTL),
+ DEBUG_REG(PLLM),
+ DEBUG_REG(PREDIV),
+ DEBUG_REG(PLLDIV1),
+ DEBUG_REG(PLLDIV2),
+ DEBUG_REG(PLLDIV3),
+ DEBUG_REG(OSCDIV),
+ DEBUG_REG(POSTDIV),
+ DEBUG_REG(BPDIV),
+ DEBUG_REG(PLLCMD),
+ DEBUG_REG(PLLSTAT),
+ DEBUG_REG(ALNCTL),
+ DEBUG_REG(DCHANGE),
+ DEBUG_REG(CKEN),
+ DEBUG_REG(CKSTAT),
+ DEBUG_REG(SYSTAT),
+ DEBUG_REG(PLLDIV4),
+ DEBUG_REG(PLLDIV5),
+ DEBUG_REG(PLLDIV6),
+ DEBUG_REG(PLLDIV7),
+ DEBUG_REG(PLLDIV8),
+ DEBUG_REG(PLLDIV9),
+};
+
+static int davinci_pll_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct davinci_pll_clk *pll = to_davinci_pll_clk(hw);
+ struct debugfs_regset32 *regset;
+ struct dentry *d;
+
+ regset = kzalloc(sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = davinci_pll_regs;
+ regset->nregs = ARRAY_SIZE(davinci_pll_regs);
+ regset->base = pll->base;
+
+ d = debugfs_create_regset32("registers", 0400, dentry, regset);
+ if (IS_ERR(d)) {
+ kfree(regset);
+ return PTR_ERR(d);
+ }
+
+ return 0;
+}
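+
+/*
+ * The dump created above appears under the common clk debugfs tree, e.g.
+ * (illustrative path, the directory name is the PLLOUT clock name):
+ *
+ *	/sys/kernel/debug/clk/pll0_pllout/registers
+ */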
+#endif
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
new file mode 100644
index 000000000000..b1b6fb23f972
--- /dev/null
+++ b/drivers/clk/davinci/pll.h
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock driver for TI Davinci PLL controllers
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __CLK_DAVINCI_PLL_H___
+#define __CLK_DAVINCI_PLL_H___
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/types.h>
+
+#define PLL_HAS_CLKMODE BIT(0) /* PLL has PLLCTL[CLKMODE] */
+#define PLL_HAS_PREDIV BIT(1) /* has prediv before PLL */
+#define PLL_PREDIV_ALWAYS_ENABLED BIT(2) /* don't clear DEN bit */
+#define PLL_PREDIV_FIXED_DIV BIT(3) /* fixed divider value */
+#define PLL_HAS_POSTDIV BIT(4) /* has postdiv after PLL */
+#define PLL_POSTDIV_ALWAYS_ENABLED BIT(5) /* don't clear DEN bit */
+#define PLL_POSTDIV_FIXED_DIV BIT(6) /* fixed divider value */
+#define PLL_HAS_EXTCLKSRC BIT(7) /* has selectable bypass */
+#define PLL_PLLM_2X BIT(8) /* PLLM value is 2x (DM365) */
+#define PLL_PREDIV_FIXED8 BIT(9) /* DM355 quirk */
+
+/** davinci_pll_clk_info - controller-specific PLL info
+ * @name: The name of the PLL
+ * @unlock_reg: Optional CFGCHIP register for unlocking the PLL
+ * @unlock_mask: Bitmask used with @unlock_reg
+ * @pllm_mask: Bitmask for PLLM[PLLM] value
+ * @pllm_min: Minimum allowable value for PLLM[PLLM]
+ * @pllm_max: Maximum allowable value for PLLM[PLLM]
+ * @pllout_min_rate: Minimum allowable rate for PLLOUT
+ * @pllout_max_rate: Maximum allowable rate for PLLOUT
+ * @flags: Bitmap of PLL_* flags.
+ */
+struct davinci_pll_clk_info {
+ const char *name;
+ u32 unlock_reg;
+ u32 unlock_mask;
+ u32 pllm_mask;
+ u32 pllm_min;
+ u32 pllm_max;
+ unsigned long pllout_min_rate;
+ unsigned long pllout_max_rate;
+ u32 flags;
+};
+
+#define SYSCLK_ARM_RATE BIT(0) /* Controls ARM rate */
+#define SYSCLK_ALWAYS_ENABLED BIT(1) /* Or bad things happen */
+#define SYSCLK_FIXED_DIV BIT(2) /* Fixed divider */
+
+/** davinci_pll_sysclk_info - SYSCLKn-specific info
+ * @name: The name of the clock
+ * @parent_name: The name of the parent clock
+ * @id: "n" in "SYSCLKn"
+ * @ratio_width: Width (in bits) of RATIO in PLLDIVn register
+ * @flags: Bitmap of SYSCLK_* flags.
+ */
+struct davinci_pll_sysclk_info {
+ const char *name;
+ const char *parent_name;
+ u32 id;
+ u32 ratio_width;
+ u32 flags;
+};
+
+#define SYSCLK(i, n, p, w, f) \
+static const struct davinci_pll_sysclk_info n = { \
+ .name = #n, \
+ .parent_name = #p, \
+ .id = (i), \
+ .ratio_width = (w), \
+ .flags = (f), \
+}
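+
+/*
+ * For reference, SYSCLK(1, pll1_sysclk1, pll1_pllen, 5,
+ * SYSCLK_ALWAYS_ENABLED) expands to:
+ *
+ *	static const struct davinci_pll_sysclk_info pll1_sysclk1 = {
+ *		.name = "pll1_sysclk1",
+ *		.parent_name = "pll1_pllen",
+ *		.id = (1),
+ *		.ratio_width = (5),
+ *		.flags = (SYSCLK_ALWAYS_ENABLED),
+ *	};
+ */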
+
+/** davinci_pll_obsclk_info - OBSCLK-specific info
+ * @name: The name of the clock
+ * @parent_names: Array of names of the parent clocks
+ * @num_parents: Length of @parent_names
+ * @table: Array of values to write to OCSEL[OCSRC] corresponding to
+ * @parent_names
+ * @ocsrc_mask: Bitmask for OCSEL[OCSRC]
+ */
+struct davinci_pll_obsclk_info {
+ const char *name;
+ const char * const *parent_names;
+ u8 num_parents;
+ u32 *table;
+ u32 ocsrc_mask;
+};
+
+struct clk *davinci_pll_clk_register(struct device *dev,
+ const struct davinci_pll_clk_info *info,
+ const char *parent_name,
+ void __iomem *base);
+struct clk *davinci_pll_auxclk_register(struct device *dev,
+ const char *name,
+ void __iomem *base);
+struct clk *davinci_pll_sysclkbp_clk_register(struct device *dev,
+ const char *name,
+ void __iomem *base);
+struct clk *
+davinci_pll_obsclk_register(struct device *dev,
+ const struct davinci_pll_obsclk_info *info,
+ void __iomem *base);
+struct clk *
+davinci_pll_sysclk_register(struct device *dev,
+ const struct davinci_pll_sysclk_info *info,
+ void __iomem *base);
+
+int of_davinci_pll_init(struct device *dev,
+ const struct davinci_pll_clk_info *info,
+ const struct davinci_pll_obsclk_info *obsclk_info,
+ const struct davinci_pll_sysclk_info **div_info,
+ u8 max_sysclk_id,
+ void __iomem *base);
+
+/* Platform-specific callbacks */
+
+int da830_pll_init(struct device *dev, void __iomem *base);
+
+int da850_pll0_init(struct device *dev, void __iomem *base);
+int da850_pll1_init(struct device *dev, void __iomem *base);
+int of_da850_pll0_init(struct device *dev, void __iomem *base);
+int of_da850_pll1_init(struct device *dev, void __iomem *base);
+
+int dm355_pll1_init(struct device *dev, void __iomem *base);
+int dm355_pll2_init(struct device *dev, void __iomem *base);
+
+int dm365_pll1_init(struct device *dev, void __iomem *base);
+int dm365_pll2_init(struct device *dev, void __iomem *base);
+
+int dm644x_pll1_init(struct device *dev, void __iomem *base);
+int dm644x_pll2_init(struct device *dev, void __iomem *base);
+
+int dm646x_pll1_init(struct device *dev, void __iomem *base);
+int dm646x_pll2_init(struct device *dev, void __iomem *base);
+
+#endif /* __CLK_DAVINCI_PLL_H___ */
diff --git a/drivers/clk/davinci/psc-da830.c b/drivers/clk/davinci/psc-da830.c
new file mode 100644
index 000000000000..f61abf5632ff
--- /dev/null
+++ b/drivers/clk/davinci/psc-da830.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PSC clock descriptions for TI DA830/OMAP-L137/AM17XX
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
+LPSC_CLKDEV1(mmcsd_clkdev, NULL, "da830-mmc.0");
+LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
+
+static const struct davinci_lpsc_clk_info da830_psc0_info[] = {
+ LPSC(0, 0, tpcc, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(1, 0, tptc0, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(2, 0, tptc1, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(3, 0, aemif, pll0_sysclk3, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(4, 0, spi0, pll0_sysclk2, spi0_clkdev, 0),
+ LPSC(5, 0, mmcsd, pll0_sysclk2, mmcsd_clkdev, 0),
+ LPSC(6, 0, aintc, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(7, 0, arm_rom, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(8, 0, secu_mgr, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(9, 0, uart0, pll0_sysclk2, uart0_clkdev, 0),
+ LPSC(10, 0, scr0_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(11, 0, scr1_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(12, 0, scr2_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(13, 0, pruss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(14, 0, arm, pll0_sysclk6, NULL, LPSC_ALWAYS_ENABLED),
+ { }
+};
+
+static int da830_psc0_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, da830_psc0_info, 16, base);
+}
+
+static struct clk_bulk_data da830_psc0_parent_clks[] = {
+ { .id = "pll0_sysclk2" },
+ { .id = "pll0_sysclk3" },
+ { .id = "pll0_sysclk4" },
+ { .id = "pll0_sysclk6" },
+};
+
+const struct davinci_psc_init_data da830_psc0_init_data = {
+ .parent_clks = da830_psc0_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(da830_psc0_parent_clks),
+ .psc_init = &da830_psc0_init,
+};
+
+LPSC_CLKDEV2(usb0_clkdev, NULL, "musb-da8xx",
+ NULL, "cppi41-dmaengine");
+LPSC_CLKDEV1(usb1_clkdev, NULL, "ohci-da8xx");
+/* REVISIT: gpio-davinci.c should be modified to drop con_id */
+LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
+LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
+ "fck", "davinci_mdio.0");
+LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
+LPSC_CLKDEV1(mcasp1_clkdev, NULL, "davinci-mcasp.1");
+LPSC_CLKDEV1(mcasp2_clkdev, NULL, "davinci-mcasp.2");
+LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
+LPSC_CLKDEV1(i2c1_clkdev, NULL, "i2c_davinci.2");
+LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
+LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
+LPSC_CLKDEV1(lcdc_clkdev, "fck", "da8xx_lcdc.0");
+LPSC_CLKDEV2(pwm_clkdev, "fck", "ehrpwm.0",
+ "fck", "ehrpwm.1");
+LPSC_CLKDEV3(ecap_clkdev, "fck", "ecap.0",
+ "fck", "ecap.1",
+ "fck", "ecap.2");
+LPSC_CLKDEV2(eqep_clkdev, NULL, "eqep.0",
+ NULL, "eqep.1");
+
+static const struct davinci_lpsc_clk_info da830_psc1_info[] = {
+ LPSC(1, 0, usb0, pll0_sysclk2, usb0_clkdev, 0),
+ LPSC(2, 0, usb1, pll0_sysclk4, usb1_clkdev, 0),
+ LPSC(3, 0, gpio, pll0_sysclk4, gpio_clkdev, 0),
+ LPSC(5, 0, emac, pll0_sysclk4, emac_clkdev, 0),
+ LPSC(6, 0, emif3, pll0_sysclk5, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(7, 0, mcasp0, pll0_sysclk2, mcasp0_clkdev, 0),
+ LPSC(8, 0, mcasp1, pll0_sysclk2, mcasp1_clkdev, 0),
+ LPSC(9, 0, mcasp2, pll0_sysclk2, mcasp2_clkdev, 0),
+ LPSC(10, 0, spi1, pll0_sysclk2, spi1_clkdev, 0),
+ LPSC(11, 0, i2c1, pll0_sysclk4, i2c1_clkdev, 0),
+ LPSC(12, 0, uart1, pll0_sysclk2, uart1_clkdev, 0),
+ LPSC(13, 0, uart2, pll0_sysclk2, uart2_clkdev, 0),
+ LPSC(16, 0, lcdc, pll0_sysclk2, lcdc_clkdev, 0),
+ LPSC(17, 0, pwm, pll0_sysclk2, pwm_clkdev, 0),
+ LPSC(20, 0, ecap, pll0_sysclk2, ecap_clkdev, 0),
+ LPSC(21, 0, eqep, pll0_sysclk2, eqep_clkdev, 0),
+ { }
+};
+
+static int da830_psc1_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, da830_psc1_info, 32, base);
+}
+
+static struct clk_bulk_data da830_psc1_parent_clks[] = {
+ { .id = "pll0_sysclk2" },
+ { .id = "pll0_sysclk4" },
+ { .id = "pll0_sysclk5" },
+};
+
+const struct davinci_psc_init_data da830_psc1_init_data = {
+ .parent_clks = da830_psc1_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(da830_psc1_parent_clks),
+ .psc_init = &da830_psc1_init,
+};
diff --git a/drivers/clk/davinci/psc-da850.c b/drivers/clk/davinci/psc-da850.c
new file mode 100644
index 000000000000..d196dcbed560
--- /dev/null
+++ b/drivers/clk/davinci/psc-da850.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PSC clock descriptions for TI DA850/OMAP-L138/AM18XX
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/reset-controller.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+LPSC_CLKDEV2(emifa_clkdev, NULL, "ti-aemif",
+ "aemif", "davinci_nand.0");
+LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
+LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "da830-mmc.0");
+LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
+/* REVISIT: used dev_id instead of con_id */
+LPSC_CLKDEV1(arm_clkdev, "arm", NULL);
+LPSC_CLKDEV1(dsp_clkdev, NULL, "davinci-rproc.0");
+
+static const struct davinci_lpsc_clk_info da850_psc0_info[] = {
+ LPSC(0, 0, tpcc0, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(1, 0, tptc0, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(2, 0, tptc1, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(3, 0, emifa, async1, emifa_clkdev, 0),
+ LPSC(4, 0, spi0, pll0_sysclk2, spi0_clkdev, 0),
+ LPSC(5, 0, mmcsd0, pll0_sysclk2, mmcsd0_clkdev, 0),
+ LPSC(6, 0, aintc, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(7, 0, arm_rom, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(9, 0, uart0, pll0_sysclk2, uart0_clkdev, 0),
+ LPSC(13, 0, pruss, pll0_sysclk2, NULL, 0),
+ LPSC(14, 0, arm, pll0_sysclk6, arm_clkdev, LPSC_ALWAYS_ENABLED | LPSC_SET_RATE_PARENT),
+ LPSC(15, 1, dsp, pll0_sysclk1, dsp_clkdev, LPSC_FORCE | LPSC_LOCAL_RESET),
+ { }
+};
+
+LPSC_CLKDEV3(usb0_clkdev, "fck", "da830-usb-phy-clks",
+ NULL, "musb-da8xx",
+ NULL, "cppi41-dmaengine");
+LPSC_CLKDEV1(usb1_clkdev, NULL, "ohci-da8xx");
+/* REVISIT: gpio-davinci.c should be modified to drop con_id */
+LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
+LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
+ "fck", "davinci_mdio.0");
+LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
+LPSC_CLKDEV1(sata_clkdev, "fck", "ahci_da850");
+LPSC_CLKDEV1(vpif_clkdev, NULL, "vpif");
+LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
+LPSC_CLKDEV1(i2c1_clkdev, NULL, "i2c_davinci.2");
+LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
+LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
+LPSC_CLKDEV1(mcbsp0_clkdev, NULL, "davinci-mcbsp.0");
+LPSC_CLKDEV1(mcbsp1_clkdev, NULL, "davinci-mcbsp.1");
+LPSC_CLKDEV1(lcdc_clkdev, "fck", "da8xx_lcdc.0");
+LPSC_CLKDEV3(ehrpwm_clkdev, "fck", "ehrpwm.0",
+ "fck", "ehrpwm.1",
+ NULL, "da830-tbclksync");
+LPSC_CLKDEV1(mmcsd1_clkdev, NULL, "da830-mmc.1");
+LPSC_CLKDEV3(ecap_clkdev, "fck", "ecap.0",
+ "fck", "ecap.1",
+ "fck", "ecap.2");
+
+static struct reset_control_lookup da850_psc0_reset_lookup_table[] = {
+ RESET_LOOKUP("da850-psc0", 15, "davinci-rproc.0", NULL),
+};
+
+static int da850_psc0_init(struct device *dev, void __iomem *base)
+{
+ reset_controller_add_lookup(da850_psc0_reset_lookup_table,
+ ARRAY_SIZE(da850_psc0_reset_lookup_table));
+ return davinci_psc_register_clocks(dev, da850_psc0_info, 16, base);
+}
+
+static int of_da850_psc0_init(struct device *dev, void __iomem *base)
+{
+ return of_davinci_psc_clk_init(dev, da850_psc0_info, 16, base);
+}
+
+static struct clk_bulk_data da850_psc0_parent_clks[] = {
+ { .id = "pll0_sysclk1" },
+ { .id = "pll0_sysclk2" },
+ { .id = "pll0_sysclk4" },
+ { .id = "pll0_sysclk6" },
+ { .id = "async1" },
+};
+
+const struct davinci_psc_init_data da850_psc0_init_data = {
+ .parent_clks = da850_psc0_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(da850_psc0_parent_clks),
+ .psc_init = &da850_psc0_init,
+};
+
+const struct davinci_psc_init_data of_da850_psc0_init_data = {
+ .parent_clks = da850_psc0_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(da850_psc0_parent_clks),
+ .psc_init = &of_da850_psc0_init,
+};
+
+static const struct davinci_lpsc_clk_info da850_psc1_info[] = {
+ LPSC(0, 0, tpcc1, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(1, 0, usb0, pll0_sysclk2, usb0_clkdev, 0),
+ LPSC(2, 0, usb1, pll0_sysclk4, usb1_clkdev, 0),
+ LPSC(3, 0, gpio, pll0_sysclk4, gpio_clkdev, 0),
+ LPSC(5, 0, emac, pll0_sysclk4, emac_clkdev, 0),
+ LPSC(6, 0, ddr, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(7, 0, mcasp0, async3, mcasp0_clkdev, 0),
+ LPSC(8, 0, sata, pll0_sysclk2, sata_clkdev, LPSC_FORCE),
+ LPSC(9, 0, vpif, pll0_sysclk2, vpif_clkdev, 0),
+ LPSC(10, 0, spi1, async3, spi1_clkdev, 0),
+ LPSC(11, 0, i2c1, pll0_sysclk4, i2c1_clkdev, 0),
+ LPSC(12, 0, uart1, async3, uart1_clkdev, 0),
+ LPSC(13, 0, uart2, async3, uart2_clkdev, 0),
+ LPSC(14, 0, mcbsp0, async3, mcbsp0_clkdev, 0),
+ LPSC(15, 0, mcbsp1, async3, mcbsp1_clkdev, 0),
+ LPSC(16, 0, lcdc, pll0_sysclk2, lcdc_clkdev, 0),
+ LPSC(17, 0, ehrpwm, async3, ehrpwm_clkdev, 0),
+ LPSC(18, 0, mmcsd1, pll0_sysclk2, mmcsd1_clkdev, 0),
+ LPSC(20, 0, ecap, async3, ecap_clkdev, 0),
+ LPSC(21, 0, tptc2, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ { }
+};
+
+static int da850_psc1_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, da850_psc1_info, 32, base);
+}
+
+static int of_da850_psc1_init(struct device *dev, void __iomem *base)
+{
+ return of_davinci_psc_clk_init(dev, da850_psc1_info, 32, base);
+}
+
+static struct clk_bulk_data da850_psc1_parent_clks[] = {
+ { .id = "pll0_sysclk2" },
+ { .id = "pll0_sysclk4" },
+ { .id = "async3" },
+};
+
+const struct davinci_psc_init_data da850_psc1_init_data = {
+ .parent_clks = da850_psc1_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(da850_psc1_parent_clks),
+ .psc_init = &da850_psc1_init,
+};
+
+const struct davinci_psc_init_data of_da850_psc1_init_data = {
+ .parent_clks = da850_psc1_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(da850_psc1_parent_clks),
+ .psc_init = &of_da850_psc1_init,
+};
diff --git a/drivers/clk/davinci/psc-dm355.c b/drivers/clk/davinci/psc-dm355.c
new file mode 100644
index 000000000000..6995ecea2677
--- /dev/null
+++ b/drivers/clk/davinci/psc-dm355.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PSC clock descriptions for TI DaVinci DM355
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+LPSC_CLKDEV1(vpss_master_clkdev, "master", "vpss");
+LPSC_CLKDEV1(vpss_slave_clkdev, "slave", "vpss");
+LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
+LPSC_CLKDEV1(mmcsd1_clkdev, NULL, "dm6441-mmc.1");
+LPSC_CLKDEV1(mcbsp1_clkdev, NULL, "davinci-mcbsp.1");
+LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
+LPSC_CLKDEV1(spi2_clkdev, NULL, "spi_davinci.2");
+LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "dm6441-mmc.0");
+LPSC_CLKDEV1(mcbsp0_clkdev, NULL, "davinci-mcbsp.0");
+LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
+LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
+LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
+LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
+LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
+/* REVISIT: gpio-davinci.c should be modified to drop con_id */
+LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
+LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
+LPSC_CLKDEV1(timer2_clkdev, NULL, "davinci-wdt");
+LPSC_CLKDEV1(vpss_dac_clkdev, "vpss_dac", NULL);
+
+static const struct davinci_lpsc_clk_info dm355_psc_info[] = {
+ LPSC(0, 0, vpss_master, pll1_sysclk4, vpss_master_clkdev, 0),
+ LPSC(1, 0, vpss_slave, pll1_sysclk4, vpss_slave_clkdev, 0),
+ LPSC(5, 0, timer3, pll1_auxclk, NULL, 0),
+ LPSC(6, 0, spi1, pll1_sysclk2, spi1_clkdev, 0),
+ LPSC(7, 0, mmcsd1, pll1_sysclk2, mmcsd1_clkdev, 0),
+ LPSC(8, 0, asp1, pll1_sysclk2, NULL, 0),
+ LPSC(9, 0, usb, pll1_sysclk2, usb_clkdev, 0),
+ LPSC(10, 0, pwm3, pll1_auxclk, NULL, 0),
+ LPSC(11, 0, spi2, pll1_sysclk2, spi2_clkdev, 0),
+ LPSC(12, 0, rto, pll1_auxclk, NULL, 0),
+ LPSC(14, 0, aemif, pll1_sysclk2, aemif_clkdev, 0),
+ LPSC(15, 0, mmcsd0, pll1_sysclk2, mmcsd0_clkdev, 0),
+ LPSC(17, 0, asp0, pll1_sysclk2, NULL, 0),
+ LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
+ LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
+ LPSC(20, 0, uart1, pll1_auxclk, uart1_clkdev, 0),
+ LPSC(21, 0, uart2, pll1_sysclk2, uart2_clkdev, 0),
+ LPSC(22, 0, spi0, pll1_sysclk2, spi0_clkdev, 0),
+ LPSC(23, 0, pwm0, pll1_auxclk, NULL, 0),
+ LPSC(24, 0, pwm1, pll1_auxclk, NULL, 0),
+ LPSC(25, 0, pwm2, pll1_auxclk, NULL, 0),
+ LPSC(26, 0, gpio, pll1_sysclk2, gpio_clkdev, 0),
+ LPSC(27, 0, timer0, pll1_auxclk, timer0_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(28, 0, timer1, pll1_auxclk, NULL, 0),
+ /* REVISIT: why can't this be disabled? */
+ LPSC(29, 0, timer2, pll1_auxclk, timer2_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(31, 0, arm, pll1_sysclk1, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(40, 0, mjcp, pll1_sysclk1, NULL, 0),
+ LPSC(41, 0, vpss_dac, pll1_sysclk3, vpss_dac_clkdev, 0),
+ { }
+};
+
+static int dm355_psc_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, dm355_psc_info, 42, base);
+}
+
+static struct clk_bulk_data dm355_psc_parent_clks[] = {
+ { .id = "pll1_sysclk1" },
+ { .id = "pll1_sysclk2" },
+ { .id = "pll1_sysclk3" },
+ { .id = "pll1_sysclk4" },
+ { .id = "pll1_auxclk" },
+};
+
+const struct davinci_psc_init_data dm355_psc_init_data = {
+ .parent_clks = dm355_psc_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(dm355_psc_parent_clks),
+ .psc_init = &dm355_psc_init,
+};
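For reference, a minimal sketch of what one LPSC_CLKDEV1()/LPSC() pair from the dm355 table above expands to, based on the macro definitions added to psc.h further down in this diff; the spi1 entry is used as the example and the expansion is illustrative only, not part of the commit:

	static const struct davinci_lpsc_clkdev_info spi1_clkdev[] __initconst = {
		{ .con_id = NULL, .dev_id = "spi_davinci.1" },	/* LPSC_CLKDEV(NULL, "spi_davinci.1") */
		{ }						/* sentinel */
	};

	/* LPSC(6, 0, spi1, pll1_sysclk2, spi1_clkdev, 0) becomes: */
	{
		.name	= "spi1",
		.parent	= "pll1_sysclk2",
		.cdevs	= spi1_clkdev,
		.md	= 6,
		.pd	= 0,
		.flags	= 0,
	},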
diff --git a/drivers/clk/davinci/psc-dm365.c b/drivers/clk/davinci/psc-dm365.c
new file mode 100644
index 000000000000..3ad915f37376
--- /dev/null
+++ b/drivers/clk/davinci/psc-dm365.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PSC clock descriptions for TI DaVinci DM365
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+LPSC_CLKDEV1(vpss_slave_clkdev, "slave", "vpss");
+LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
+LPSC_CLKDEV1(mmcsd1_clkdev, NULL, "da830-mmc.1");
+LPSC_CLKDEV1(asp0_clkdev, NULL, "davinci-mcbsp");
+LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
+LPSC_CLKDEV1(spi2_clkdev, NULL, "spi_davinci.2");
+LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "da830-mmc.0");
+LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
+LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
+LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
+LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
+/* REVISIT: gpio-davinci.c should be modified to drop con_id */
+LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
+LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
+LPSC_CLKDEV1(timer2_clkdev, NULL, "davinci-wdt");
+LPSC_CLKDEV1(spi3_clkdev, NULL, "spi_davinci.3");
+LPSC_CLKDEV1(spi4_clkdev, NULL, "spi_davinci.4");
+LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
+ "fck", "davinci_mdio.0");
+LPSC_CLKDEV1(voice_codec_clkdev, NULL, "davinci_voicecodec");
+LPSC_CLKDEV1(vpss_dac_clkdev, "vpss_dac", NULL);
+LPSC_CLKDEV1(vpss_master_clkdev, "master", "vpss");
+
+static const struct davinci_lpsc_clk_info dm365_psc_info[] = {
+ LPSC(1, 0, vpss_slave, pll1_sysclk5, vpss_slave_clkdev, 0),
+ LPSC(5, 0, timer3, pll1_auxclk, NULL, 0),
+ LPSC(6, 0, spi1, pll1_sysclk4, spi1_clkdev, 0),
+ LPSC(7, 0, mmcsd1, pll1_sysclk4, mmcsd1_clkdev, 0),
+ LPSC(8, 0, asp0, pll1_sysclk4, asp0_clkdev, 0),
+ LPSC(9, 0, usb, pll1_auxclk, usb_clkdev, 0),
+ LPSC(10, 0, pwm3, pll1_auxclk, NULL, 0),
+ LPSC(11, 0, spi2, pll1_sysclk4, spi2_clkdev, 0),
+ LPSC(12, 0, rto, pll1_sysclk4, NULL, 0),
+ LPSC(14, 0, aemif, pll1_sysclk4, aemif_clkdev, 0),
+ LPSC(15, 0, mmcsd0, pll1_sysclk8, mmcsd0_clkdev, 0),
+ LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
+ LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
+ LPSC(20, 0, uart1, pll1_sysclk4, uart1_clkdev, 0),
+ LPSC(22, 0, spi0, pll1_sysclk4, spi0_clkdev, 0),
+ LPSC(23, 0, pwm0, pll1_auxclk, NULL, 0),
+ LPSC(24, 0, pwm1, pll1_auxclk, NULL, 0),
+ LPSC(25, 0, pwm2, pll1_auxclk, NULL, 0),
+ LPSC(26, 0, gpio, pll1_sysclk4, gpio_clkdev, 0),
+ LPSC(27, 0, timer0, pll1_auxclk, timer0_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(28, 0, timer1, pll1_auxclk, NULL, 0),
+ /* REVISIT: why can't this be disabled? */
+ LPSC(29, 0, timer2, pll1_auxclk, timer2_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(31, 0, arm, pll2_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(38, 0, spi3, pll1_sysclk4, spi3_clkdev, 0),
+ LPSC(39, 0, spi4, pll1_auxclk, spi4_clkdev, 0),
+ LPSC(40, 0, emac, pll2_sysclk4, emac_clkdev, 0),
+ LPSC(44, 1, voice_codec, pll1_sysclk3, voice_codec_clkdev, 0),
+ LPSC(46, 1, vpss_dac, pll1_sysclk3, vpss_dac_clkdev, 0),
+ LPSC(47, 0, vpss_master, pll1_sysclk5, vpss_master_clkdev, 0),
+ LPSC(50, 0, mjcp, pll1_sysclk3, NULL, 0),
+ { }
+};
+
+static int dm365_psc_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, dm365_psc_info, 52, base);
+}
+
+static struct clk_bulk_data dm365_psc_parent_clks[] = {
+ { .id = "pll1_sysclk1" },
+ { .id = "pll1_sysclk3" },
+ { .id = "pll1_sysclk4" },
+ { .id = "pll1_sysclk5" },
+ { .id = "pll1_sysclk8" },
+ { .id = "pll2_sysclk2" },
+ { .id = "pll2_sysclk4" },
+ { .id = "pll1_auxclk" },
+};
+
+const struct davinci_psc_init_data dm365_psc_init_data = {
+ .parent_clks = dm365_psc_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(dm365_psc_parent_clks),
+ .psc_init = &dm365_psc_init,
+};
diff --git a/drivers/clk/davinci/psc-dm644x.c b/drivers/clk/davinci/psc-dm644x.c
new file mode 100644
index 000000000000..c22367baa46f
--- /dev/null
+++ b/drivers/clk/davinci/psc-dm644x.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PSC clock descriptions for TI DaVinci DM644x
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+LPSC_CLKDEV1(vpss_master_clkdev, "master", "vpss");
+LPSC_CLKDEV1(vpss_slave_clkdev, "slave", "vpss");
+LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
+ "fck", "davinci_mdio.0");
+LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
+LPSC_CLKDEV1(ide_clkdev, NULL, "palm_bk3710");
+LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV1(mmcsd_clkdev, NULL, "dm6441-mmc.0");
+LPSC_CLKDEV1(asp0_clkdev, NULL, "davinci-mcbsp");
+LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
+LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
+LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
+LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
+/* REVISIT: gpio-davinci.c should be modified to drop con_id */
+LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
+LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
+LPSC_CLKDEV1(timer2_clkdev, NULL, "davinci-wdt");
+
+static const struct davinci_lpsc_clk_info dm644x_psc_info[] = {
+ LPSC(0, 0, vpss_master, pll1_sysclk3, vpss_master_clkdev, 0),
+ LPSC(1, 0, vpss_slave, pll1_sysclk3, vpss_slave_clkdev, 0),
+ LPSC(6, 0, emac, pll1_sysclk5, emac_clkdev, 0),
+ LPSC(9, 0, usb, pll1_sysclk5, usb_clkdev, 0),
+ LPSC(10, 0, ide, pll1_sysclk5, ide_clkdev, 0),
+ LPSC(11, 0, vlynq, pll1_sysclk5, NULL, 0),
+ LPSC(14, 0, aemif, pll1_sysclk5, aemif_clkdev, 0),
+ LPSC(15, 0, mmcsd, pll1_sysclk5, mmcsd_clkdev, 0),
+ LPSC(17, 0, asp0, pll1_sysclk5, asp0_clkdev, 0),
+ LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
+ LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
+ LPSC(20, 0, uart1, pll1_auxclk, uart1_clkdev, 0),
+ LPSC(21, 0, uart2, pll1_auxclk, uart2_clkdev, 0),
+ LPSC(22, 0, spi, pll1_sysclk5, NULL, 0),
+ LPSC(23, 0, pwm0, pll1_auxclk, NULL, 0),
+ LPSC(24, 0, pwm1, pll1_auxclk, NULL, 0),
+ LPSC(25, 0, pwm2, pll1_auxclk, NULL, 0),
+ LPSC(26, 0, gpio, pll1_sysclk5, gpio_clkdev, 0),
+ LPSC(27, 0, timer0, pll1_auxclk, timer0_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(28, 0, timer1, pll1_auxclk, NULL, 0),
+ /* REVISIT: why can't this be disabled? */
+ LPSC(29, 0, timer2, pll1_auxclk, timer2_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(31, 0, arm, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ /* REVISIT how to disable? */
+ LPSC(39, 1, dsp, pll1_sysclk1, NULL, LPSC_ALWAYS_ENABLED),
+ /* REVISIT how to disable? */
+ LPSC(40, 1, vicp, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ { }
+};
+
+static int dm644x_psc_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, dm644x_psc_info, 41, base);
+}
+
+static struct clk_bulk_data dm644x_psc_parent_clks[] = {
+ { .id = "pll1_sysclk1" },
+ { .id = "pll1_sysclk2" },
+ { .id = "pll1_sysclk3" },
+ { .id = "pll1_sysclk5" },
+ { .id = "pll1_auxclk" },
+};
+
+const struct davinci_psc_init_data dm644x_psc_init_data = {
+ .parent_clks = dm644x_psc_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(dm644x_psc_parent_clks),
+ .psc_init = &dm644x_psc_init,
+};
diff --git a/drivers/clk/davinci/psc-dm646x.c b/drivers/clk/davinci/psc-dm646x.c
new file mode 100644
index 000000000000..468ef86ea40b
--- /dev/null
+++ b/drivers/clk/davinci/psc-dm646x.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PSC clock descriptions for TI DaVinci DM646x
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+LPSC_CLKDEV1(ide_clkdev, NULL, "palm_bk3710");
+LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
+ "fck", "davinci_mdio.0");
+LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
+LPSC_CLKDEV1(mcasp1_clkdev, NULL, "davinci-mcasp.1");
+LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
+LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
+LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
+LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
+/* REVISIT: gpio-davinci.c should be modified to drop con_id */
+LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
+LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
+
+static const struct davinci_lpsc_clk_info dm646x_psc_info[] = {
+ LPSC(0, 0, arm, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ /* REVISIT how to disable? */
+ LPSC(1, 0, dsp, pll1_sysclk1, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(4, 0, edma_cc, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(5, 0, edma_tc0, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(6, 0, edma_tc1, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(7, 0, edma_tc2, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(8, 0, edma_tc3, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(10, 0, ide, pll1_sysclk4, ide_clkdev, 0),
+ LPSC(14, 0, emac, pll1_sysclk3, emac_clkdev, 0),
+ LPSC(16, 0, vpif0, ref_clk, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(17, 0, vpif1, ref_clk, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(21, 0, aemif, pll1_sysclk3, aemif_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(22, 0, mcasp0, pll1_sysclk3, mcasp0_clkdev, 0),
+ LPSC(23, 0, mcasp1, pll1_sysclk3, mcasp1_clkdev, 0),
+ LPSC(26, 0, uart0, aux_clkin, uart0_clkdev, 0),
+ LPSC(27, 0, uart1, aux_clkin, uart1_clkdev, 0),
+ LPSC(28, 0, uart2, aux_clkin, uart2_clkdev, 0),
+ /* REVISIT: disabling hangs system */
+ LPSC(29, 0, pwm0, pll1_sysclk3, NULL, LPSC_ALWAYS_ENABLED),
+ /* REVISIT: disabling hangs system */
+ LPSC(30, 0, pwm1, pll1_sysclk3, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(31, 0, i2c, pll1_sysclk3, i2c_clkdev, 0),
+ LPSC(33, 0, gpio, pll1_sysclk3, gpio_clkdev, 0),
+ LPSC(34, 0, timer0, pll1_sysclk3, timer0_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(35, 0, timer1, pll1_sysclk3, NULL, 0),
+ { }
+};
+
+static int dm646x_psc_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, dm646x_psc_info, 46, base);
+}
+
+static struct clk_bulk_data dm646x_psc_parent_clks[] = {
+ { .id = "ref_clk" },
+ { .id = "aux_clkin" },
+ { .id = "pll1_sysclk1" },
+ { .id = "pll1_sysclk2" },
+ { .id = "pll1_sysclk3" },
+ { .id = "pll1_sysclk4" },
+ { .id = "pll1_sysclk5" },
+};
+
+const struct davinci_psc_init_data dm646x_psc_init_data = {
+ .parent_clks = dm646x_psc_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(dm646x_psc_parent_clks),
+ .psc_init = &dm646x_psc_init,
+};
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
new file mode 100644
index 000000000000..ce170e600f09
--- /dev/null
+++ b/drivers/clk/davinci/psc.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock driver for TI Davinci PSC controllers
+ *
+ * Copyright (C) 2017 David Lechner <david@lechnology.com>
+ *
+ * Based on: drivers/clk/keystone/gate.c
+ * Copyright (C) 2013 Texas Instruments.
+ * Murali Karicheri <m-karicheri2@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * And: arch/arm/mach-davinci/psc.c
+ * Copyright (C) 2006 Texas Instruments.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+/* PSC register offsets */
+#define EPCPR 0x070
+#define PTCMD 0x120
+#define PTSTAT 0x128
+#define PDSTAT(n) (0x200 + 4 * (n))
+#define PDCTL(n) (0x300 + 4 * (n))
+#define MDSTAT(n) (0x800 + 4 * (n))
+#define MDCTL(n) (0xa00 + 4 * (n))
+
+/* PSC module states */
+enum davinci_lpsc_state {
+ LPSC_STATE_SWRSTDISABLE = 0,
+ LPSC_STATE_SYNCRST = 1,
+ LPSC_STATE_DISABLE = 2,
+ LPSC_STATE_ENABLE = 3,
+};
+
+#define MDSTAT_STATE_MASK GENMASK(5, 0)
+#define MDSTAT_MCKOUT BIT(12)
+#define PDSTAT_STATE_MASK GENMASK(4, 0)
+#define MDCTL_FORCE BIT(31)
+#define MDCTL_LRESET BIT(8)
+#define PDCTL_EPCGOOD BIT(8)
+#define PDCTL_NEXT BIT(0)
+
+struct davinci_psc_data {
+ struct clk_onecell_data clk_data;
+ struct genpd_onecell_data pm_data;
+ struct reset_controller_dev rcdev;
+};
+
+/**
+ * struct davinci_lpsc_clk - LPSC clock structure
+ * @dev: the device that provides this LPSC
+ * @hw: clk_hw for the LPSC
+ * @pm_domain: power domain for the LPSC
+ * @genpd_clk: clock reference owned by @pm_domain
+ * @regmap: PSC MMIO region
+ * @md: Module domain (LPSC module id)
+ * @pd: Power domain
+ * @flags: LPSC_* quirk flags
+ */
+struct davinci_lpsc_clk {
+ struct device *dev;
+ struct clk_hw hw;
+ struct generic_pm_domain pm_domain;
+ struct clk *genpd_clk;
+ struct regmap *regmap;
+ u32 md;
+ u32 pd;
+ u32 flags;
+};
+
+#define to_davinci_psc_data(x) container_of(x, struct davinci_psc_data, x)
+#define to_davinci_lpsc_clk(x) container_of(x, struct davinci_lpsc_clk, x)
+
+/**
+ * best_dev_name - get the "best" device name.
+ * @dev: the device
+ *
+ * Returns the device tree compatible name if the device has a DT node,
+ * otherwise return the device name. This is mainly needed because clkdev
+ * lookups are limited to 20 chars for dev_id and when using device tree,
+ * dev_name(dev) is much longer than that.
+ */
+static inline const char *best_dev_name(struct device *dev)
+{
+ const char *compatible;
+
+ if (!of_property_read_string(dev->of_node, "compatible", &compatible))
+ return compatible;
+
+ return dev_name(dev);
+}
+
+static void davinci_lpsc_config(struct davinci_lpsc_clk *lpsc,
+ enum davinci_lpsc_state next_state)
+{
+ u32 epcpr, pdstat, mdstat, ptstat;
+
+ regmap_write_bits(lpsc->regmap, MDCTL(lpsc->md), MDSTAT_STATE_MASK,
+ next_state);
+
+ if (lpsc->flags & LPSC_FORCE)
+ regmap_write_bits(lpsc->regmap, MDCTL(lpsc->md), MDCTL_FORCE,
+ MDCTL_FORCE);
+
+ regmap_read(lpsc->regmap, PDSTAT(lpsc->pd), &pdstat);
+ if ((pdstat & PDSTAT_STATE_MASK) == 0) {
+ regmap_write_bits(lpsc->regmap, PDCTL(lpsc->pd), PDCTL_NEXT,
+ PDCTL_NEXT);
+
+ regmap_write(lpsc->regmap, PTCMD, BIT(lpsc->pd));
+
+ regmap_read_poll_timeout(lpsc->regmap, EPCPR, epcpr,
+ epcpr & BIT(lpsc->pd), 0, 0);
+
+ regmap_write_bits(lpsc->regmap, PDCTL(lpsc->pd), PDCTL_EPCGOOD,
+ PDCTL_EPCGOOD);
+ } else {
+ regmap_write(lpsc->regmap, PTCMD, BIT(lpsc->pd));
+ }
+
+ regmap_read_poll_timeout(lpsc->regmap, PTSTAT, ptstat,
+ !(ptstat & BIT(lpsc->pd)), 0, 0);
+
+ regmap_read_poll_timeout(lpsc->regmap, MDSTAT(lpsc->md), mdstat,
+ (mdstat & MDSTAT_STATE_MASK) == next_state,
+ 0, 0);
+}
+
+static int davinci_lpsc_clk_enable(struct clk_hw *hw)
+{
+ struct davinci_lpsc_clk *lpsc = to_davinci_lpsc_clk(hw);
+
+ davinci_lpsc_config(lpsc, LPSC_STATE_ENABLE);
+
+ return 0;
+}
+
+static void davinci_lpsc_clk_disable(struct clk_hw *hw)
+{
+ struct davinci_lpsc_clk *lpsc = to_davinci_lpsc_clk(hw);
+
+ davinci_lpsc_config(lpsc, LPSC_STATE_DISABLE);
+}
+
+static int davinci_lpsc_clk_is_enabled(struct clk_hw *hw)
+{
+ struct davinci_lpsc_clk *lpsc = to_davinci_lpsc_clk(hw);
+ u32 mdstat;
+
+ regmap_read(lpsc->regmap, MDSTAT(lpsc->md), &mdstat);
+
+ return (mdstat & MDSTAT_MCKOUT) ? 1 : 0;
+}
+
+static const struct clk_ops davinci_lpsc_clk_ops = {
+ .enable = davinci_lpsc_clk_enable,
+ .disable = davinci_lpsc_clk_disable,
+ .is_enabled = davinci_lpsc_clk_is_enabled,
+};
+
+static int davinci_psc_genpd_attach_dev(struct generic_pm_domain *pm_domain,
+ struct device *dev)
+{
+ struct davinci_lpsc_clk *lpsc = to_davinci_lpsc_clk(pm_domain);
+ struct clk *clk;
+ int ret;
+
+ /*
+ * pm_clk_remove_clk() will call clk_put(), so we have to use clk_get()
+ * to get the clock instead of using lpsc->hw.clk directly.
+ */
+ clk = clk_get_sys(best_dev_name(lpsc->dev), clk_hw_get_name(&lpsc->hw));
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = pm_clk_create(dev);
+ if (ret < 0)
+ goto fail_clk_put;
+
+ ret = pm_clk_add_clk(dev, clk);
+ if (ret < 0)
+ goto fail_pm_clk_destroy;
+
+ lpsc->genpd_clk = clk;
+
+ return 0;
+
+fail_pm_clk_destroy:
+ pm_clk_destroy(dev);
+fail_clk_put:
+ clk_put(clk);
+
+ return ret;
+}
+
+static void davinci_psc_genpd_detach_dev(struct generic_pm_domain *pm_domain,
+ struct device *dev)
+{
+ struct davinci_lpsc_clk *lpsc = to_davinci_lpsc_clk(pm_domain);
+
+ pm_clk_remove_clk(dev, lpsc->genpd_clk);
+ pm_clk_destroy(dev);
+
+ lpsc->genpd_clk = NULL;
+}
+
+/**
+ * davinci_lpsc_clk_register - register LPSC clock
+ * @dev: the clock provider device
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @regmap: PSC MMIO region
+ * @md: local PSC number
+ * @pd: power domain
+ * @flags: LPSC_* flags
+ */
+static struct davinci_lpsc_clk *
+davinci_lpsc_clk_register(struct device *dev, const char *name,
+ const char *parent_name, struct regmap *regmap,
+ u32 md, u32 pd, u32 flags)
+{
+ struct clk_init_data init;
+ struct davinci_lpsc_clk *lpsc;
+ int ret;
+ bool is_on;
+
+ lpsc = devm_kzalloc(dev, sizeof(*lpsc), GFP_KERNEL);
+ if (!lpsc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &davinci_lpsc_clk_ops;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ init.flags = 0;
+
+ if (flags & LPSC_ALWAYS_ENABLED)
+ init.flags |= CLK_IS_CRITICAL;
+
+ if (flags & LPSC_SET_RATE_PARENT)
+ init.flags |= CLK_SET_RATE_PARENT;
+
+ lpsc->dev = dev;
+ lpsc->regmap = regmap;
+ lpsc->hw.init = &init;
+ lpsc->md = md;
+ lpsc->pd = pd;
+ lpsc->flags = flags;
+
+ ret = devm_clk_hw_register(dev, &lpsc->hw);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* genpd attach needs a way to look up this clock */
+ ret = clk_hw_register_clkdev(&lpsc->hw, name, best_dev_name(dev));
+
+ lpsc->pm_domain.name = devm_kasprintf(dev, GFP_KERNEL, "%s: %s",
+ best_dev_name(dev), name);
+ lpsc->pm_domain.attach_dev = davinci_psc_genpd_attach_dev;
+ lpsc->pm_domain.detach_dev = davinci_psc_genpd_detach_dev;
+ lpsc->pm_domain.flags = GENPD_FLAG_PM_CLK;
+
+ is_on = davinci_lpsc_clk_is_enabled(&lpsc->hw);
+ pm_genpd_init(&lpsc->pm_domain, NULL, is_on);
+
+ return lpsc;
+}
+
+static int davinci_lpsc_clk_reset(struct clk *clk, bool reset)
+{
+ struct clk_hw *hw = __clk_get_hw(clk);
+ struct davinci_lpsc_clk *lpsc = to_davinci_lpsc_clk(hw);
+ u32 mdctl;
+
+ if (IS_ERR_OR_NULL(lpsc))
+ return -EINVAL;
+
+ mdctl = reset ? 0 : MDCTL_LRESET;
+ regmap_write_bits(lpsc->regmap, MDCTL(lpsc->md), MDCTL_LRESET, mdctl);
+
+ return 0;
+}
+
+/*
+ * REVISIT: These exported functions can be removed after a non-DT lookup is
+ * added to the reset controller framework and the davinci-rproc driver is
+ * updated to use the generic reset controller framework.
+ */
+
+int davinci_clk_reset_assert(struct clk *clk)
+{
+ return davinci_lpsc_clk_reset(clk, true);
+}
+EXPORT_SYMBOL(davinci_clk_reset_assert);
+
+int davinci_clk_reset_deassert(struct clk *clk)
+{
+ return davinci_lpsc_clk_reset(clk, false);
+}
+EXPORT_SYMBOL(davinci_clk_reset_deassert);
+
+static int davinci_psc_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct davinci_psc_data *psc = to_davinci_psc_data(rcdev);
+ struct clk *clk = psc->clk_data.clks[id];
+
+ return davinci_lpsc_clk_reset(clk, true);
+}
+
+static int davinci_psc_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct davinci_psc_data *psc = to_davinci_psc_data(rcdev);
+ struct clk *clk = psc->clk_data.clks[id];
+
+ return davinci_lpsc_clk_reset(clk, false);
+}
+
+static const struct reset_control_ops davinci_psc_reset_ops = {
+ .assert = davinci_psc_reset_assert,
+ .deassert = davinci_psc_reset_deassert,
+};
+
+static int davinci_psc_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct of_phandle_args clkspec = *reset_spec; /* discard const qualifier */
+ struct clk *clk;
+ struct clk_hw *hw;
+ struct davinci_lpsc_clk *lpsc;
+
+ /* the clock node is the same as the reset node */
+ clk = of_clk_get_from_provider(&clkspec);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ hw = __clk_get_hw(clk);
+ lpsc = to_davinci_lpsc_clk(hw);
+ clk_put(clk);
+
+ /* not all modules support local reset */
+ if (!(lpsc->flags & LPSC_LOCAL_RESET))
+ return -EINVAL;
+
+ return lpsc->md;
+}
+
+static const struct regmap_config davinci_psc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static struct davinci_psc_data *
+__davinci_psc_register_clocks(struct device *dev,
+ const struct davinci_lpsc_clk_info *info,
+ int num_clks,
+ void __iomem *base)
+{
+ struct davinci_psc_data *psc;
+ struct clk **clks;
+ struct generic_pm_domain **pm_domains;
+ struct regmap *regmap;
+ int i, ret;
+
+ psc = devm_kzalloc(dev, sizeof(*psc), GFP_KERNEL);
+ if (!psc)
+ return ERR_PTR(-ENOMEM);
+
+ clks = devm_kmalloc_array(dev, num_clks, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return ERR_PTR(-ENOMEM);
+
+ psc->clk_data.clks = clks;
+ psc->clk_data.clk_num = num_clks;
+
+ /*
+ * init array with error so that of_clk_src_onecell_get() doesn't
+ * return NULL for gaps in the sparse array
+ */
+ for (i = 0; i < num_clks; i++)
+ clks[i] = ERR_PTR(-ENOENT);
+
+ pm_domains = devm_kcalloc(dev, num_clks, sizeof(*pm_domains), GFP_KERNEL);
+ if (!pm_domains)
+ return ERR_PTR(-ENOMEM);
+
+ psc->pm_data.domains = pm_domains;
+ psc->pm_data.num_domains = num_clks;
+
+ regmap = devm_regmap_init_mmio(dev, base, &davinci_psc_regmap_config);
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ for (; info->name; info++) {
+ struct davinci_lpsc_clk *lpsc;
+
+ lpsc = davinci_lpsc_clk_register(dev, info->name, info->parent,
+ regmap, info->md, info->pd,
+ info->flags);
+ if (IS_ERR(lpsc)) {
+ dev_warn(dev, "Failed to register %s (%ld)\n",
+ info->name, PTR_ERR(lpsc));
+ continue;
+ }
+
+ clks[info->md] = lpsc->hw.clk;
+ pm_domains[info->md] = &lpsc->pm_domain;
+ }
+
+ psc->rcdev.ops = &davinci_psc_reset_ops;
+ psc->rcdev.owner = THIS_MODULE;
+ psc->rcdev.dev = dev;
+ psc->rcdev.of_node = dev->of_node;
+ psc->rcdev.of_reset_n_cells = 1;
+ psc->rcdev.of_xlate = davinci_psc_reset_of_xlate;
+ psc->rcdev.nr_resets = num_clks;
+
+ ret = devm_reset_controller_register(dev, &psc->rcdev);
+ if (ret < 0)
+ dev_warn(dev, "Failed to register reset controller (%d)\n", ret);
+
+ return psc;
+}
+
+int davinci_psc_register_clocks(struct device *dev,
+ const struct davinci_lpsc_clk_info *info,
+ u8 num_clks,
+ void __iomem *base)
+{
+ struct davinci_psc_data *psc;
+
+ psc = __davinci_psc_register_clocks(dev, info, num_clks, base);
+ if (IS_ERR(psc))
+ return PTR_ERR(psc);
+
+ for (; info->name; info++) {
+ const struct davinci_lpsc_clkdev_info *cdevs = info->cdevs;
+ struct clk *clk = psc->clk_data.clks[info->md];
+
+ if (!cdevs || IS_ERR_OR_NULL(clk))
+ continue;
+
+ for (; cdevs->con_id || cdevs->dev_id; cdevs++)
+ clk_register_clkdev(clk, cdevs->con_id, cdevs->dev_id);
+ }
+
+ return 0;
+}
+
+int of_davinci_psc_clk_init(struct device *dev,
+ const struct davinci_lpsc_clk_info *info,
+ u8 num_clks,
+ void __iomem *base)
+{
+ struct device_node *node = dev->of_node;
+ struct davinci_psc_data *psc;
+
+ psc = __davinci_psc_register_clocks(dev, info, num_clks, base);
+ if (IS_ERR(psc))
+ return PTR_ERR(psc);
+
+ of_genpd_add_provider_onecell(node, &psc->pm_data);
+
+ of_clk_add_provider(node, of_clk_src_onecell_get, &psc->clk_data);
+
+ return 0;
+}
+
+static const struct of_device_id davinci_psc_of_match[] = {
+ { .compatible = "ti,da850-psc0", .data = &of_da850_psc0_init_data },
+ { .compatible = "ti,da850-psc1", .data = &of_da850_psc1_init_data },
+ { }
+};
+
+static const struct platform_device_id davinci_psc_id_table[] = {
+ { .name = "da830-psc0", .driver_data = (kernel_ulong_t)&da830_psc0_init_data },
+ { .name = "da830-psc1", .driver_data = (kernel_ulong_t)&da830_psc1_init_data },
+ { .name = "da850-psc0", .driver_data = (kernel_ulong_t)&da850_psc0_init_data },
+ { .name = "da850-psc1", .driver_data = (kernel_ulong_t)&da850_psc1_init_data },
+ { .name = "dm355-psc", .driver_data = (kernel_ulong_t)&dm355_psc_init_data },
+ { .name = "dm365-psc", .driver_data = (kernel_ulong_t)&dm365_psc_init_data },
+ { .name = "dm644x-psc", .driver_data = (kernel_ulong_t)&dm644x_psc_init_data },
+ { .name = "dm646x-psc", .driver_data = (kernel_ulong_t)&dm646x_psc_init_data },
+ { }
+};
+
+static int davinci_psc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ const struct davinci_psc_init_data *init_data = NULL;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ of_id = of_match_device(davinci_psc_of_match, dev);
+ if (of_id)
+ init_data = of_id->data;
+ else if (pdev->id_entry)
+ init_data = (void *)pdev->id_entry->driver_data;
+
+ if (!init_data) {
+ dev_err(dev, "unable to find driver init data\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = devm_clk_bulk_get(dev, init_data->num_parent_clks,
+ init_data->parent_clks);
+ if (ret < 0)
+ return ret;
+
+ return init_data->psc_init(dev, base);
+}
+
+static struct platform_driver davinci_psc_driver = {
+ .probe = davinci_psc_probe,
+ .driver = {
+ .name = "davinci-psc-clk",
+ .of_match_table = davinci_psc_of_match,
+ },
+ .id_table = davinci_psc_id_table,
+};
+
+static int __init davinci_psc_driver_init(void)
+{
+ return platform_driver_register(&davinci_psc_driver);
+}
+
+/* has to be postcore_initcall because davinci_gpio depends on PSC clocks */
+postcore_initcall(davinci_psc_driver_init);
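Because each LPSC is exposed as a generic PM domain with GENPD_FLAG_PM_CLK, a driver whose device sits in one of these domains never touches the clock directly; the pm_clk list filled in by davinci_psc_genpd_attach_dev() gates the LPSC from runtime PM. A minimal consumer sketch, assuming a hypothetical device bound to one of the domains above (illustrative only, not part of the commit):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int foo_probe(struct platform_device *pdev)
	{
		pm_runtime_enable(&pdev->dev);

		/* genpd's pm_clk resume path enables the LPSC clock */
		pm_runtime_get_sync(&pdev->dev);

		/* ... program the hardware ... */

		/* lets genpd disable the LPSC clock again when idle */
		pm_runtime_put(&pdev->dev);
		return 0;
	}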
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
new file mode 100644
index 000000000000..c2a7df6413fe
--- /dev/null
+++ b/drivers/clk/davinci/psc.h
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock driver for TI Davinci PSC controllers
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __CLK_DAVINCI_PSC_H__
+#define __CLK_DAVINCI_PSC_H__
+
+#include <linux/clk-provider.h>
+#include <linux/types.h>
+
+/* PSC quirk flags */
+#define LPSC_ALWAYS_ENABLED BIT(0) /* never disable this clock */
+#define LPSC_SET_RATE_PARENT BIT(1) /* propagate set_rate to parent clock */
+#define LPSC_FORCE BIT(2) /* requires MDCTL FORCE bit */
+#define LPSC_LOCAL_RESET BIT(3) /* acts as reset provider */
+
+struct davinci_lpsc_clkdev_info {
+ const char *con_id;
+ const char *dev_id;
+};
+
+#define LPSC_CLKDEV(c, d) { \
+ .con_id = (c), \
+ .dev_id = (d) \
+}
+
+#define LPSC_CLKDEV1(n, c, d) \
+static const struct davinci_lpsc_clkdev_info n[] __initconst = { \
+ LPSC_CLKDEV((c), (d)), \
+ { } \
+}
+
+#define LPSC_CLKDEV2(n, c1, d1, c2, d2) \
+static const struct davinci_lpsc_clkdev_info n[] __initconst = { \
+ LPSC_CLKDEV((c1), (d1)), \
+ LPSC_CLKDEV((c2), (d2)), \
+ { } \
+}
+
+#define LPSC_CLKDEV3(n, c1, d1, c2, d2, c3, d3) \
+static const struct davinci_lpsc_clkdev_info n[] __initconst = { \
+ LPSC_CLKDEV((c1), (d1)), \
+ LPSC_CLKDEV((c2), (d2)), \
+ LPSC_CLKDEV((c3), (d3)), \
+ { } \
+}
+
+/**
+ * struct davinci_lpsc_clk_info - LPSC module-specific clock information
+ * @name: the clock name
+ * @parent: the parent clock name
+ * @cdevs: optional array of clkdev lookup table info
+ * @md: the local module domain (LPSC id)
+ * @pd: the power domain id
+ * @flags: bitmask of LPSC_* flags
+ */
+struct davinci_lpsc_clk_info {
+ const char *name;
+ const char *parent;
+ const struct davinci_lpsc_clkdev_info *cdevs;
+ u32 md;
+ u32 pd;
+ unsigned long flags;
+};
+
+#define LPSC(m, d, n, p, c, f) \
+{ \
+ .name = #n, \
+ .parent = #p, \
+ .cdevs = (c), \
+ .md = (m), \
+ .pd = (d), \
+ .flags = (f), \
+}
+
+int davinci_psc_register_clocks(struct device *dev,
+ const struct davinci_lpsc_clk_info *info,
+ u8 num_clks,
+ void __iomem *base);
+
+int of_davinci_psc_clk_init(struct device *dev,
+ const struct davinci_lpsc_clk_info *info,
+ u8 num_clks,
+ void __iomem *base);
+
+/* Device-specific data */
+
+struct davinci_psc_init_data {
+ struct clk_bulk_data *parent_clks;
+ int num_parent_clks;
+ int (*psc_init)(struct device *dev, void __iomem *base);
+};
+
+extern const struct davinci_psc_init_data da830_psc0_init_data;
+extern const struct davinci_psc_init_data da830_psc1_init_data;
+extern const struct davinci_psc_init_data da850_psc0_init_data;
+extern const struct davinci_psc_init_data da850_psc1_init_data;
+extern const struct davinci_psc_init_data of_da850_psc0_init_data;
+extern const struct davinci_psc_init_data of_da850_psc1_init_data;
+extern const struct davinci_psc_init_data dm355_psc_init_data;
+extern const struct davinci_psc_init_data dm365_psc_init_data;
+extern const struct davinci_psc_init_data dm644x_psc_init_data;
+extern const struct davinci_psc_init_data dm646x_psc_init_data;
+
+#endif /* __CLK_DAVINCI_PSC_H__ */
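The clkdev tables declared with the LPSC_CLKDEV*() helpers above exist so that legacy, non-DT board code and drivers can keep doing plain clk_get() lookups by device name. A minimal sketch, assuming a driver probed as "spi_davinci.1" so that it matches the spi1_clkdev entry from psc-dm355.c (illustrative only, not part of the commit):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int spi1_clock_on(struct platform_device *pdev)
	{
		/* NULL con_id + dev_id "spi_davinci.1" hits the clkdev entry */
		struct clk *clk = clk_get(&pdev->dev, NULL);

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* drives the spi1 LPSC to the ENABLE state */
		return clk_prepare_enable(clk);
	}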
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index 4806fc2cb4ac..2a714c0f9657 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -3,7 +3,7 @@
# Hisilicon Clock specific Makefile
#
-obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o
+obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o clk-hisi-phase.o
obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o
obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o
diff --git a/drivers/clk/hisilicon/clk-hisi-phase.c b/drivers/clk/hisilicon/clk-hisi-phase.c
new file mode 100644
index 000000000000..5bce9297b78b
--- /dev/null
+++ b/drivers/clk/hisilicon/clk-hisi-phase.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 HiSilicon Technologies Co., Ltd.
+ *
+ * Simple HiSilicon phase clock implementation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+struct clk_hisi_phase {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u32 *phase_degrees;
+ u32 *phase_regvals;
+ u8 phase_num;
+ u32 mask;
+ u8 shift;
+ u8 flags;
+ spinlock_t *lock;
+};
+
+#define to_clk_hisi_phase(_hw) container_of(_hw, struct clk_hisi_phase, hw)
+
+static int hisi_phase_regval_to_degrees(struct clk_hisi_phase *phase,
+ u32 regval)
+{
+ int i;
+
+ for (i = 0; i < phase->phase_num; i++)
+ if (phase->phase_regvals[i] == regval)
+ return phase->phase_degrees[i];
+
+ return -EINVAL;
+}
+
+static int hisi_clk_get_phase(struct clk_hw *hw)
+{
+ struct clk_hisi_phase *phase = to_clk_hisi_phase(hw);
+ u32 regval;
+
+ regval = readl(phase->reg);
+ regval = (regval & phase->mask) >> phase->shift;
+
+ return hisi_phase_regval_to_degrees(phase, regval);
+}
+
+static int hisi_phase_degrees_to_regval(struct clk_hisi_phase *phase,
+ int degrees)
+{
+ int i;
+
+ for (i = 0; i < phase->phase_num; i++)
+ if (phase->phase_degrees[i] == degrees)
+ return phase->phase_regvals[i];
+
+ return -EINVAL;
+}
+
+static int hisi_clk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct clk_hisi_phase *phase = to_clk_hisi_phase(hw);
+ unsigned long flags = 0;
+ int regval;
+ u32 val;
+
+ regval = hisi_phase_degrees_to_regval(phase, degrees);
+ if (regval < 0)
+ return regval;
+
+ spin_lock_irqsave(phase->lock, flags);
+
+ val = clk_readl(phase->reg);
+ val &= ~phase->mask;
+ val |= regval << phase->shift;
+ clk_writel(val, phase->reg);
+
+ spin_unlock_irqrestore(phase->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_phase_ops = {
+ .get_phase = hisi_clk_get_phase,
+ .set_phase = hisi_clk_set_phase,
+};
+
+struct clk *clk_register_hisi_phase(struct device *dev,
+ const struct hisi_phase_clock *clks,
+ void __iomem *base, spinlock_t *lock)
+{
+ struct clk_hisi_phase *phase;
+ struct clk_init_data init;
+
+ phase = devm_kzalloc(dev, sizeof(struct clk_hisi_phase), GFP_KERNEL);
+ if (!phase)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = clks->name;
+ init.ops = &clk_phase_ops;
+ init.flags = clks->flags | CLK_IS_BASIC;
+ init.parent_names = clks->parent_names ? &clks->parent_names : NULL;
+ init.num_parents = clks->parent_names ? 1 : 0;
+
+ phase->reg = base + clks->offset;
+ phase->shift = clks->shift;
+ phase->mask = (BIT(clks->width) - 1) << clks->shift;
+ phase->lock = lock;
+ phase->phase_degrees = clks->phase_degrees;
+ phase->phase_regvals = clks->phase_regvals;
+ phase->phase_num = clks->phase_num;
+ phase->hw.init = &init;
+
+ return devm_clk_register(dev, &phase->hw);
+}
+EXPORT_SYMBOL_GPL(clk_register_hisi_phase);
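The phase clock registered above only translates between degrees and register values; tuning itself goes through the generic clk phase API. A minimal consumer sketch, assuming an MMC host holding the "mmc_sample" clock registered later in this diff (the clock handle is illustrative; not part of the commit):

	#include <linux/clk.h>

	static int tune_sample_phase(struct clk *sample_clk)
	{
		int ret;

		/* 180 degrees maps to regval 4 via mmc_phase_degrees[] */
		ret = clk_set_phase(sample_clk, 180);
		if (ret)
			return ret;

		return clk_get_phase(sample_clk);	/* reads back 180 */
	}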
diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c
index b73c1dfae7f1..953c8dacef8b 100644
--- a/drivers/clk/hisilicon/clk.c
+++ b/drivers/clk/hisilicon/clk.c
@@ -49,6 +49,8 @@ struct hisi_clock_data *hisi_clk_alloc(struct platform_device *pdev,
return NULL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return NULL;
clk_data->base = devm_ioremap(&pdev->dev,
res->start, resource_size(res));
if (!clk_data->base)
@@ -197,6 +199,30 @@ err:
}
EXPORT_SYMBOL_GPL(hisi_clk_register_mux);
+int hisi_clk_register_phase(struct device *dev,
+ const struct hisi_phase_clock *clks,
+ int nums, struct hisi_clock_data *data)
+{
+ void __iomem *base = data->base;
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = clk_register_hisi_phase(dev, &clks[i], base,
+ &hisi_clk_lock);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ clks[i].name);
+ return PTR_ERR(clk);
+ }
+
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_clk_register_phase);
+
int hisi_clk_register_divider(const struct hisi_divider_clock *clks,
int nums, struct hisi_clock_data *data)
{
diff --git a/drivers/clk/hisilicon/clk.h b/drivers/clk/hisilicon/clk.h
index 4e1d1affc6f5..8d7ee5c3231b 100644
--- a/drivers/clk/hisilicon/clk.h
+++ b/drivers/clk/hisilicon/clk.h
@@ -68,6 +68,19 @@ struct hisi_mux_clock {
const char *alias;
};
+struct hisi_phase_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_names;
+ unsigned long flags;
+ unsigned long offset;
+ u8 shift;
+ u8 width;
+ u32 *phase_degrees;
+ u32 *phase_regvals;
+ u8 phase_num;
+};
+
struct hisi_divider_clock {
unsigned int id;
const char *name;
@@ -120,6 +133,12 @@ int hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *,
int, struct hisi_clock_data *);
int hisi_clk_register_mux(const struct hisi_mux_clock *, int,
struct hisi_clock_data *);
+struct clk *clk_register_hisi_phase(struct device *dev,
+ const struct hisi_phase_clock *clks,
+ void __iomem *base, spinlock_t *lock);
+int hisi_clk_register_phase(struct device *dev,
+ const struct hisi_phase_clock *clks,
+ int nums, struct hisi_clock_data *data);
int hisi_clk_register_divider(const struct hisi_divider_clock *,
int, struct hisi_clock_data *);
int hisi_clk_register_gate(const struct hisi_gate_clock *,
diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c
index 2007123832bb..53450b651e4c 100644
--- a/drivers/clk/hisilicon/crg-hi3516cv300.c
+++ b/drivers/clk/hisilicon/crg-hi3516cv300.c
@@ -204,7 +204,7 @@ static const struct hisi_crg_funcs hi3516cv300_crg_funcs = {
/* hi3516CV300 sysctrl CRG */
#define HI3516CV300_SYSCTRL_NR_CLKS 16
-static const char *wdt_mux_p[] __initconst = { "3m", "apb" };
+static const char *const wdt_mux_p[] __initconst = { "3m", "apb" };
static u32 wdt_mux_table[] = {0, 1};
static const struct hisi_mux_clock hi3516cv300_sysctrl_mux_clks[] = {
diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c
index 8478948e858e..743eec131528 100644
--- a/drivers/clk/hisilicon/crg-hi3798cv200.c
+++ b/drivers/clk/hisilicon/crg-hi3798cv200.c
@@ -27,30 +27,31 @@
#include "reset.h"
/* hi3798CV200 core CRG */
-#define HI3798CV200_INNER_CLK_OFFSET 64
-#define HI3798CV200_FIXED_24M 65
-#define HI3798CV200_FIXED_25M 66
-#define HI3798CV200_FIXED_50M 67
-#define HI3798CV200_FIXED_75M 68
-#define HI3798CV200_FIXED_100M 69
-#define HI3798CV200_FIXED_150M 70
-#define HI3798CV200_FIXED_200M 71
-#define HI3798CV200_FIXED_250M 72
-#define HI3798CV200_FIXED_300M 73
-#define HI3798CV200_FIXED_400M 74
-#define HI3798CV200_MMC_MUX 75
-#define HI3798CV200_ETH_PUB_CLK 76
-#define HI3798CV200_ETH_BUS_CLK 77
-#define HI3798CV200_ETH_BUS0_CLK 78
-#define HI3798CV200_ETH_BUS1_CLK 79
-#define HI3798CV200_COMBPHY1_MUX 80
-#define HI3798CV200_FIXED_12M 81
-#define HI3798CV200_FIXED_48M 82
-#define HI3798CV200_FIXED_60M 83
-#define HI3798CV200_FIXED_166P5M 84
-#define HI3798CV200_SDIO0_MUX 85
-
-#define HI3798CV200_CRG_NR_CLKS 128
+#define HI3798CV200_INNER_CLK_OFFSET 64
+#define HI3798CV200_FIXED_24M 65
+#define HI3798CV200_FIXED_25M 66
+#define HI3798CV200_FIXED_50M 67
+#define HI3798CV200_FIXED_75M 68
+#define HI3798CV200_FIXED_100M 69
+#define HI3798CV200_FIXED_150M 70
+#define HI3798CV200_FIXED_200M 71
+#define HI3798CV200_FIXED_250M 72
+#define HI3798CV200_FIXED_300M 73
+#define HI3798CV200_FIXED_400M 74
+#define HI3798CV200_MMC_MUX 75
+#define HI3798CV200_ETH_PUB_CLK 76
+#define HI3798CV200_ETH_BUS_CLK 77
+#define HI3798CV200_ETH_BUS0_CLK 78
+#define HI3798CV200_ETH_BUS1_CLK 79
+#define HI3798CV200_COMBPHY1_MUX 80
+#define HI3798CV200_FIXED_12M 81
+#define HI3798CV200_FIXED_48M 82
+#define HI3798CV200_FIXED_60M 83
+#define HI3798CV200_FIXED_166P5M 84
+#define HI3798CV200_SDIO0_MUX 85
+#define HI3798CV200_COMBPHY0_MUX 86
+
+#define HI3798CV200_CRG_NR_CLKS 128
static const struct hisi_fixed_rate_clock hi3798cv200_fixed_rate_clks[] = {
{ HISTB_OSC_CLK, "clk_osc", NULL, 0, 24000000, },
@@ -74,9 +75,9 @@ static const char *const mmc_mux_p[] = {
"100m", "50m", "25m", "200m", "150m" };
static u32 mmc_mux_table[] = {0, 1, 2, 3, 6};
-static const char *const comphy1_mux_p[] = {
+static const char *const comphy_mux_p[] = {
"100m", "25m"};
-static u32 comphy1_mux_table[] = {2, 3};
+static u32 comphy_mux_table[] = {2, 3};
static const char *const sdio_mux_p[] = {
"100m", "50m", "150m", "166p5m" };
@@ -85,14 +86,29 @@ static u32 sdio_mux_table[] = {0, 1, 2, 3};
static struct hisi_mux_clock hi3798cv200_mux_clks[] = {
{ HI3798CV200_MMC_MUX, "mmc_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
CLK_SET_RATE_PARENT, 0xa0, 8, 3, 0, mmc_mux_table, },
+ { HI3798CV200_COMBPHY0_MUX, "combphy0_mux",
+ comphy_mux_p, ARRAY_SIZE(comphy_mux_p),
+ CLK_SET_RATE_PARENT, 0x188, 2, 2, 0, comphy_mux_table, },
{ HI3798CV200_COMBPHY1_MUX, "combphy1_mux",
- comphy1_mux_p, ARRAY_SIZE(comphy1_mux_p),
- CLK_SET_RATE_PARENT, 0x188, 10, 2, 0, comphy1_mux_table, },
+ comphy_mux_p, ARRAY_SIZE(comphy_mux_p),
+ CLK_SET_RATE_PARENT, 0x188, 10, 2, 0, comphy_mux_table, },
{ HI3798CV200_SDIO0_MUX, "sdio0_mux", sdio_mux_p,
ARRAY_SIZE(sdio_mux_p), CLK_SET_RATE_PARENT,
0x9c, 8, 2, 0, sdio_mux_table, },
};
+static u32 mmc_phase_regvals[] = {0, 1, 2, 3, 4, 5, 6, 7};
+static u32 mmc_phase_degrees[] = {0, 45, 90, 135, 180, 225, 270, 315};
+
+static struct hisi_phase_clock hi3798cv200_phase_clks[] = {
+ { HISTB_MMC_SAMPLE_CLK, "mmc_sample", "clk_mmc_ciu",
+ CLK_SET_RATE_PARENT, 0xa0, 12, 3, mmc_phase_degrees,
+ mmc_phase_regvals, ARRAY_SIZE(mmc_phase_regvals) },
+ { HISTB_MMC_DRV_CLK, "mmc_drive", "clk_mmc_ciu",
+ CLK_SET_RATE_PARENT, 0xa0, 16, 3, mmc_phase_degrees,
+ mmc_phase_regvals, ARRAY_SIZE(mmc_phase_regvals) },
+};
+
static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
/* UART */
{ HISTB_UART2_CLK, "clk_uart2", "75m",
@@ -147,6 +163,9 @@ static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
CLK_SET_RATE_PARENT, 0xcc, 4, 0, },
{ HISTB_ETH1_MACIF_CLK, "clk_macif1", "clk_bus_m1",
CLK_SET_RATE_PARENT, 0xcc, 25, 0, },
+ /* COMBPHY0 */
+ { HISTB_COMBPHY0_CLK, "clk_combphy0", "combphy0_mux",
+ CLK_SET_RATE_PARENT, 0x188, 0, 0, },
/* COMBPHY1 */
{ HISTB_COMBPHY1_CLK, "clk_combphy1", "combphy1_mux",
CLK_SET_RATE_PARENT, 0x188, 8, 0, },
@@ -161,6 +180,8 @@ static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
CLK_SET_RATE_PARENT, 0xb8, 1, 0 },
{ HISTB_USB2_UTMI_CLK, "clk_u2_utmi", "60m",
CLK_SET_RATE_PARENT, 0xb8, 5, 0 },
+ { HISTB_USB2_OTG_UTMI_CLK, "clk_u2_otg_utmi", "60m",
+ CLK_SET_RATE_PARENT, 0xb8, 3, 0 },
{ HISTB_USB2_PHY1_REF_CLK, "clk_u2_phy1_ref", "24m",
CLK_SET_RATE_PARENT, 0xbc, 0, 0 },
{ HISTB_USB2_PHY2_REF_CLK, "clk_u2_phy2_ref", "24m",
@@ -177,6 +198,14 @@ static struct hisi_clock_data *hi3798cv200_clk_register(
if (!clk_data)
return ERR_PTR(-ENOMEM);
+ /* hisi_phase_clock is resource managed */
+ ret = hisi_clk_register_phase(&pdev->dev,
+ hi3798cv200_phase_clks,
+ ARRAY_SIZE(hi3798cv200_phase_clks),
+ clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
ret = hisi_clk_register_fixed_rate(hi3798cv200_fixed_rate_clks,
ARRAY_SIZE(hi3798cv200_fixed_rate_clks),
clk_data);
@@ -202,18 +231,17 @@ static struct hisi_clock_data *hi3798cv200_clk_register(
return clk_data;
-unregister_fixed_rate:
- hisi_clk_unregister_fixed_rate(hi3798cv200_fixed_rate_clks,
- ARRAY_SIZE(hi3798cv200_fixed_rate_clks),
+unregister_gate:
+ hisi_clk_unregister_gate(hi3798cv200_gate_clks,
+ ARRAY_SIZE(hi3798cv200_gate_clks),
clk_data);
-
unregister_mux:
hisi_clk_unregister_mux(hi3798cv200_mux_clks,
ARRAY_SIZE(hi3798cv200_mux_clks),
clk_data);
-unregister_gate:
- hisi_clk_unregister_gate(hi3798cv200_gate_clks,
- ARRAY_SIZE(hi3798cv200_gate_clks),
+unregister_fixed_rate:
+ hisi_clk_unregister_fixed_rate(hi3798cv200_fixed_rate_clks,
+ ARRAY_SIZE(hi3798cv200_fixed_rate_clks),
clk_data);
return ERR_PTR(ret);
}
@@ -245,7 +273,7 @@ static const struct hisi_crg_funcs hi3798cv200_crg_funcs = {
#define HI3798CV200_SYSCTRL_NR_CLKS 16
static const struct hisi_gate_clock hi3798cv200_sysctrl_gate_clks[] = {
- { HISTB_IR_CLK, "clk_ir", "100m",
+ { HISTB_IR_CLK, "clk_ir", "24m",
CLK_SET_RATE_PARENT, 0x48, 4, 0, },
{ HISTB_TIMER01_CLK, "clk_timer01", "24m",
CLK_SET_RATE_PARENT, 0x48, 6, 0, },
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index f91f2b2e11cd..8c3baa7e6496 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_SOC_IMX35) += clk-imx35.o
obj-$(CONFIG_SOC_IMX5) += clk-imx51-imx53.o
obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o
obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o
+obj-$(CONFIG_SOC_IMX6SLL) += clk-imx6sll.o
obj-$(CONFIG_SOC_IMX6SX) += clk-imx6sx.o
obj-$(CONFIG_SOC_IMX6UL) += clk-imx6ul.o
obj-$(CONFIG_SOC_IMX7D) += clk-imx7d.o
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index 6df3389687bc..99036527eb0d 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -101,7 +101,7 @@ struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
init.name = name;
init.ops = &clk_busy_divider_ops;
- init.flags = CLK_SET_RATE_PARENT;
+ init.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -175,7 +175,7 @@ struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
init.name = name;
init.ops = &clk_busy_mux_ops;
- init.flags = 0;
+ init.flags = CLK_IS_CRITICAL;
init.parent_names = parent_names;
init.num_parents = num_parents;
diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
new file mode 100644
index 000000000000..3651c77fbabe
--- /dev/null
+++ b/drivers/clk/imx/clk-imx6sll.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP.
+ */
+
+#include <dt-bindings/clock/imx6sll-clock.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk.h"
+
+#define CCM_ANALOG_PLL_BYPASS (0x1 << 16)
+#define BM_CCM_CCDR_MMDC_CH0_MASK (0x2 << 16)
+#define xPLL_CLR(offset) (offset + 0x8)
+
+static const char *pll_bypass_src_sels[] = { "osc", "dummy", };
+static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", };
+static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", };
+static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", };
+static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", };
+static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", };
+static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", };
+static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
+static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
+static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
+static const char *axi_alt_sels[] = { "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *axi_sels[] = {"periph", "axi_alt_sel", };
+static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
+static const char *periph2_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll4_audio_div", };
+static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", };
+static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "osc", };
+static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
+static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
+static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *ssi_sels[] = {"pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio_div", "dummy",};
+static const char *spdif_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll5_video_div", "pll3_usb_otg", };
+static const char *ldb_di0_div_sels[] = { "ldb_di0_div_3_5", "ldb_di0_div_7", };
+static const char *ldb_di1_div_sels[] = { "ldb_di1_div_3_5", "ldb_di1_div_7", };
+static const char *ldb_di0_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_pfd3_594m", "pll2_pfd1_594m", "pll3_pfd3_454m", };
+static const char *ldb_di1_sels[] = { "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_bus", "pll3_pfd3_454m", "pll3_pfd2_508m", };
+static const char *lcdif_pre_sels[] = { "pll2_bus", "pll3_pfd3_454m", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd1_594m", "pll3_pfd1_540m", };
+static const char *ecspi_sels[] = { "pll3_60m", "osc", };
+static const char *uart_sels[] = { "pll3_80m", "osc", };
+static const char *perclk_sels[] = { "ipg", "osc", };
+static const char *lcdif_sels[] = { "lcdif_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
+
+static const char *epdc_pre_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd2_508m", };
+static const char *epdc_sels[] = { "epdc_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
+
+static struct clk *clks[IMX6SLL_CLK_END];
+static struct clk_onecell_data clk_data;
+
+static const struct clk_div_table post_div_table[] = {
+ { .val = 2, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 0, .div = 4, },
+ { }
+};
+
+static const struct clk_div_table video_div_table[] = {
+ { .val = 0, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 2, .div = 1, },
+ { .val = 3, .div = 4, },
+ { }
+};
+
+static u32 share_count_audio;
+static u32 share_count_ssi1;
+static u32 share_count_ssi2;
+static u32 share_count_ssi3;
+
+static void __init imx6sll_clocks_init(struct device_node *ccm_node)
+{
+ struct device_node *np;
+ void __iomem *base;
+
+ clks[IMX6SLL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
+
+ clks[IMX6SLL_CLK_CKIL] = of_clk_get_by_name(ccm_node, "ckil");
+ clks[IMX6SLL_CLK_OSC] = of_clk_get_by_name(ccm_node, "osc");
+
+ /* ipp_di clock is external input */
+ clks[IMX6SLL_CLK_IPP_DI0] = of_clk_get_by_name(ccm_node, "ipp_di0");
+ clks[IMX6SLL_CLK_IPP_DI1] = of_clk_get_by_name(ccm_node, "ipp_di1");
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6sll-anatop");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ /* Do not bypass PLLs initially */
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x0));
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x10));
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x20));
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x30));
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x70));
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0xa0));
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0xe0));
+
+ clks[IMX6SLL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SLL_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SLL_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SLL_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SLL_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SLL_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SLL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+
+ clks[IMX6SLL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f);
+ clks[IMX6SLL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1);
+ clks[IMX6SLL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3);
+ clks[IMX6SLL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f);
+ clks[IMX6SLL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f);
+ clks[IMX6SLL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3);
+ clks[IMX6SLL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3);
+
+ clks[IMX6SLL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SLL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SLL_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SLL_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SLL_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SLL_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SLL_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
+
+ clks[IMX6SLL_CLK_PLL1_SYS] = imx_clk_fixed_factor("pll1_sys", "pll1_bypass", 1, 1);
+ clks[IMX6SLL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13);
+ clks[IMX6SLL_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13);
+ clks[IMX6SLL_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13);
+ clks[IMX6SLL_CLK_PLL5_VIDEO] = imx_clk_gate("pll5_video", "pll5_bypass", base + 0xa0, 13);
+ clks[IMX6SLL_CLK_PLL6_ENET] = imx_clk_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13);
+ clks[IMX6SLL_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13);
+
+ /*
+ * Bit 20 is a reserved, read-only bit; using it here means:
+ * - usbphy clk_enable/disable do nothing in hardware
+ * - the refcount is still tracked across usbphy clk_enable/disable,
+ *   since the clk framework may need to enable/disable usbphy's parent
+ */
+ clks[IMX6SLL_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20);
+ clks[IMX6SLL_CLK_USBPHY2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20);
+
+ /*
+ * usbphy*_gate needs to be on after system boots up, and software
+ * never needs to control it anymore.
+ */
+ if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
+ clks[IMX6SLL_CLK_USBPHY1_GATE] = imx_clk_gate_flags("usbphy1_gate", "dummy", base + 0x10, 6, CLK_IS_CRITICAL);
+ clks[IMX6SLL_CLK_USBPHY2_GATE] = imx_clk_gate_flags("usbphy2_gate", "dummy", base + 0x20, 6, CLK_IS_CRITICAL);
+ }
+
+ /* name parent_name reg idx */
+ clks[IMX6SLL_CLK_PLL2_PFD0] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
+ clks[IMX6SLL_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
+ clks[IMX6SLL_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2);
+ clks[IMX6SLL_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3);
+ clks[IMX6SLL_CLK_PLL3_PFD0] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0);
+ clks[IMX6SLL_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1);
+ clks[IMX6SLL_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2);
+ clks[IMX6SLL_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3);
+
+ clks[IMX6SLL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
+ clks[IMX6SLL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 15, 1, 0, &imx_ccm_lock);
+ clks[IMX6SLL_CLK_PLL5_POST_DIV] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock);
+ clks[IMX6SLL_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
+
+ /* name parent_name mult div */
+ clks[IMX6SLL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
+ clks[IMX6SLL_CLK_PLL3_120M] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4);
+ clks[IMX6SLL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
+ clks[IMX6SLL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
+
+ np = ccm_node;
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ clks[IMX6SLL_CLK_STEP] = imx_clk_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels));
+ clks[IMX6SLL_CLK_PLL1_SW] = imx_clk_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0);
+ clks[IMX6SLL_CLK_AXI_ALT_SEL] = imx_clk_mux("axi_alt_sel", base + 0x14, 7, 1, axi_alt_sels, ARRAY_SIZE(axi_alt_sels));
+ clks[IMX6SLL_CLK_AXI_SEL] = imx_clk_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0);
+ clks[IMX6SLL_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
+ clks[IMX6SLL_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels));
+ clks[IMX6SLL_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
+ clks[IMX6SLL_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels));
+ clks[IMX6SLL_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clks[IMX6SLL_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clks[IMX6SLL_CLK_USDHC3_SEL] = imx_clk_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clks[IMX6SLL_CLK_SSI1_SEL] = imx_clk_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clks[IMX6SLL_CLK_SSI2_SEL] = imx_clk_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clks[IMX6SLL_CLK_SSI3_SEL] = imx_clk_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clks[IMX6SLL_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
+ clks[IMX6SLL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
+ clks[IMX6SLL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels));
+ clks[IMX6SLL_CLK_EXTERN_AUDIO_SEL] = imx_clk_mux("extern_audio_sel", base + 0x30, 7, 2, spdif_sels, ARRAY_SIZE(spdif_sels));
+ clks[IMX6SLL_CLK_EPDC_PRE_SEL] = imx_clk_mux("epdc_pre_sel", base + 0x34, 15, 3, epdc_pre_sels, ARRAY_SIZE(epdc_pre_sels));
+ clks[IMX6SLL_CLK_EPDC_SEL] = imx_clk_mux("epdc_sel", base + 0x34, 9, 3, epdc_sels, ARRAY_SIZE(epdc_sels));
+ clks[IMX6SLL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
+ clks[IMX6SLL_CLK_LCDIF_PRE_SEL] = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels));
+ clks[IMX6SLL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
+
+ clks[IMX6SLL_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels));
+ clks[IMX6SLL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));
+
+ clks[IMX6SLL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
+ clks[IMX6SLL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
+ clks[IMX6SLL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
+ clks[IMX6SLL_CLK_LCDIF_PODF] = imx_clk_divider("lcdif_podf", "lcdif_pred", base + 0x18, 23, 3);
+ clks[IMX6SLL_CLK_PERCLK] = imx_clk_divider("perclk", "perclk_sel", base + 0x1c, 0, 6);
+ clks[IMX6SLL_CLK_USDHC3_PODF] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3);
+ clks[IMX6SLL_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3);
+ clks[IMX6SLL_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3);
+ clks[IMX6SLL_CLK_UART_PODF] = imx_clk_divider("uart_podf", "uart_sel", base + 0x24, 0, 6);
+ clks[IMX6SLL_CLK_SSI3_PRED] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3);
+ clks[IMX6SLL_CLK_SSI3_PODF] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6);
+ clks[IMX6SLL_CLK_SSI1_PRED] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3);
+ clks[IMX6SLL_CLK_SSI1_PODF] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6);
+ clks[IMX6SLL_CLK_SSI2_PRED] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3);
+ clks[IMX6SLL_CLK_SSI2_PODF] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6);
+ clks[IMX6SLL_CLK_SPDIF_PRED] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
+ clks[IMX6SLL_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
+ clks[IMX6SLL_CLK_EXTERN_AUDIO_PRED] = imx_clk_divider("extern_audio_pred", "extern_audio_sel", base + 0x30, 12, 3);
+ clks[IMX6SLL_CLK_EXTERN_AUDIO_PODF] = imx_clk_divider("extern_audio_podf", "extern_audio_pred", base + 0x30, 9, 3);
+ clks[IMX6SLL_CLK_EPDC_PODF] = imx_clk_divider("epdc_podf", "epdc_pre_sel", base + 0x34, 12, 3);
+ clks[IMX6SLL_CLK_ECSPI_PODF] = imx_clk_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6);
+ clks[IMX6SLL_CLK_LCDIF_PRED] = imx_clk_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3);
+
+ clks[IMX6SLL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
+ clks[IMX6SLL_CLK_MMDC_PODF] = imx_clk_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
+ clks[IMX6SLL_CLK_AXI_PODF] = imx_clk_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0);
+ clks[IMX6SLL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
+
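+	/* the LDB divide-by-3.5 is modelled as a 2/7 fixed factor */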
+ clks[IMX6SLL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+ clks[IMX6SLL_CLK_LDB_DI0_DIV_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7);
+ clks[IMX6SLL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+ clks[IMX6SLL_CLK_LDB_DI1_DIV_7] = imx_clk_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7);
+
+ clks[IMX6SLL_CLK_LDB_DI0_SEL] = imx_clk_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels));
+ clks[IMX6SLL_CLK_LDB_DI1_SEL] = imx_clk_mux("ldb_di1_sel", base + 0x1c, 7, 3, ldb_di1_sels, ARRAY_SIZE(ldb_di1_sels));
+ clks[IMX6SLL_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
+ clks[IMX6SLL_CLK_LDB_DI1_DIV_SEL] = imx_clk_mux("ldb_di1_div_sel", base + 0x20, 10, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels));
+
+ /* CCGR0 */
+ clks[IMX6SLL_CLK_AIPSTZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL);
+ clks[IMX6SLL_CLK_AIPSTZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL);
+ clks[IMX6SLL_CLK_DCP] = imx_clk_gate2("dcp", "ahb", base + 0x68, 10);
+ clks[IMX6SLL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28);
+ clks[IMX6SLL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
+
+ /* CCGR1 */
+ clks[IMX6SLL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
+ clks[IMX6SLL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2);
+ clks[IMX6SLL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4);
+ clks[IMX6SLL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6);
+ clks[IMX6SLL_CLK_UART3_IPG] = imx_clk_gate2("uart3_ipg", "ipg", base + 0x6c, 10);
+ clks[IMX6SLL_CLK_UART3_SERIAL] = imx_clk_gate2("uart3_serial", "uart_podf", base + 0x6c, 10);
+ clks[IMX6SLL_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12);
+ clks[IMX6SLL_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14);
+ clks[IMX6SLL_CLK_GPT_BUS] = imx_clk_gate2("gpt1_bus", "perclk", base + 0x6c, 20);
+ clks[IMX6SLL_CLK_GPT_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
+ clks[IMX6SLL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
+	clks[IMX6SLL_CLK_UART4_SERIAL]	= imx_clk_gate2("uart4_serial",	"uart_podf", base + 0x6c, 24);
+
+ /* CCGR2 */
+ clks[IMX6SLL_CLK_CSI] = imx_clk_gate2("csi", "axi", base + 0x70, 2);
+ clks[IMX6SLL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6);
+ clks[IMX6SLL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8);
+ clks[IMX6SLL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
+ clks[IMX6SLL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
+ clks[IMX6SLL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28);
+ clks[IMX6SLL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30);
+
+ /* CCGR3 */
+ clks[IMX6SLL_CLK_UART5_IPG] = imx_clk_gate2("uart5_ipg", "ipg", base + 0x74, 2);
+ clks[IMX6SLL_CLK_UART5_SERIAL] = imx_clk_gate2("uart5_serial", "uart_podf", base + 0x74, 2);
+ clks[IMX6SLL_CLK_EPDC_AXI] = imx_clk_gate2("epdc_aclk", "axi", base + 0x74, 4);
+ clks[IMX6SLL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_podf", base + 0x74, 4);
+ clks[IMX6SLL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
+ clks[IMX6SLL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16);
+ clks[IMX6SLL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
+ clks[IMX6SLL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
+	clks[IMX6SLL_CLK_OCRAM]		= imx_clk_gate_flags("ocram", "ahb", base + 0x74, 28, CLK_IS_CRITICAL);
+
+ /* CCGR4 */
+ clks[IMX6SLL_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16);
+ clks[IMX6SLL_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18);
+ clks[IMX6SLL_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20);
+ clks[IMX6SLL_CLK_PWM4] = imx_clk_gate2("pwm4", "perclk", base + 0x78, 22);
+
+ /* CCGR5 */
+ clks[IMX6SLL_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
+ clks[IMX6SLL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
+ clks[IMX6SLL_CLK_WDOG2] = imx_clk_gate2("wdog2", "ipg", base + 0x7c, 10);
+ clks[IMX6SLL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
+ clks[IMX6SLL_CLK_EXTERN_AUDIO] = imx_clk_gate2_shared("extern_audio", "extern_audio_podf", base + 0x7c, 14, &share_count_audio);
+ clks[IMX6SLL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio);
+ clks[IMX6SLL_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio);
+ clks[IMX6SLL_CLK_SSI1] = imx_clk_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1);
+ clks[IMX6SLL_CLK_SSI1_IPG] = imx_clk_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1);
+ clks[IMX6SLL_CLK_SSI2] = imx_clk_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2);
+ clks[IMX6SLL_CLK_SSI2_IPG] = imx_clk_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2);
+ clks[IMX6SLL_CLK_SSI3] = imx_clk_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3);
+ clks[IMX6SLL_CLK_SSI3_IPG] = imx_clk_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3);
+ clks[IMX6SLL_CLK_UART1_IPG] = imx_clk_gate2("uart1_ipg", "ipg", base + 0x7c, 24);
+ clks[IMX6SLL_CLK_UART1_SERIAL] = imx_clk_gate2("uart1_serial", "uart_podf", base + 0x7c, 24);
+
+ /* CCGR6 */
+ clks[IMX6SLL_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0);
+ clks[IMX6SLL_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2);
+ clks[IMX6SLL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4);
+ clks[IMX6SLL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
+
+ /* mask handshake of mmdc */
+ writel_relaxed(BM_CCM_CCDR_MMDC_CH0_MASK, base + 0x4);
+
+ imx_check_clocks(clks, ARRAY_SIZE(clks));
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ /* Lower the AHB clock rate before changing the clock source. */
+ clk_set_rate(clks[IMX6SLL_CLK_AHB], 99000000);
+
+ /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */
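+	/*
+	 * periph temporarily runs from periph_clk2 (fed by pll3_usb_otg) while
+	 * periph_pre is moved to pll2_bus, then switches back to periph_pre.
+	 */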
+ clk_set_parent(clks[IMX6SLL_CLK_PERIPH_CLK2_SEL], clks[IMX6SLL_CLK_PLL3_USB_OTG]);
+ clk_set_parent(clks[IMX6SLL_CLK_PERIPH], clks[IMX6SLL_CLK_PERIPH_CLK2]);
+ clk_set_parent(clks[IMX6SLL_CLK_PERIPH_PRE], clks[IMX6SLL_CLK_PLL2_BUS]);
+ clk_set_parent(clks[IMX6SLL_CLK_PERIPH], clks[IMX6SLL_CLK_PERIPH_PRE]);
+
+ clk_set_rate(clks[IMX6SLL_CLK_AHB], 132000000);
+}
+CLK_OF_DECLARE_DRIVER(imx6sll, "fsl,imx6sll-ccm", imx6sll_clocks_init);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index e6d389e333d7..bc3f9ebf2d9e 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -63,17 +63,17 @@ static const char *lcdif2_sels[] = { "lcdif2_podf", "ipp_di0", "ipp_di1", "ldb_d
static const char *display_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll3_usb_otg", "pll3_pfd1_540m", };
static const char *csi_sels[] = { "osc", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
static const char *cko1_sels[] = {
- "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
- "dummy", "ocram", "dummy", "pxp_axi", "epdc_axi", "lcdif_pix",
- "epdc_pix", "ahb", "ipg", "perclk", "ckil", "pll4_audio_div",
+ "dummy", "dummy", "dummy", "dummy",
+ "vadc", "ocram", "qspi2", "m4", "enet_ahb", "lcdif2_pix",
+ "lcdif1_pix", "ahb", "ipg", "perclk", "ckil", "pll4_audio_div",
};
static const char *cko2_sels[] = {
"dummy", "mmdc_p0_fast", "usdhc4", "usdhc1", "dummy", "wrck",
"ecspi_root", "dummy", "usdhc3", "pcie", "arm", "csi_core",
- "lcdif_axi", "dummy", "osc", "dummy", "gpu2d_ovg_core",
- "usdhc2", "ssi1", "ssi2", "ssi3", "gpu2d_core", "dummy",
- "dummy", "dummy", "dummy", "esai_extal", "eim_slow", "uart_serial",
- "spdif", "asrc", "dummy",
+ "display_axi", "dummy", "osc", "dummy", "dummy",
+ "usdhc2", "ssi1", "ssi2", "ssi3", "gpu_axi_podf", "dummy",
+ "can_podf", "lvds1_out", "qspi1", "esai_extal", "eim_slow",
+ "uart_serial", "spdif", "audio", "dummy",
};
static const char *cko_sels[] = { "cko1", "cko2", };
static const char *lvds_sels[] = {
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 85c118164469..12320118f8de 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -308,7 +308,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_SAI2_PODF] = imx_clk_divider("sai2_podf", "sai2_pred", base + 0x2c, 0, 6);
clks[IMX6UL_CLK_SPDIF_PRED] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
clks[IMX6UL_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
- clks[IMX6UL_CLK_SIM_PODF] = imx_clk_divider("sim_podf", "sim_pre_sel", base + 0x34, 12, 3);
+ if (clk_on_imx6ul())
+ clks[IMX6UL_CLK_SIM_PODF] = imx_clk_divider("sim_podf", "sim_pre_sel", base + 0x34, 12, 3);
+ else if (clk_on_imx6ull())
+ clks[IMX6ULL_CLK_EPDC_PODF] = imx_clk_divider("epdc_podf", "epdc_pre_sel", base + 0x34, 12, 3);
clks[IMX6UL_CLK_ECSPI_PODF] = imx_clk_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6);
clks[IMX6UL_CLK_LCDIF_PRED] = imx_clk_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3);
clks[IMX6UL_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3);
@@ -461,7 +464,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000);
/* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */
- clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
+ clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_OSC]);
clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]);
clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]);
clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 617beb234259..975a20d3cc94 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -51,20 +51,20 @@ static const char *arm_a7_sel[] = { "osc", "pll_arm_main_clk",
static const char *arm_m4_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_250m_clk", "pll_sys_pfd2_270m_clk",
- "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_usb_main_clk", };
static const char *axi_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk", "pll_sys_pfd5_clk",
- "pll_audio_post_div", "pll_video_main_clk", "pll_sys_pfd7_clk", };
+ "pll_audio_post_div", "pll_video_post_div", "pll_sys_pfd7_clk", };
static const char *disp_axi_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk", "pll_sys_pfd6_clk",
- "pll_sys_pfd7_clk", "pll_audio_post_div", "pll_video_main_clk", };
+ "pll_sys_pfd7_clk", "pll_audio_post_div", "pll_video_post_div", };
static const char *enet_axi_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk",
- "pll_sys_main_240m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_sys_main_240m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_sys_pfd4_clk", };
static const char *nand_usdhc_bus_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
@@ -74,8 +74,8 @@ static const char *nand_usdhc_bus_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
static const char *ahb_channel_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_sys_pfd0_392m_clk",
- "pll_enet_125m_clk", "pll_usb_main_clk", "pll_audio_post_div",
- "pll_video_main_clk", };
+ "pll_enet_250m_clk", "pll_usb_main_clk", "pll_audio_post_div",
+ "pll_video_post_div", };
static const char *dram_phym_sel[] = { "pll_dram_main_clk",
"dram_phym_alt_clk", };
@@ -86,7 +86,7 @@ static const char *dram_sel[] = { "pll_dram_main_clk",
static const char *dram_phym_alt_sel[] = { "osc", "pll_dram_533m_clk",
"pll_sys_main_clk", "pll_enet_500m_clk",
"pll_usb_main_clk", "pll_sys_pfd7_clk", "pll_audio_post_div",
- "pll_video_main_clk", };
+ "pll_video_post_div", };
static const char *dram_alt_sel[] = { "osc", "pll_dram_533m_clk",
"pll_sys_main_clk", "pll_enet_500m_clk",
@@ -108,62 +108,62 @@ static const char *pcie_phy_sel[] = { "osc", "pll_enet_100m_clk",
static const char *epdc_pixel_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_sys_main_clk", "pll_sys_pfd5_clk",
- "pll_sys_pfd6_clk", "pll_sys_pfd7_clk", "pll_video_main_clk", };
+ "pll_sys_pfd6_clk", "pll_sys_pfd7_clk", "pll_video_post_div", };
static const char *lcdif_pixel_sel[] = { "osc", "pll_sys_pfd5_clk",
"pll_dram_533m_clk", "ext_clk_3", "pll_sys_pfd4_clk",
- "pll_sys_pfd2_270m_clk", "pll_video_main_clk",
+ "pll_sys_pfd2_270m_clk", "pll_video_post_div",
"pll_usb_main_clk", };
static const char *mipi_dsi_sel[] = { "osc", "pll_sys_pfd5_clk",
"pll_sys_pfd3_clk", "pll_sys_main_clk", "pll_sys_pfd0_196m_clk",
- "pll_dram_533m_clk", "pll_video_main_clk", "pll_audio_post_div", };
+ "pll_dram_533m_clk", "pll_video_post_div", "pll_audio_post_div", };
static const char *mipi_csi_sel[] = { "osc", "pll_sys_pfd4_clk",
"pll_sys_pfd3_clk", "pll_sys_main_clk", "pll_sys_pfd0_196m_clk",
- "pll_dram_533m_clk", "pll_video_main_clk", "pll_audio_post_div", };
+ "pll_dram_533m_clk", "pll_video_post_div", "pll_audio_post_div", };
static const char *mipi_dphy_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_dram_533m_clk", "pll_sys_pfd5_clk", "ref_1m_clk", "ext_clk_2",
- "pll_video_main_clk", "ext_clk_3", };
+ "pll_video_post_div", "ext_clk_3", };
static const char *sai1_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
- "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_main_clk",
+ "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_post_div",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_clk_2", };
static const char *sai2_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
- "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_main_clk",
+ "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_post_div",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_clk_2", };
static const char *sai3_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
- "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_main_clk",
+ "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_post_div",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_clk_3", };
static const char *spdif_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
- "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_main_clk",
+ "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_post_div",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_3_clk", };
static const char *enet1_ref_sel[] = { "osc", "pll_enet_125m_clk",
"pll_enet_50m_clk", "pll_enet_25m_clk",
- "pll_sys_main_120m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_sys_main_120m_clk", "pll_audio_post_div", "pll_video_post_div",
"ext_clk_4", };
static const char *enet1_time_sel[] = { "osc", "pll_enet_100m_clk",
"pll_audio_post_div", "ext_clk_1", "ext_clk_2", "ext_clk_3",
- "ext_clk_4", "pll_video_main_clk", };
+ "ext_clk_4", "pll_video_post_div", };
static const char *enet2_ref_sel[] = { "osc", "pll_enet_125m_clk",
"pll_enet_50m_clk", "pll_enet_25m_clk",
- "pll_sys_main_120m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_sys_main_120m_clk", "pll_audio_post_div", "pll_video_post_div",
"ext_clk_4", };
static const char *enet2_time_sel[] = { "osc", "pll_enet_100m_clk",
"pll_audio_post_div", "ext_clk_1", "ext_clk_2", "ext_clk_3",
- "ext_clk_4", "pll_video_main_clk", };
+ "ext_clk_4", "pll_video_post_div", };
static const char *enet_phy_ref_sel[] = { "osc", "pll_enet_25m_clk",
"pll_enet_50m_clk", "pll_enet_125m_clk",
- "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_sys_pfd3_clk", };
static const char *eim_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
@@ -174,7 +174,7 @@ static const char *eim_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
static const char *nand_sel[] = { "osc", "pll_sys_main_clk",
"pll_dram_533m_clk", "pll_sys_pfd0_392m_clk", "pll_sys_pfd3_clk",
"pll_enet_500m_clk", "pll_enet_250m_clk",
- "pll_video_main_clk", };
+ "pll_video_post_div", };
static const char *qspi_sel[] = { "osc", "pll_sys_pfd4_clk",
"pll_dram_533m_clk", "pll_enet_500m_clk", "pll_sys_pfd3_clk",
@@ -204,22 +204,22 @@ static const char *can2_sel[] = { "osc", "pll_sys_main_120m_clk",
static const char *i2c1_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
- "pll_audio_post_div", "pll_video_main_clk", "pll_usb_main_clk",
+ "pll_audio_post_div", "pll_video_post_div", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *i2c2_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
- "pll_audio_post_div", "pll_video_main_clk", "pll_usb_main_clk",
+ "pll_audio_post_div", "pll_video_post_div", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *i2c3_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
- "pll_audio_post_div", "pll_video_main_clk", "pll_usb_main_clk",
+ "pll_audio_post_div", "pll_video_post_div", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *i2c4_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
- "pll_audio_post_div", "pll_video_main_clk", "pll_usb_main_clk",
+ "pll_audio_post_div", "pll_video_post_div", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *uart1_sel[] = { "osc", "pll_sys_main_240m_clk",
@@ -279,27 +279,27 @@ static const char *ecspi4_sel[] = { "osc", "pll_sys_main_240m_clk",
static const char *pwm1_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
- "ext_clk_1", "ref_1m_clk", "pll_video_main_clk", };
+ "ext_clk_1", "ref_1m_clk", "pll_video_post_div", };
static const char *pwm2_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
- "ext_clk_1", "ref_1m_clk", "pll_video_main_clk", };
+ "ext_clk_1", "ref_1m_clk", "pll_video_post_div", };
static const char *pwm3_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
- "ext_clk_2", "ref_1m_clk", "pll_video_main_clk", };
+ "ext_clk_2", "ref_1m_clk", "pll_video_post_div", };
static const char *pwm4_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
- "ext_clk_2", "ref_1m_clk", "pll_video_main_clk", };
+ "ext_clk_2", "ref_1m_clk", "pll_video_post_div", };
static const char *flextimer1_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
- "ext_clk_3", "ref_1m_clk", "pll_video_main_clk", };
+ "ext_clk_3", "ref_1m_clk", "pll_video_post_div", };
static const char *flextimer2_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
- "ext_clk_3", "ref_1m_clk", "pll_video_main_clk", };
+ "ext_clk_3", "ref_1m_clk", "pll_video_post_div", };
static const char *sim1_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
@@ -308,23 +308,23 @@ static const char *sim1_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
static const char *sim2_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
- "pll_usb_main_clk", "pll_video_main_clk", "pll_enet_125m_clk",
+ "pll_usb_main_clk", "pll_video_post_div", "pll_enet_125m_clk",
"pll_sys_pfd7_clk", };
static const char *gpt1_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_main_clk",
+ "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_post_div",
"ref_1m_clk", "pll_audio_post_div", "ext_clk_1", };
static const char *gpt2_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_main_clk",
+ "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_post_div",
"ref_1m_clk", "pll_audio_post_div", "ext_clk_2", };
static const char *gpt3_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_main_clk",
+ "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_post_div",
"ref_1m_clk", "pll_audio_post_div", "ext_clk_3", };
static const char *gpt4_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_main_clk",
+ "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_post_div",
"ref_1m_clk", "pll_audio_post_div", "ext_clk_4", };
static const char *trace_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
@@ -339,12 +339,12 @@ static const char *wdog_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
static const char *csi_mclk_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
- "pll_enet_125m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_enet_125m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_usb_main_clk", };
static const char *audio_mclk_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
- "pll_enet_125m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_enet_125m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_usb_main_clk", };
static const char *wrclk_sel[] = { "osc", "pll_enet_40m_clk",
@@ -358,13 +358,13 @@ static const char *clko1_sel[] = { "osc", "pll_sys_main_clk",
static const char *clko2_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_sys_pfd0_392m_clk", "pll_sys_pfd1_166m_clk", "pll_sys_pfd4_clk",
- "pll_audio_post_div", "pll_video_main_clk", "ckil", };
+ "pll_audio_post_div", "pll_video_post_div", "ckil", };
static const char *lvds1_sel[] = { "pll_arm_main_clk",
"pll_sys_main_clk", "pll_sys_pfd0_392m_clk", "pll_sys_pfd1_332m_clk",
"pll_sys_pfd2_270m_clk", "pll_sys_pfd3_clk", "pll_sys_pfd4_clk",
"pll_sys_pfd5_clk", "pll_sys_pfd6_clk", "pll_sys_pfd7_clk",
- "pll_audio_post_div", "pll_video_main_clk", "pll_enet_500m_clk",
+ "pll_audio_post_div", "pll_video_post_div", "pll_enet_500m_clk",
"pll_enet_250m_clk", "pll_enet_125m_clk", "pll_enet_100m_clk",
"pll_enet_50m_clk", "pll_enet_40m_clk", "pll_enet_25m_clk",
"pll_dram_main_clk", };
@@ -433,23 +433,22 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_PLL_AUDIO_MAIN_BYPASS] = imx_clk_mux_flags("pll_audio_main_bypass", base + 0xf0, 16, 1, pll_audio_bypass_sel, ARRAY_SIZE(pll_audio_bypass_sel), CLK_SET_RATE_PARENT);
clks[IMX7D_PLL_VIDEO_MAIN_BYPASS] = imx_clk_mux_flags("pll_video_main_bypass", base + 0x130, 16, 1, pll_video_bypass_sel, ARRAY_SIZE(pll_video_bypass_sel), CLK_SET_RATE_PARENT);
- clk_set_parent(clks[IMX7D_PLL_ARM_MAIN_BYPASS], clks[IMX7D_PLL_ARM_MAIN]);
- clk_set_parent(clks[IMX7D_PLL_DRAM_MAIN_BYPASS], clks[IMX7D_PLL_DRAM_MAIN]);
- clk_set_parent(clks[IMX7D_PLL_SYS_MAIN_BYPASS], clks[IMX7D_PLL_SYS_MAIN]);
- clk_set_parent(clks[IMX7D_PLL_ENET_MAIN_BYPASS], clks[IMX7D_PLL_ENET_MAIN]);
- clk_set_parent(clks[IMX7D_PLL_AUDIO_MAIN_BYPASS], clks[IMX7D_PLL_AUDIO_MAIN]);
- clk_set_parent(clks[IMX7D_PLL_VIDEO_MAIN_BYPASS], clks[IMX7D_PLL_VIDEO_MAIN]);
-
clks[IMX7D_PLL_ARM_MAIN_CLK] = imx_clk_gate("pll_arm_main_clk", "pll_arm_main_bypass", base + 0x60, 13);
- clks[IMX7D_PLL_DRAM_MAIN_CLK] = imx_clk_gate("pll_dram_main_clk", "pll_dram_main_bypass", base + 0x70, 13);
+ clks[IMX7D_PLL_DRAM_MAIN_CLK] = imx_clk_gate("pll_dram_main_clk", "pll_dram_test_div", base + 0x70, 13);
clks[IMX7D_PLL_SYS_MAIN_CLK] = imx_clk_gate("pll_sys_main_clk", "pll_sys_main_bypass", base + 0xb0, 13);
clks[IMX7D_PLL_AUDIO_MAIN_CLK] = imx_clk_gate("pll_audio_main_clk", "pll_audio_main_bypass", base + 0xf0, 13);
clks[IMX7D_PLL_VIDEO_MAIN_CLK] = imx_clk_gate("pll_video_main_clk", "pll_video_main_bypass", base + 0x130, 13);
+ clks[IMX7D_PLL_DRAM_TEST_DIV] = clk_register_divider_table(NULL, "pll_dram_test_div", "pll_dram_main_bypass",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 21, 2, 0, test_div_table, &imx_ccm_lock);
clks[IMX7D_PLL_AUDIO_TEST_DIV] = clk_register_divider_table(NULL, "pll_audio_test_div", "pll_audio_main_clk",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xf0, 19, 2, 0, test_div_table, &imx_ccm_lock);
clks[IMX7D_PLL_AUDIO_POST_DIV] = clk_register_divider_table(NULL, "pll_audio_post_div", "pll_audio_test_div",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xf0, 22, 2, 0, post_div_table, &imx_ccm_lock);
+ clks[IMX7D_PLL_VIDEO_TEST_DIV] = clk_register_divider_table(NULL, "pll_video_test_div", "pll_video_main_clk",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x130, 19, 2, 0, test_div_table, &imx_ccm_lock);
+ clks[IMX7D_PLL_VIDEO_POST_DIV] = clk_register_divider_table(NULL, "pll_video_post_div", "pll_video_test_div",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x130, 22, 2, 0, post_div_table, &imx_ccm_lock);
clks[IMX7D_PLL_SYS_PFD0_392M_CLK] = imx_clk_pfd("pll_sys_pfd0_392m_clk", "pll_sys_main_clk", base + 0xc0, 0);
clks[IMX7D_PLL_SYS_PFD1_332M_CLK] = imx_clk_pfd("pll_sys_pfd1_332m_clk", "pll_sys_main_clk", base + 0xc0, 1);
@@ -797,7 +796,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_OCOTP_CLK] = imx_clk_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0);
clks[IMX7D_SNVS_CLK] = imx_clk_gate4("snvs_clk", "ipg_root_clk", base + 0x4250, 0);
clks[IMX7D_CAAM_CLK] = imx_clk_gate4("caam_clk", "ipg_root_clk", base + 0x4240, 0);
- clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4420, 0);
+ clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4690, 0);
clks[IMX7D_SDMA_CORE_CLK] = imx_clk_gate4("sdma_root_clk", "ahb_root_clk", base + 0x4480, 0);
clks[IMX7D_PCIE_CTRL_ROOT_CLK] = imx_clk_gate4("pcie_ctrl_root_clk", "pcie_ctrl_post_div", base + 0x4600, 0);
clks[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_gate4("pcie_phy_root_clk", "pcie_phy_post_div", base + 0x4600, 0);
@@ -863,6 +862,9 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0);
clks[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0);
clks[IMX7D_WRCLK_ROOT_CLK] = imx_clk_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0);
+ clks[IMX7D_USB_CTRL_CLK] = imx_clk_gate4("usb_ctrl_clk", "ahb_root_clk", base + 0x4680, 0);
+ clks[IMX7D_USB_PHY1_CLK] = imx_clk_gate4("usb_phy1_clk", "pll_usb1_main_clk", base + 0x46a0, 0);
+ clks[IMX7D_USB_PHY2_CLK] = imx_clk_gate4("usb_phy2_clk", "pll_usb_main_clk", base + 0x46b0, 0);
clks[IMX7D_ADC_ROOT_CLK] = imx_clk_gate4("adc_root_clk", "ipg_root_clk", base + 0x4200, 0);
clks[IMX7D_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
@@ -882,12 +884,23 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clks[clks_init_on[i]]);
+ clk_set_parent(clks[IMX7D_PLL_ARM_MAIN_BYPASS], clks[IMX7D_PLL_ARM_MAIN]);
+ clk_set_parent(clks[IMX7D_PLL_DRAM_MAIN_BYPASS], clks[IMX7D_PLL_DRAM_MAIN]);
+ clk_set_parent(clks[IMX7D_PLL_SYS_MAIN_BYPASS], clks[IMX7D_PLL_SYS_MAIN]);
+ clk_set_parent(clks[IMX7D_PLL_ENET_MAIN_BYPASS], clks[IMX7D_PLL_ENET_MAIN]);
+ clk_set_parent(clks[IMX7D_PLL_AUDIO_MAIN_BYPASS], clks[IMX7D_PLL_AUDIO_MAIN]);
+ clk_set_parent(clks[IMX7D_PLL_VIDEO_MAIN_BYPASS], clks[IMX7D_PLL_VIDEO_MAIN]);
+
	/* use old gpt clk setting, gpt1 root clk must be twice the gpt counter freq */
clk_set_parent(clks[IMX7D_GPT1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
	/* set uart module clock's parent clock source, which must be greater than 80MHz */
clk_set_parent(clks[IMX7D_UART1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
+	/* Set the clock rate for the USBPHY; the USB_PLL at the CCM is sourced from USBOTG2 */
+ clks[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_fixed_factor("pll_usb1_main_clk", "osc", 20, 1);
+ clks[IMX7D_USB_MAIN_480M_CLK] = imx_clk_fixed_factor("pll_usb_main_clk", "osc", 20, 1);
+
imx_register_uart_clocks(uart_clks);
}
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index 85b5cbe9744c..eeba3cb14e2d 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -182,8 +182,12 @@ static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
u32 dp_op, dp_mfd, dp_mfn;
+ int ret;
+
+ ret = __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
+ if (ret)
+ return ret;
- __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
dp_op, dp_mfd, dp_mfn);
}
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index d69c4bbf3597..8076ec040f37 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -123,6 +123,13 @@ static inline struct clk *imx_clk_gate(const char *name, const char *parent,
shift, 0, &imx_ccm_lock);
}
+static inline struct clk *imx_clk_gate_flags(const char *name, const char *parent,
+ void __iomem *reg, u8 shift, unsigned long flags)
+{
+ return clk_register_gate(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
+ shift, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_gate_dis(const char *name, const char *parent,
void __iomem *reg, u8 shift)
{
@@ -137,6 +144,13 @@ static inline struct clk *imx_clk_gate2(const char *name, const char *parent,
shift, 0x3, 0, &imx_ccm_lock, NULL);
}
+static inline struct clk *imx_clk_gate2_flags(const char *name, const char *parent,
+ void __iomem *reg, u8 shift, unsigned long flags)
+{
+ return clk_register_gate2(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
+ shift, 0x3, 0, &imx_ccm_lock, NULL);
+}
+
static inline struct clk *imx_clk_gate2_shared(const char *name,
const char *parent, void __iomem *reg, u8 shift,
unsigned int *share_count)
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 9cdf9d5050ac..4cb70bed89a9 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -29,21 +29,10 @@
#define SCI_CLK_INPUT_TERMINATION BIT(2)
/**
- * struct sci_clk_data - TI SCI clock data
- * @dev: device index
- * @num_clks: number of clocks for this device
- */
-struct sci_clk_data {
- u16 dev;
- u16 num_clks;
-};
-
-/**
* struct sci_clk_provider - TI SCI clock provider representation
* @sci: Handle to the System Control Interface protocol handler
* @ops: Pointer to the SCI ops to be used by the clocks
* @dev: Device pointer for the clock provider
- * @clk_data: Clock data
* @clocks: Clocks array for this device
* @num_clocks: Total number of clocks for this provider
*/
@@ -51,8 +40,7 @@ struct sci_clk_provider {
const struct ti_sci_handle *sci;
const struct ti_sci_clk_ops *ops;
struct device *dev;
- const struct sci_clk_data *clk_data;
- struct clk_hw **clocks;
+ struct sci_clk **clocks;
int num_clocks;
};
@@ -61,6 +49,7 @@ struct sci_clk_provider {
* @hw: Hardware clock cookie for common clock framework
* @dev_id: Device index
* @clk_id: Clock index
+ * @num_parents: Number of parents for this clock
* @provider: Master clock provider
* @flags: Flags for the clock
*/
@@ -68,6 +57,7 @@ struct sci_clk {
struct clk_hw hw;
u16 dev_id;
u8 clk_id;
+ u8 num_parents;
struct sci_clk_provider *provider;
u8 flags;
};
@@ -273,38 +263,22 @@ static const struct clk_ops sci_clk_ops = {
/**
* _sci_clk_get - Gets a handle for an SCI clock
* @provider: Handle to SCI clock provider
- * @dev_id: device ID for the clock to register
- * @clk_id: clock ID for the clock to register
+ * @sci_clk: Handle to the SCI clock to populate
*
* Gets a handle to an existing TI SCI hw clock, or builds a new clock
* entry and registers it with the common clock framework. Called from
* the common clock framework, when a corresponding of_clk_get call is
* executed, or recursively from itself when parsing parent clocks.
- * Returns a pointer to the hw clock struct, or ERR_PTR value in failure.
+ * Returns 0 on success, negative error code on failure.
*/
-static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider,
- u16 dev_id, u8 clk_id)
+static int _sci_clk_build(struct sci_clk_provider *provider,
+ struct sci_clk *sci_clk)
{
struct clk_init_data init = { NULL };
- struct sci_clk *sci_clk = NULL;
char *name = NULL;
char **parent_names = NULL;
int i;
- int ret;
-
- sci_clk = devm_kzalloc(provider->dev, sizeof(*sci_clk), GFP_KERNEL);
- if (!sci_clk)
- return ERR_PTR(-ENOMEM);
-
- sci_clk->dev_id = dev_id;
- sci_clk->clk_id = clk_id;
- sci_clk->provider = provider;
-
- ret = provider->ops->get_num_parents(provider->sci, dev_id,
- clk_id,
- &init.num_parents);
- if (ret)
- goto err;
+ int ret = 0;
name = kasprintf(GFP_KERNEL, "%s:%d:%d", dev_name(provider->dev),
sci_clk->dev_id, sci_clk->clk_id);
@@ -317,11 +291,11 @@ static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider,
* to have mux functionality. Otherwise it is going to act as a root
* clock.
*/
- if (init.num_parents < 2)
- init.num_parents = 0;
+ if (sci_clk->num_parents < 2)
+ sci_clk->num_parents = 0;
- if (init.num_parents) {
- parent_names = kcalloc(init.num_parents, sizeof(char *),
+ if (sci_clk->num_parents) {
+ parent_names = kcalloc(sci_clk->num_parents, sizeof(char *),
GFP_KERNEL);
if (!parent_names) {
@@ -329,7 +303,7 @@ static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider,
goto err;
}
- for (i = 0; i < init.num_parents; i++) {
+ for (i = 0; i < sci_clk->num_parents; i++) {
char *parent_name;
parent_name = kasprintf(GFP_KERNEL, "%s:%d:%d",
@@ -346,6 +320,7 @@ static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider,
}
init.ops = &sci_clk_ops;
+ init.num_parents = sci_clk->num_parents;
sci_clk->hw.init = &init;
ret = devm_clk_hw_register(provider->dev, &sci_clk->hw);
@@ -354,7 +329,7 @@ static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider,
err:
if (parent_names) {
- for (i = 0; i < init.num_parents; i++)
+ for (i = 0; i < sci_clk->num_parents; i++)
kfree(parent_names[i]);
kfree(parent_names);
@@ -362,10 +337,7 @@ err:
kfree(name);
- if (ret)
- return ERR_PTR(ret);
-
- return &sci_clk->hw;
+ return ret;
}
static int _cmp_sci_clk(const void *a, const void *b)
@@ -414,253 +386,20 @@ static struct clk_hw *sci_clk_get(struct of_phandle_args *clkspec, void *data)
static int ti_sci_init_clocks(struct sci_clk_provider *p)
{
- const struct sci_clk_data *data = p->clk_data;
- struct clk_hw *hw;
int i;
- int num_clks = 0;
-
- while (data->num_clks) {
- num_clks += data->num_clks;
- data++;
- }
-
- p->num_clocks = num_clks;
-
- p->clocks = devm_kcalloc(p->dev, num_clks, sizeof(struct sci_clk),
- GFP_KERNEL);
- if (!p->clocks)
- return -ENOMEM;
-
- num_clks = 0;
-
- data = p->clk_data;
-
- while (data->num_clks) {
- for (i = 0; i < data->num_clks; i++) {
- hw = _sci_clk_build(p, data->dev, i);
- if (!IS_ERR(hw)) {
- p->clocks[num_clks++] = hw;
- continue;
- }
-
- /* Skip any holes in the clock lists */
- if (PTR_ERR(hw) == -ENODEV)
- continue;
+ int ret;
- return PTR_ERR(hw);
- }
- data++;
+ for (i = 0; i < p->num_clocks; i++) {
+ ret = _sci_clk_build(p, p->clocks[i]);
+ if (ret)
+ return ret;
}
return 0;
}
-static const struct sci_clk_data k2g_clk_data[] = {
- /* pmmc */
- { .dev = 0x0, .num_clks = 4 },
-
- /* mlb0 */
- { .dev = 0x1, .num_clks = 5 },
-
- /* dss0 */
- { .dev = 0x2, .num_clks = 2 },
-
- /* mcbsp0 */
- { .dev = 0x3, .num_clks = 8 },
-
- /* mcasp0 */
- { .dev = 0x4, .num_clks = 8 },
-
- /* mcasp1 */
- { .dev = 0x5, .num_clks = 8 },
-
- /* mcasp2 */
- { .dev = 0x6, .num_clks = 8 },
-
- /* dcan0 */
- { .dev = 0x8, .num_clks = 2 },
-
- /* dcan1 */
- { .dev = 0x9, .num_clks = 2 },
-
- /* emif0 */
- { .dev = 0xa, .num_clks = 6 },
-
- /* mmchs0 */
- { .dev = 0xb, .num_clks = 3 },
-
- /* mmchs1 */
- { .dev = 0xc, .num_clks = 3 },
-
- /* gpmc0 */
- { .dev = 0xd, .num_clks = 1 },
-
- /* elm0 */
- { .dev = 0xe, .num_clks = 1 },
-
- /* spi0 */
- { .dev = 0x10, .num_clks = 1 },
-
- /* spi1 */
- { .dev = 0x11, .num_clks = 1 },
-
- /* spi2 */
- { .dev = 0x12, .num_clks = 1 },
-
- /* spi3 */
- { .dev = 0x13, .num_clks = 1 },
-
- /* icss0 */
- { .dev = 0x14, .num_clks = 6 },
-
- /* icss1 */
- { .dev = 0x15, .num_clks = 6 },
-
- /* usb0 */
- { .dev = 0x16, .num_clks = 7 },
-
- /* usb1 */
- { .dev = 0x17, .num_clks = 7 },
-
- /* nss0 */
- { .dev = 0x18, .num_clks = 14 },
-
- /* pcie0 */
- { .dev = 0x19, .num_clks = 1 },
-
- /* gpio0 */
- { .dev = 0x1b, .num_clks = 1 },
-
- /* gpio1 */
- { .dev = 0x1c, .num_clks = 1 },
-
- /* timer64_0 */
- { .dev = 0x1d, .num_clks = 9 },
-
- /* timer64_1 */
- { .dev = 0x1e, .num_clks = 9 },
-
- /* timer64_2 */
- { .dev = 0x1f, .num_clks = 9 },
-
- /* timer64_3 */
- { .dev = 0x20, .num_clks = 9 },
-
- /* timer64_4 */
- { .dev = 0x21, .num_clks = 9 },
-
- /* timer64_5 */
- { .dev = 0x22, .num_clks = 9 },
-
- /* timer64_6 */
- { .dev = 0x23, .num_clks = 9 },
-
- /* msgmgr0 */
- { .dev = 0x25, .num_clks = 1 },
-
- /* bootcfg0 */
- { .dev = 0x26, .num_clks = 1 },
-
- /* arm_bootrom0 */
- { .dev = 0x27, .num_clks = 1 },
-
- /* dsp_bootrom0 */
- { .dev = 0x29, .num_clks = 1 },
-
- /* debugss0 */
- { .dev = 0x2b, .num_clks = 8 },
-
- /* uart0 */
- { .dev = 0x2c, .num_clks = 1 },
-
- /* uart1 */
- { .dev = 0x2d, .num_clks = 1 },
-
- /* uart2 */
- { .dev = 0x2e, .num_clks = 1 },
-
- /* ehrpwm0 */
- { .dev = 0x2f, .num_clks = 1 },
-
- /* ehrpwm1 */
- { .dev = 0x30, .num_clks = 1 },
-
- /* ehrpwm2 */
- { .dev = 0x31, .num_clks = 1 },
-
- /* ehrpwm3 */
- { .dev = 0x32, .num_clks = 1 },
-
- /* ehrpwm4 */
- { .dev = 0x33, .num_clks = 1 },
-
- /* ehrpwm5 */
- { .dev = 0x34, .num_clks = 1 },
-
- /* eqep0 */
- { .dev = 0x35, .num_clks = 1 },
-
- /* eqep1 */
- { .dev = 0x36, .num_clks = 1 },
-
- /* eqep2 */
- { .dev = 0x37, .num_clks = 1 },
-
- /* ecap0 */
- { .dev = 0x38, .num_clks = 1 },
-
- /* ecap1 */
- { .dev = 0x39, .num_clks = 1 },
-
- /* i2c0 */
- { .dev = 0x3a, .num_clks = 1 },
-
- /* i2c1 */
- { .dev = 0x3b, .num_clks = 1 },
-
- /* i2c2 */
- { .dev = 0x3c, .num_clks = 1 },
-
- /* edma0 */
- { .dev = 0x3f, .num_clks = 2 },
-
- /* semaphore0 */
- { .dev = 0x40, .num_clks = 1 },
-
- /* intc0 */
- { .dev = 0x41, .num_clks = 1 },
-
- /* gic0 */
- { .dev = 0x42, .num_clks = 1 },
-
- /* qspi0 */
- { .dev = 0x43, .num_clks = 5 },
-
- /* arm_64b_counter0 */
- { .dev = 0x44, .num_clks = 2 },
-
- /* tetris0 */
- { .dev = 0x45, .num_clks = 2 },
-
- /* cgem0 */
- { .dev = 0x46, .num_clks = 2 },
-
- /* msmc0 */
- { .dev = 0x47, .num_clks = 1 },
-
- /* cbass0 */
- { .dev = 0x49, .num_clks = 1 },
-
- /* board0 */
- { .dev = 0x4c, .num_clks = 36 },
-
- /* edma1 */
- { .dev = 0x4f, .num_clks = 2 },
- { .num_clks = 0 },
-};
-
static const struct of_device_id ti_sci_clk_of_match[] = {
- { .compatible = "ti,k2g-sci-clk", .data = &k2g_clk_data },
+ { .compatible = "ti,k2g-sci-clk" },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_clk_of_match);
@@ -681,12 +420,16 @@ static int ti_sci_clk_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct sci_clk_provider *provider;
const struct ti_sci_handle *handle;
- const struct sci_clk_data *data;
int ret;
-
- data = of_device_get_match_data(dev);
- if (!data)
- return -EINVAL;
+ int num_clks = 0;
+ struct sci_clk **clks = NULL;
+ struct sci_clk **tmp_clks;
+ struct sci_clk *sci_clk;
+ int max_clks = 0;
+ int clk_id = 0;
+ int dev_id = 0;
+ u8 num_parents;
+ int gap_size = 0;
handle = devm_ti_sci_get_handle(dev);
if (IS_ERR(handle))
@@ -696,12 +439,69 @@ static int ti_sci_clk_probe(struct platform_device *pdev)
if (!provider)
return -ENOMEM;
- provider->clk_data = data;
-
provider->sci = handle;
provider->ops = &handle->ops.clk_ops;
provider->dev = dev;
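+
+	/*
+	 * Scan the firmware for available clocks by walking dev_id/clk_id
+	 * pairs: two consecutive missing clocks end the current device, and
+	 * five devices in a row without any clock end the scan.
+	 */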
+ while (1) {
+ ret = provider->ops->get_num_parents(provider->sci, dev_id,
+ clk_id, &num_parents);
+ if (ret) {
+ gap_size++;
+ if (!clk_id) {
+ if (gap_size >= 5)
+ break;
+ dev_id++;
+ } else {
+ if (gap_size >= 2) {
+ dev_id++;
+ clk_id = 0;
+ gap_size = 0;
+ } else {
+ clk_id++;
+ }
+ }
+ continue;
+ }
+
+ gap_size = 0;
+
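+		/* grow the temporary array of clock pointers in chunks of 64 */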
+ if (num_clks == max_clks) {
+ tmp_clks = devm_kmalloc_array(dev, max_clks + 64,
+ sizeof(sci_clk),
+ GFP_KERNEL);
+ memcpy(tmp_clks, clks, max_clks * sizeof(sci_clk));
+ if (max_clks)
+ devm_kfree(dev, clks);
+ max_clks += 64;
+ clks = tmp_clks;
+ }
+
+ sci_clk = devm_kzalloc(dev, sizeof(*sci_clk), GFP_KERNEL);
+ if (!sci_clk)
+ return -ENOMEM;
+ sci_clk->dev_id = dev_id;
+ sci_clk->clk_id = clk_id;
+ sci_clk->provider = provider;
+ sci_clk->num_parents = num_parents;
+
+ clks[num_clks] = sci_clk;
+
+ clk_id++;
+ num_clks++;
+ }
+
+ provider->clocks = devm_kmalloc_array(dev, num_clks, sizeof(sci_clk),
+ GFP_KERNEL);
+ if (!provider->clocks)
+ return -ENOMEM;
+
+ memcpy(provider->clocks, clks, num_clks * sizeof(sci_clk));
+
+ provider->num_clocks = num_clks;
+
+ devm_kfree(dev, clks);
+
ret = ti_sci_init_clocks(provider);
if (ret) {
pr_err("ti-sci-init-clocks failed.\n");
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 1f9ea0f21df1..92afe5989e97 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -54,6 +54,12 @@ config COMMON_CLK_MT2701_BDPSYS
---help---
This driver supports MediaTek MT2701 bdpsys clocks.
+config COMMON_CLK_MT2701_AUDSYS
+ bool "Clock driver for Mediatek MT2701 audsys"
+ depends on COMMON_CLK_MT2701
+ ---help---
+	  This driver supports MediaTek MT2701 audsys clocks.
+
config COMMON_CLK_MT2712
bool "Clock driver for MediaTek MT2712"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 5160fdc4bbb8..b80eff2abb31 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_COMMON_CLK_MT6797_MMSYS) += clk-mt6797-mm.o
obj-$(CONFIG_COMMON_CLK_MT6797_VDECSYS) += clk-mt6797-vdec.o
obj-$(CONFIG_COMMON_CLK_MT6797_VENCSYS) += clk-mt6797-venc.o
obj-$(CONFIG_COMMON_CLK_MT2701) += clk-mt2701.o
+obj-$(CONFIG_COMMON_CLK_MT2701_AUDSYS) += clk-mt2701-aud.o
obj-$(CONFIG_COMMON_CLK_MT2701_BDPSYS) += clk-mt2701-bdp.o
obj-$(CONFIG_COMMON_CLK_MT2701_ETHSYS) += clk-mt2701-eth.o
obj-$(CONFIG_COMMON_CLK_MT2701_HIFSYS) += clk-mt2701-hif.o
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
new file mode 100644
index 000000000000..e66896a44fad
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+#define GATE_AUDIO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO3(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio3_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate_regs audio0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs audio1_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x10,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs audio2_cg_regs = {
+ .set_ofs = 0x14,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x14,
+};
+
+static const struct mtk_gate_regs audio3_cg_regs = {
+ .set_ofs = 0x634,
+ .clr_ofs = 0x634,
+ .sta_ofs = 0x634,
+};
+
+static const struct mtk_gate audio_clks[] = {
+ /* AUDIO0 */
+ GATE_AUDIO0(CLK_AUD_AFE, "audio_afe", "aud_intbus_sel", 2),
+ GATE_AUDIO0(CLK_AUD_HDMI, "audio_hdmi", "audpll_sel", 20),
+ GATE_AUDIO0(CLK_AUD_SPDF, "audio_spdf", "audpll_sel", 21),
+ GATE_AUDIO0(CLK_AUD_SPDF2, "audio_spdf2", "audpll_sel", 22),
+ GATE_AUDIO0(CLK_AUD_APLL, "audio_apll", "audpll_sel", 23),
+ /* AUDIO1 */
+ GATE_AUDIO1(CLK_AUD_I2SIN1, "audio_i2sin1", "aud_mux1_sel", 0),
+ GATE_AUDIO1(CLK_AUD_I2SIN2, "audio_i2sin2", "aud_mux1_sel", 1),
+ GATE_AUDIO1(CLK_AUD_I2SIN3, "audio_i2sin3", "aud_mux1_sel", 2),
+ GATE_AUDIO1(CLK_AUD_I2SIN4, "audio_i2sin4", "aud_mux1_sel", 3),
+ GATE_AUDIO1(CLK_AUD_I2SIN5, "audio_i2sin5", "aud_mux1_sel", 4),
+ GATE_AUDIO1(CLK_AUD_I2SIN6, "audio_i2sin6", "aud_mux1_sel", 5),
+ GATE_AUDIO1(CLK_AUD_I2SO1, "audio_i2so1", "aud_mux1_sel", 6),
+ GATE_AUDIO1(CLK_AUD_I2SO2, "audio_i2so2", "aud_mux1_sel", 7),
+ GATE_AUDIO1(CLK_AUD_I2SO3, "audio_i2so3", "aud_mux1_sel", 8),
+ GATE_AUDIO1(CLK_AUD_I2SO4, "audio_i2so4", "aud_mux1_sel", 9),
+ GATE_AUDIO1(CLK_AUD_I2SO5, "audio_i2so5", "aud_mux1_sel", 10),
+ GATE_AUDIO1(CLK_AUD_I2SO6, "audio_i2so6", "aud_mux1_sel", 11),
+ GATE_AUDIO1(CLK_AUD_ASRCI1, "audio_asrci1", "asm_h_sel", 12),
+ GATE_AUDIO1(CLK_AUD_ASRCI2, "audio_asrci2", "asm_h_sel", 13),
+ GATE_AUDIO1(CLK_AUD_ASRCO1, "audio_asrco1", "asm_h_sel", 14),
+ GATE_AUDIO1(CLK_AUD_ASRCO2, "audio_asrco2", "asm_h_sel", 15),
+ GATE_AUDIO1(CLK_AUD_INTDIR, "audio_intdir", "intdir_sel", 20),
+ GATE_AUDIO1(CLK_AUD_A1SYS, "audio_a1sys", "aud_mux1_sel", 21),
+ GATE_AUDIO1(CLK_AUD_A2SYS, "audio_a2sys", "aud_mux2_sel", 22),
+ GATE_AUDIO1(CLK_AUD_AFE_CONN, "audio_afe_conn", "aud_mux1_sel", 23),
+ GATE_AUDIO1(CLK_AUD_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
+ /* AUDIO2 */
+ GATE_AUDIO2(CLK_AUD_MMIF_UL1, "audio_ul1", "aud_mux1_sel", 0),
+ GATE_AUDIO2(CLK_AUD_MMIF_UL2, "audio_ul2", "aud_mux1_sel", 1),
+ GATE_AUDIO2(CLK_AUD_MMIF_UL3, "audio_ul3", "aud_mux1_sel", 2),
+ GATE_AUDIO2(CLK_AUD_MMIF_UL4, "audio_ul4", "aud_mux1_sel", 3),
+ GATE_AUDIO2(CLK_AUD_MMIF_UL5, "audio_ul5", "aud_mux1_sel", 4),
+ GATE_AUDIO2(CLK_AUD_MMIF_UL6, "audio_ul6", "aud_mux1_sel", 5),
+ GATE_AUDIO2(CLK_AUD_MMIF_DL1, "audio_dl1", "aud_mux1_sel", 6),
+ GATE_AUDIO2(CLK_AUD_MMIF_DL2, "audio_dl2", "aud_mux1_sel", 7),
+ GATE_AUDIO2(CLK_AUD_MMIF_DL3, "audio_dl3", "aud_mux1_sel", 8),
+ GATE_AUDIO2(CLK_AUD_MMIF_DL4, "audio_dl4", "aud_mux1_sel", 9),
+ GATE_AUDIO2(CLK_AUD_MMIF_DL5, "audio_dl5", "aud_mux1_sel", 10),
+ GATE_AUDIO2(CLK_AUD_MMIF_DL6, "audio_dl6", "aud_mux1_sel", 11),
+ GATE_AUDIO2(CLK_AUD_MMIF_DLMCH, "audio_dlmch", "aud_mux1_sel", 12),
+ GATE_AUDIO2(CLK_AUD_MMIF_ARB1, "audio_arb1", "aud_mux1_sel", 13),
+ GATE_AUDIO2(CLK_AUD_MMIF_AWB1, "audio_awb", "aud_mux1_sel", 14),
+ GATE_AUDIO2(CLK_AUD_MMIF_AWB2, "audio_awb2", "aud_mux1_sel", 15),
+ GATE_AUDIO2(CLK_AUD_MMIF_DAI, "audio_dai", "aud_mux1_sel", 16),
+ /* AUDIO3 */
+ GATE_AUDIO3(CLK_AUD_ASRCI3, "audio_asrci3", "asm_h_sel", 2),
+ GATE_AUDIO3(CLK_AUD_ASRCI4, "audio_asrci4", "asm_h_sel", 3),
+ GATE_AUDIO3(CLK_AUD_ASRCI5, "audio_asrci5", "asm_h_sel", 4),
+ GATE_AUDIO3(CLK_AUD_ASRCI6, "audio_asrci6", "asm_h_sel", 5),
+ GATE_AUDIO3(CLK_AUD_ASRCO3, "audio_asrco3", "asm_h_sel", 6),
+ GATE_AUDIO3(CLK_AUD_ASRCO4, "audio_asrco4", "asm_h_sel", 7),
+ GATE_AUDIO3(CLK_AUD_ASRCO5, "audio_asrco5", "asm_h_sel", 8),
+ GATE_AUDIO3(CLK_AUD_ASRCO6, "audio_asrco6", "asm_h_sel", 9),
+ GATE_AUDIO3(CLK_AUD_MEM_ASRC1, "audio_mem_asrc1", "asm_h_sel", 10),
+ GATE_AUDIO3(CLK_AUD_MEM_ASRC2, "audio_mem_asrc2", "asm_h_sel", 11),
+ GATE_AUDIO3(CLK_AUD_MEM_ASRC3, "audio_mem_asrc3", "asm_h_sel", 12),
+ GATE_AUDIO3(CLK_AUD_MEM_ASRC4, "audio_mem_asrc4", "asm_h_sel", 13),
+ GATE_AUDIO3(CLK_AUD_MEM_ASRC5, "audio_mem_asrc5", "asm_h_sel", 14),
+};
+
+static const struct of_device_id of_match_clk_mt2701_aud[] = {
+ { .compatible = "mediatek,mt2701-audsys", },
+ {}
+};
+
+static int clk_mt2701_aud_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_AUD_NR);
+
+ mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r) {
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ goto err_clk_provider;
+ }
+
+ r = devm_of_platform_populate(&pdev->dev);
+ if (r)
+ goto err_plat_populate;
+
+ return 0;
+
+err_plat_populate:
+ of_clk_del_provider(node);
+err_clk_provider:
+ return r;
+}
+
+static struct platform_driver clk_mt2701_aud_drv = {
+ .probe = clk_mt2701_aud_probe,
+ .driver = {
+ .name = "clk-mt2701-aud",
+ .of_match_table = of_match_clk_mt2701_aud,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_aud_drv);
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 8e7f16fd87c9..deca7527f92f 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -148,6 +148,7 @@ static const struct mtk_fixed_factor top_fixed_divs[] = {
FACTOR(CLK_TOP_CLK26M_D8, "clk26m_d8", "clk26m", 1, 8),
FACTOR(CLK_TOP_32K_INTERNAL, "32k_internal", "clk26m", 1, 793),
FACTOR(CLK_TOP_32K_EXTERNAL, "32k_external", "rtc32k", 1, 1),
+ FACTOR(CLK_TOP_AXISEL_D4, "axisel_d4", "axi_sel", 1, 4),
};
static const char * const axi_parents[] = {
@@ -857,13 +858,13 @@ static const struct mtk_gate peri_clks[] = {
GATE_PERI0(CLK_PERI_USB1, "usb1_ck", "usb20_sel", 11),
GATE_PERI0(CLK_PERI_USB0, "usb0_ck", "usb20_sel", 10),
GATE_PERI0(CLK_PERI_PWM, "pwm_ck", "axi_sel", 9),
- GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axi_sel", 8),
- GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axi_sel", 7),
- GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axi_sel", 6),
- GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axi_sel", 5),
- GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axi_sel", 4),
- GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axi_sel", 3),
- GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axi_sel", 2),
+ GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axisel_d4", 8),
+ GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axisel_d4", 7),
+ GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axisel_d4", 6),
+ GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axisel_d4", 5),
+ GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axisel_d4", 4),
+ GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axisel_d4", 3),
+ GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axisel_d4", 2),
GATE_PERI0(CLK_PERI_THERM, "therm_ck", "axi_sel", 1),
GATE_PERI0(CLK_PERI_NFI, "nfi_ck", "nfi2x_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 498d13799388..991d4093726e 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -221,6 +221,8 @@ static const struct mtk_fixed_factor top_divs[] = {
4),
FACTOR(CLK_TOP_D2A_ULCLK_6P5M, "d2a_ulclk_6p5m", "clk26m", 1,
4),
+ FACTOR(CLK_TOP_APLL1_D3, "apll1_d3", "apll1_ck", 1,
+ 3),
};
static const char * const axi_parents[] = {
@@ -625,7 +627,7 @@ static const char * const ether_125m_parents[] = {
static const char * const ether_50m_parents[] = {
"clk26m",
"etherpll_50m",
- "univpll_d26",
+ "apll1_d3",
"univpll3_d4"
};
@@ -686,7 +688,7 @@ static const char * const i2c_parents[] = {
static const char * const msdc0p_aes_parents[] = {
"clk26m",
- "msdcpll_ck",
+ "syspll_d2",
"univpll_d3",
"vcodecpll_ck"
};
@@ -719,6 +721,17 @@ static const char * const aud_apll2_parents[] = {
"clkaud_ext_i_2"
};
+static const char * const apll1_ref_parents[] = {
+ "clkaud_ext_i_2",
+ "clkaud_ext_i_1",
+ "clki2si0_mck_i",
+ "clki2si1_mck_i",
+ "clki2si2_mck_i",
+ "clktdmin_mclk_i",
+ "clki2si2_mck_i",
+ "clktdmin_mclk_i"
+};
+
static const char * const audull_vtx_parents[] = {
"d2a_ulclk_6p5m",
"clkaud_ext_i_0"
@@ -886,6 +899,10 @@ static struct mtk_composite top_muxes[] = {
aud_apll2_parents, 0x134, 1, 1),
MUX(CLK_TOP_DA_AUDULL_VTX_6P5M_SEL, "audull_vtx_sel",
audull_vtx_parents, 0x134, 31, 1),
+ MUX(CLK_TOP_APLL1_REF_SEL, "apll1_ref_sel",
+ apll1_ref_parents, 0x134, 4, 3),
+ MUX(CLK_TOP_APLL2_REF_SEL, "apll2_ref_sel",
+ apll1_ref_parents, 0x134, 7, 3),
};
static const char * const mcu_mp0_parents[] = {
@@ -932,36 +949,56 @@ static const struct mtk_clk_divider top_adj_divs[] = {
DIV_ADJ(CLK_TOP_APLL_DIV7, "apll_div7", "i2si3_sel", 0x128, 24, 8),
};
-static const struct mtk_gate_regs top_cg_regs = {
+static const struct mtk_gate_regs top0_cg_regs = {
.set_ofs = 0x120,
.clr_ofs = 0x120,
.sta_ofs = 0x120,
};
-#define GATE_TOP(_id, _name, _parent, _shift) { \
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x424,
+ .clr_ofs = 0x424,
+ .sta_ofs = 0x424,
+};
+
+#define GATE_TOP0(_id, _name, _parent, _shift) { \
.id = _id, \
.name = _name, \
.parent_name = _parent, \
- .regs = &top_cg_regs, \
+ .regs = &top0_cg_regs, \
.shift = _shift, \
.ops = &mtk_clk_gate_ops_no_setclr, \
}
+#define GATE_TOP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
static const struct mtk_gate top_clks[] = {
- GATE_TOP(CLK_TOP_APLL_DIV_PDN0, "apll_div_pdn0", "i2so1_sel", 0),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN1, "apll_div_pdn1", "i2so2_sel", 1),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN2, "apll_div_pdn2", "i2so3_sel", 2),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN3, "apll_div_pdn3", "tdmo0_sel", 3),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN4, "apll_div_pdn4", "tdmo1_sel", 4),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN5, "apll_div_pdn5", "i2si1_sel", 5),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN6, "apll_div_pdn6", "i2si2_sel", 6),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN7, "apll_div_pdn7", "i2si3_sel", 7),
+ /* TOP0 */
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN0, "apll_div_pdn0", "i2so1_sel", 0),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN1, "apll_div_pdn1", "i2so2_sel", 1),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN2, "apll_div_pdn2", "i2so3_sel", 2),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN3, "apll_div_pdn3", "tdmo0_sel", 3),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN4, "apll_div_pdn4", "tdmo1_sel", 4),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN5, "apll_div_pdn5", "i2si1_sel", 5),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN6, "apll_div_pdn6", "i2si2_sel", 6),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN7, "apll_div_pdn7", "i2si3_sel", 7),
+ /* TOP1 */
+ GATE_TOP1(CLK_TOP_NFI2X_EN, "nfi2x_en", "nfi2x_sel", 0),
+ GATE_TOP1(CLK_TOP_NFIECC_EN, "nfiecc_en", "nfiecc_sel", 1),
+ GATE_TOP1(CLK_TOP_NFI1X_CK_EN, "nfi1x_ck_en", "nfi2x_sel", 2),
};
static const struct mtk_gate_regs infra_cg_regs = {
.set_ofs = 0x40,
.clr_ofs = 0x44,
- .sta_ofs = 0x40,
+ .sta_ofs = 0x48,
};
#define GATE_INFRA(_id, _name, _parent, _shift) { \
@@ -1120,6 +1157,10 @@ static const struct mtk_gate peri_clks[] = {
"msdc50_0_h_sel", 4),
GATE_PERI2(CLK_PERI_MSDC50_3_HCLK_EN, "per_msdc50_3_h",
"msdc50_3_h_sel", 5),
+ GATE_PERI2(CLK_PERI_MSDC30_0_QTR_EN, "per_msdc30_0_q",
+ "axi_sel", 6),
+ GATE_PERI2(CLK_PERI_MSDC30_3_QTR_EN, "per_msdc30_3_q",
+ "mem_sel", 7),
};
#define MT2712_PLL_FMAX (3000UL * MHZ)
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index fad7d9fc53ba..4f3d47b41b3e 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -106,6 +106,7 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO1(CLK_AUDIO_INTDIR, "audio_intdir", "intdir_sel", 20),
GATE_AUDIO1(CLK_AUDIO_A1SYS, "audio_a1sys", "a1sys_hp_sel", 21),
GATE_AUDIO1(CLK_AUDIO_A2SYS, "audio_a2sys", "a2sys_hp_sel", 22),
+ GATE_AUDIO1(CLK_AUDIO_AFE_CONN, "audio_afe_conn", "a1sys_hp_sel", 23),
/* AUDIO2 */
GATE_AUDIO2(CLK_AUDIO_UL1, "audio_ul1", "a1sys_hp_sel", 0),
GATE_AUDIO2(CLK_AUDIO_UL2, "audio_ul2", "a1sys_hp_sel", 1),
@@ -149,11 +150,23 @@ static int clk_mt7622_audiosys_init(struct platform_device *pdev)
clk_data);
r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
- if (r)
+ if (r) {
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
pdev->name, r);
+ goto err_clk_provider;
+ }
+
+ r = devm_of_platform_populate(&pdev->dev);
+ if (r)
+ goto err_plat_populate;
+
+ return 0;
+
+err_plat_populate:
+ of_clk_del_provider(node);
+err_clk_provider:
return r;
}
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 7694302c70a4..d5cbec522aec 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -3,10 +3,15 @@ config COMMON_CLK_AMLOGIC
depends on OF
depends on ARCH_MESON || COMPILE_TEST
+config COMMON_CLK_REGMAP_MESON
+ bool
+ select REGMAP
+
config COMMON_CLK_MESON8B
bool
depends on COMMON_CLK_AMLOGIC
select RESET_CONTROLLER
+ select COMMON_CLK_REGMAP_MESON
help
Support for the clock controller on AmLogic S802 (Meson8),
S805 (Meson8b) and S812 (Meson8m2) devices. Say Y if you
@@ -16,6 +21,8 @@ config COMMON_CLK_GXBB
bool
depends on COMMON_CLK_AMLOGIC
select RESET_CONTROLLER
+ select COMMON_CLK_REGMAP_MESON
+ select MFD_SYSCON
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
Say Y if you want peripherals and CPU frequency scaling to work.
@@ -24,6 +31,8 @@ config COMMON_CLK_AXG
bool
depends on COMMON_CLK_AMLOGIC
select RESET_CONTROLLER
+ select COMMON_CLK_REGMAP_MESON
+ select MFD_SYSCON
help
Support for the clock controller on AmLogic A113D devices, aka axg.
Say Y if you want peripherals and CPU frequency scaling to work.
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index 3c03ce583798..ffee82e60b7a 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -2,7 +2,8 @@
# Makefile for Meson specific clk
#
-obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-cpu.o clk-mpll.o clk-audio-divider.o
+obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-audio-divider.o
obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
-obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-regmap.o gxbb-aoclk-32k.o
+obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-32k.o
obj-$(CONFIG_COMMON_CLK_AXG) += axg.o
+obj-$(CONFIG_COMMON_CLK_REGMAP_MESON) += clk-regmap.o
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 1294f3ad7cd5..5f5d468c1efe 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -11,125 +11,51 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
+#include <linux/regmap.h>
#include "clkc.h"
#include "axg.h"
static DEFINE_SPINLOCK(meson_clk_lock);
-static const struct pll_rate_table sys_pll_rate_table[] = {
- PLL_RATE(24000000, 56, 1, 2),
- PLL_RATE(48000000, 64, 1, 2),
- PLL_RATE(72000000, 72, 1, 2),
- PLL_RATE(96000000, 64, 1, 2),
- PLL_RATE(120000000, 80, 1, 2),
- PLL_RATE(144000000, 96, 1, 2),
- PLL_RATE(168000000, 56, 1, 1),
- PLL_RATE(192000000, 64, 1, 1),
- PLL_RATE(216000000, 72, 1, 1),
- PLL_RATE(240000000, 80, 1, 1),
- PLL_RATE(264000000, 88, 1, 1),
- PLL_RATE(288000000, 96, 1, 1),
- PLL_RATE(312000000, 52, 1, 2),
- PLL_RATE(336000000, 56, 1, 2),
- PLL_RATE(360000000, 60, 1, 2),
- PLL_RATE(384000000, 64, 1, 2),
- PLL_RATE(408000000, 68, 1, 2),
- PLL_RATE(432000000, 72, 1, 2),
- PLL_RATE(456000000, 76, 1, 2),
- PLL_RATE(480000000, 80, 1, 2),
- PLL_RATE(504000000, 84, 1, 2),
- PLL_RATE(528000000, 88, 1, 2),
- PLL_RATE(552000000, 92, 1, 2),
- PLL_RATE(576000000, 96, 1, 2),
- PLL_RATE(600000000, 50, 1, 1),
- PLL_RATE(624000000, 52, 1, 1),
- PLL_RATE(648000000, 54, 1, 1),
- PLL_RATE(672000000, 56, 1, 1),
- PLL_RATE(696000000, 58, 1, 1),
- PLL_RATE(720000000, 60, 1, 1),
- PLL_RATE(744000000, 62, 1, 1),
- PLL_RATE(768000000, 64, 1, 1),
- PLL_RATE(792000000, 66, 1, 1),
- PLL_RATE(816000000, 68, 1, 1),
- PLL_RATE(840000000, 70, 1, 1),
- PLL_RATE(864000000, 72, 1, 1),
- PLL_RATE(888000000, 74, 1, 1),
- PLL_RATE(912000000, 76, 1, 1),
- PLL_RATE(936000000, 78, 1, 1),
- PLL_RATE(960000000, 80, 1, 1),
- PLL_RATE(984000000, 82, 1, 1),
- PLL_RATE(1008000000, 84, 1, 1),
- PLL_RATE(1032000000, 86, 1, 1),
- PLL_RATE(1056000000, 88, 1, 1),
- PLL_RATE(1080000000, 90, 1, 1),
- PLL_RATE(1104000000, 92, 1, 1),
- PLL_RATE(1128000000, 94, 1, 1),
- PLL_RATE(1152000000, 96, 1, 1),
- PLL_RATE(1176000000, 98, 1, 1),
- PLL_RATE(1200000000, 50, 1, 0),
- PLL_RATE(1224000000, 51, 1, 0),
- PLL_RATE(1248000000, 52, 1, 0),
- PLL_RATE(1272000000, 53, 1, 0),
- PLL_RATE(1296000000, 54, 1, 0),
- PLL_RATE(1320000000, 55, 1, 0),
- PLL_RATE(1344000000, 56, 1, 0),
- PLL_RATE(1368000000, 57, 1, 0),
- PLL_RATE(1392000000, 58, 1, 0),
- PLL_RATE(1416000000, 59, 1, 0),
- PLL_RATE(1440000000, 60, 1, 0),
- PLL_RATE(1464000000, 61, 1, 0),
- PLL_RATE(1488000000, 62, 1, 0),
- PLL_RATE(1512000000, 63, 1, 0),
- PLL_RATE(1536000000, 64, 1, 0),
- PLL_RATE(1560000000, 65, 1, 0),
- PLL_RATE(1584000000, 66, 1, 0),
- PLL_RATE(1608000000, 67, 1, 0),
- PLL_RATE(1632000000, 68, 1, 0),
- PLL_RATE(1656000000, 68, 1, 0),
- PLL_RATE(1680000000, 68, 1, 0),
- PLL_RATE(1704000000, 68, 1, 0),
- PLL_RATE(1728000000, 69, 1, 0),
- PLL_RATE(1752000000, 69, 1, 0),
- PLL_RATE(1776000000, 69, 1, 0),
- PLL_RATE(1800000000, 69, 1, 0),
- PLL_RATE(1824000000, 70, 1, 0),
- PLL_RATE(1848000000, 70, 1, 0),
- PLL_RATE(1872000000, 70, 1, 0),
- PLL_RATE(1896000000, 70, 1, 0),
- PLL_RATE(1920000000, 71, 1, 0),
- PLL_RATE(1944000000, 71, 1, 0),
- PLL_RATE(1968000000, 71, 1, 0),
- PLL_RATE(1992000000, 71, 1, 0),
- PLL_RATE(2016000000, 72, 1, 0),
- PLL_RATE(2040000000, 72, 1, 0),
- PLL_RATE(2064000000, 72, 1, 0),
- PLL_RATE(2088000000, 72, 1, 0),
- PLL_RATE(2112000000, 73, 1, 0),
- { /* sentinel */ },
-};
-
-static struct meson_clk_pll axg_fixed_pll = {
- .m = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 16,
- .width = 2,
+static struct clk_regmap axg_fixed_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_MPLL_CNTL2,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -138,25 +64,34 @@ static struct meson_clk_pll axg_fixed_pll = {
},
};
-static struct meson_clk_pll axg_sys_pll = {
- .m = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 0,
- .width = 9,
+static struct clk_regmap axg_sys_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .n = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 10,
- .width = 2,
- },
- .rate_table = sys_pll_rate_table,
- .rate_count = ARRAY_SIZE(sys_pll_rate_table),
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -257,40 +192,51 @@ static const struct pll_rate_table axg_gp0_pll_rate_table[] = {
{ /* sentinel */ },
};
-static struct pll_params_table axg_gp0_params_table[] = {
- PLL_PARAM(HHI_GP0_PLL_CNTL, 0x40010250),
- PLL_PARAM(HHI_GP0_PLL_CNTL1, 0xc084a000),
- PLL_PARAM(HHI_GP0_PLL_CNTL2, 0xb75020be),
- PLL_PARAM(HHI_GP0_PLL_CNTL3, 0x0a59a288),
- PLL_PARAM(HHI_GP0_PLL_CNTL4, 0xc000004d),
- PLL_PARAM(HHI_GP0_PLL_CNTL5, 0x00078000),
-};
-
-static struct meson_clk_pll axg_gp0_pll = {
- .m = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
- .params = {
- .params_table = axg_gp0_params_table,
- .params_count = ARRAY_SIZE(axg_gp0_params_table),
- .no_init_reset = true,
- .reset_lock_loop = true,
- },
- .rate_table = axg_gp0_pll_rate_table,
- .rate_count = ARRAY_SIZE(axg_gp0_pll_rate_table),
- .lock = &meson_clk_lock,
+static const struct reg_sequence axg_gp0_init_regs[] = {
+ { .reg = HHI_GP0_PLL_CNTL1, .def = 0xc084b000 },
+ { .reg = HHI_GP0_PLL_CNTL2, .def = 0xb75020be },
+ { .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a59a288 },
+ { .reg = HHI_GP0_PLL_CNTL4, .def = 0xc000004d },
+ { .reg = HHI_GP0_PLL_CNTL5, .def = 0x00078000 },
+ { .reg = HHI_GP0_PLL_CNTL, .def = 0x40010250 },
+};
+
+static struct clk_regmap axg_gp0_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_GP0_PLL_CNTL1,
+ .shift = 0,
+ .width = 10,
+ },
+ .l = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = axg_gp0_pll_rate_table,
+ .init_regs = axg_gp0_init_regs,
+ .init_count = ARRAY_SIZE(axg_gp0_init_regs),
+ },
.hw.init = &(struct clk_init_data){
.name = "gp0_pll",
.ops = &meson_clk_pll_ops,
@@ -299,234 +245,427 @@ static struct meson_clk_pll axg_gp0_pll = {
},
};
+static const struct reg_sequence axg_hifi_init_regs[] = {
+ { .reg = HHI_HIFI_PLL_CNTL1, .def = 0xc084b000 },
+ { .reg = HHI_HIFI_PLL_CNTL2, .def = 0xb75020be },
+ { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x0a6a3a88 },
+ { .reg = HHI_HIFI_PLL_CNTL4, .def = 0xc000004d },
+ { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x00058000 },
+ { .reg = HHI_HIFI_PLL_CNTL, .def = 0x40010250 },
+};
+
+static struct clk_regmap axg_hifi_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_HIFI_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_HIFI_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_HIFI_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_HIFI_PLL_CNTL5,
+ .shift = 0,
+ .width = 13,
+ },
+ .l = {
+ .reg_off = HHI_HIFI_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HIFI_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = axg_gp0_pll_rate_table,
+ .init_regs = axg_hifi_init_regs,
+ .init_count = ARRAY_SIZE(axg_hifi_init_regs),
+ .flags = CLK_MESON_PLL_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ },
+};
-static struct clk_fixed_factor axg_fclk_div2 = {
+static struct clk_fixed_factor axg_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div2",
+ .name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor axg_fclk_div3 = {
+static struct clk_regmap axg_fclk_div2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 27,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div2_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div3",
+ .name = "fclk_div3_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor axg_fclk_div4 = {
+static struct clk_regmap axg_fclk_div3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 28,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div3_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_fclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div4",
+ .name = "fclk_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor axg_fclk_div5 = {
+static struct clk_regmap axg_fclk_div4 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 29,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div4_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div5",
+ .name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor axg_fclk_div7 = {
+static struct clk_regmap axg_fclk_div5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div5_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div7",
+ .name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll axg_mpll0 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 0,
- .width = 14,
+static struct clk_regmap axg_fclk_div7 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 31,
},
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 15,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div7_div" },
+ .num_parents = 1,
},
- .n2 = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 16,
- .width = 9,
+};
+
+static struct clk_regmap axg_mpll_prediv = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPLL_CNTL5,
+ .shift = 12,
+ .width = 1,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 14,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_prediv",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
},
- .ssen = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 25,
- .width = 1,
+};
+
+static struct clk_regmap axg_mpll0_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 16,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 25,
+ .width = 1,
+ },
+ .misc = {
+ .reg_off = HHI_PLL_TOP_MISC,
+ .shift = 0,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll0",
+ .name = "mpll0_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll axg_mpll1 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 0,
- .width = 14,
+static struct clk_regmap axg_mpll0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL7,
+ .bit_idx = 14,
},
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 15,
- .width = 1,
- },
- .n2 = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 16,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 14,
- .width = 1,
+};
+
+static struct clk_regmap axg_mpll1_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 16,
+ .width = 9,
+ },
+ .misc = {
+ .reg_off = HHI_PLL_TOP_MISC,
+ .shift = 1,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll1",
+ .name = "mpll1_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll axg_mpll2 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 0,
- .width = 14,
+static struct clk_regmap axg_mpll1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL8,
+ .bit_idx = 14,
},
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 15,
- .width = 1,
- },
- .n2 = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 16,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 14,
- .width = 1,
+};
+
+static struct clk_regmap axg_mpll2_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 16,
+ .width = 9,
+ },
+ .misc = {
+ .reg_off = HHI_PLL_TOP_MISC,
+ .shift = 2,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll2",
+ .name = "mpll2_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll axg_mpll3 = {
- .sdm = {
- .reg_off = HHI_MPLL3_CNTL0,
- .shift = 12,
- .width = 14,
+static struct clk_regmap axg_mpll2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL9,
+ .bit_idx = 14,
},
- .sdm_en = {
- .reg_off = HHI_MPLL3_CNTL0,
- .shift = 11,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_mpll3_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL3_CNTL0,
+ .shift = 12,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL3_CNTL0,
+ .shift = 11,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL3_CNTL0,
+ .shift = 2,
+ .width = 9,
+ },
+ .misc = {
+ .reg_off = HHI_PLL_TOP_MISC,
+ .shift = 3,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
},
- .n2 = {
- .reg_off = HHI_MPLL3_CNTL0,
- .shift = 2,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll3_div",
+ .ops = &meson_clk_mpll_ops,
+ .parent_names = (const char *[]){ "mpll_prediv" },
+ .num_parents = 1,
},
- .en = {
- .reg_off = HHI_MPLL3_CNTL0,
- .shift = 0,
- .width = 1,
+};
+
+static struct clk_regmap axg_mpll3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL3_CNTL0,
+ .bit_idx = 0,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll3",
- .ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll3_div" },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
-/*
- * FIXME The legacy composite clocks (e.g. clk81) are both PLL post-dividers
- * and should be modeled with their respective PLLs via the forthcoming
- * coordinated clock rates feature
- */
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
static const char * const clk81_parent_names[] = {
"xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
"fclk_div3", "fclk_div5"
};
-static struct clk_mux axg_mpeg_clk_sel = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .mask = 0x7,
- .shift = 12,
- .flags = CLK_MUX_READ_ONLY,
- .table = mux_table_clk81,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_mpeg_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 12,
+ .table = mux_table_clk81,
+ },
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_sel",
- .ops = &clk_mux_ro_ops,
+ .ops = &clk_regmap_mux_ro_ops,
.parent_names = clk81_parent_names,
.num_parents = ARRAY_SIZE(clk81_parent_names),
},
};
-static struct clk_divider axg_mpeg_clk_div = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_mpeg_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "mpeg_clk_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate axg_clk81 = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .bit_idx = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_clk81 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .bit_idx = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "clk81",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "mpeg_clk_div" },
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
@@ -545,42 +684,45 @@ static const char * const axg_sd_emmc_clk0_parent_names[] = {
};
/* SDcard clock */
-static struct clk_mux axg_sd_emmc_b_clk0_sel = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .mask = 0x7,
- .shift = 25,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_sd_emmc_b_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = axg_sd_emmc_clk0_parent_names,
.num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_divider axg_sd_emmc_b_clk0_div = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .shift = 16,
- .width = 7,
- .lock = &meson_clk_lock,
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
+static struct clk_regmap axg_sd_emmc_b_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate axg_sd_emmc_b_clk0 = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .bit_idx = 23,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_sd_emmc_b_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .bit_idx = 23,
+ },
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_b_clk0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -588,42 +730,45 @@ static struct clk_gate axg_sd_emmc_b_clk0 = {
};
/* EMMC/NAND clock */
-static struct clk_mux axg_sd_emmc_c_clk0_sel = {
- .reg = (void *)HHI_NAND_CLK_CNTL,
- .mask = 0x7,
- .shift = 9,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_sd_emmc_c_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = axg_sd_emmc_clk0_parent_names,
.num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_divider axg_sd_emmc_c_clk0_div = {
- .reg = (void *)HHI_NAND_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
+static struct clk_regmap axg_sd_emmc_c_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate axg_sd_emmc_c_clk0 = {
- .reg = (void *)HHI_NAND_CLK_CNTL,
- .bit_idx = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_sd_emmc_c_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .bit_idx = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_c_clk0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -750,27 +895,24 @@ static struct clk_hw_onecell_data axg_hw_onecell_data = {
[CLKID_SD_EMMC_C_CLK0_SEL] = &axg_sd_emmc_c_clk0_sel.hw,
[CLKID_SD_EMMC_C_CLK0_DIV] = &axg_sd_emmc_c_clk0_div.hw,
[CLKID_SD_EMMC_C_CLK0] = &axg_sd_emmc_c_clk0.hw,
+ [CLKID_MPLL0_DIV] = &axg_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &axg_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &axg_mpll2_div.hw,
+ [CLKID_MPLL3_DIV] = &axg_mpll3_div.hw,
+ [CLKID_HIFI_PLL] = &axg_hifi_pll.hw,
+ [CLKID_MPLL_PREDIV] = &axg_mpll_prediv.hw,
+ [CLKID_FCLK_DIV2_DIV] = &axg_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &axg_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &axg_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &axg_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &axg_fclk_div7_div.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
};
-/* Convenience tables to populate base addresses in .probe */
-
-static struct meson_clk_pll *const axg_clk_plls[] = {
- &axg_fixed_pll,
- &axg_sys_pll,
- &axg_gp0_pll,
-};
-
-static struct meson_clk_mpll *const axg_clk_mplls[] = {
- &axg_mpll0,
- &axg_mpll1,
- &axg_mpll2,
- &axg_mpll3,
-};
-
-static struct clk_gate *const axg_clk_gates[] = {
+/* Convenience table to populate regmap in .probe */
+static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_clk81,
&axg_ddr,
&axg_audio_locker,
@@ -818,113 +960,100 @@ static struct clk_gate *const axg_clk_gates[] = {
&axg_ao_i2c,
&axg_sd_emmc_b_clk0,
&axg_sd_emmc_c_clk0,
-};
-
-static struct clk_mux *const axg_clk_muxes[] = {
- &axg_mpeg_clk_sel,
- &axg_sd_emmc_b_clk0_sel,
- &axg_sd_emmc_c_clk0_sel,
-};
-
-static struct clk_divider *const axg_clk_dividers[] = {
&axg_mpeg_clk_div,
&axg_sd_emmc_b_clk0_div,
&axg_sd_emmc_c_clk0_div,
-};
-
-struct clkc_data {
- struct clk_gate *const *clk_gates;
- unsigned int clk_gates_count;
- struct meson_clk_mpll *const *clk_mplls;
- unsigned int clk_mplls_count;
- struct meson_clk_pll *const *clk_plls;
- unsigned int clk_plls_count;
- struct clk_mux *const *clk_muxes;
- unsigned int clk_muxes_count;
- struct clk_divider *const *clk_dividers;
- unsigned int clk_dividers_count;
- struct clk_hw_onecell_data *hw_onecell_data;
-};
-
-static const struct clkc_data axg_clkc_data = {
- .clk_gates = axg_clk_gates,
- .clk_gates_count = ARRAY_SIZE(axg_clk_gates),
- .clk_mplls = axg_clk_mplls,
- .clk_mplls_count = ARRAY_SIZE(axg_clk_mplls),
- .clk_plls = axg_clk_plls,
- .clk_plls_count = ARRAY_SIZE(axg_clk_plls),
- .clk_muxes = axg_clk_muxes,
- .clk_muxes_count = ARRAY_SIZE(axg_clk_muxes),
- .clk_dividers = axg_clk_dividers,
- .clk_dividers_count = ARRAY_SIZE(axg_clk_dividers),
- .hw_onecell_data = &axg_hw_onecell_data,
+ &axg_mpeg_clk_sel,
+ &axg_sd_emmc_b_clk0_sel,
+ &axg_sd_emmc_c_clk0_sel,
+ &axg_mpll0,
+ &axg_mpll1,
+ &axg_mpll2,
+ &axg_mpll3,
+ &axg_mpll0_div,
+ &axg_mpll1_div,
+ &axg_mpll2_div,
+ &axg_mpll3_div,
+ &axg_fixed_pll,
+ &axg_sys_pll,
+ &axg_gp0_pll,
+ &axg_hifi_pll,
+ &axg_mpll_prediv,
+ &axg_fclk_div2,
+ &axg_fclk_div3,
+ &axg_fclk_div4,
+ &axg_fclk_div5,
+ &axg_fclk_div7,
};
static const struct of_device_id clkc_match_table[] = {
- { .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
+ { .compatible = "amlogic,axg-clkc" },
{}
};
+static const struct regmap_config clkc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int axg_clkc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct clkc_data *clkc_data;
struct resource *res;
- void __iomem *clk_base;
- int ret, clkid, i;
-
- clkc_data = of_device_get_match_data(&pdev->dev);
- if (!clkc_data)
- return -EINVAL;
-
- /* Generic clocks and PLLs */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
- clk_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!clk_base) {
- dev_err(&pdev->dev, "Unable to map clk base\n");
- return -ENXIO;
- }
+ void __iomem *clk_base = NULL;
+ struct regmap *map;
+ int ret, i;
- /* Populate base address for PLLs */
- for (i = 0; i < clkc_data->clk_plls_count; i++)
- clkc_data->clk_plls[i]->base = clk_base;
+ /* Get the hhi system controller node if available */
+ map = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "failed to get HHI regmap - Trying obsolete regs\n");
- /* Populate base address for MPLLs */
- for (i = 0; i < clkc_data->clk_mplls_count; i++)
- clkc_data->clk_mplls[i]->base = clk_base;
+ /*
+ * FIXME: HHI registers should be accessed through
+ * the appropriate system controller. This is required because
+ * there is more than just clocks in this register space
+ *
+ * This fallback method is only provided temporarily until
+ * all the platform DTs are properly using the syscon node
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
- /* Populate base address for gates */
- for (i = 0; i < clkc_data->clk_gates_count; i++)
- clkc_data->clk_gates[i]->reg = clk_base +
- (u64)clkc_data->clk_gates[i]->reg;
- /* Populate base address for muxes */
- for (i = 0; i < clkc_data->clk_muxes_count; i++)
- clkc_data->clk_muxes[i]->reg = clk_base +
- (u64)clkc_data->clk_muxes[i]->reg;
+ clk_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!clk_base) {
+ dev_err(dev, "Unable to map clk base\n");
+ return -ENXIO;
+ }
+
+ map = devm_regmap_init_mmio(dev, clk_base,
+ &clkc_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+ }
- /* Populate base address for dividers */
- for (i = 0; i < clkc_data->clk_dividers_count; i++)
- clkc_data->clk_dividers[i]->reg = clk_base +
- (u64)clkc_data->clk_dividers[i]->reg;
+ /* Populate regmap for the regmap backed clocks */
+ for (i = 0; i < ARRAY_SIZE(axg_clk_regmaps); i++)
+ axg_clk_regmaps[i]->map = map;
- for (clkid = 0; clkid < clkc_data->hw_onecell_data->num; clkid++) {
+ for (i = 0; i < axg_hw_onecell_data.num; i++) {
/* array might be sparse */
- if (!clkc_data->hw_onecell_data->hws[clkid])
+ if (!axg_hw_onecell_data.hws[i])
continue;
- ret = devm_clk_hw_register(dev,
- clkc_data->hw_onecell_data->hws[clkid]);
+ ret = devm_clk_hw_register(dev, axg_hw_onecell_data.hws[i]);
if (ret) {
- dev_err(&pdev->dev, "Clock registration failed\n");
+ dev_err(dev, "Clock registration failed\n");
return ret;
}
}
- return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
- clkc_data->hw_onecell_data);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &axg_hw_onecell_data);
}
static struct platform_driver axg_driver = {
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
index ce0bafdb6b28..b421df6a7ea0 100644
--- a/drivers/clk/meson/axg.h
+++ b/drivers/clk/meson/axg.h
@@ -117,8 +117,18 @@
#define CLKID_SD_EMMC_B_CLK0_DIV 62
#define CLKID_SD_EMMC_C_CLK0_SEL 63
#define CLKID_SD_EMMC_C_CLK0_DIV 64
+#define CLKID_MPLL0_DIV 65
+#define CLKID_MPLL1_DIV 66
+#define CLKID_MPLL2_DIV 67
+#define CLKID_MPLL3_DIV 68
+#define CLKID_MPLL_PREDIV 70
+#define CLKID_FCLK_DIV2_DIV 71
+#define CLKID_FCLK_DIV3_DIV 72
+#define CLKID_FCLK_DIV4_DIV 73
+#define CLKID_FCLK_DIV5_DIV 74
+#define CLKID_FCLK_DIV7_DIV 75
-#define NR_CLKS 65
+#define NR_CLKS 76
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/axg-clkc.h>
diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c
index 6c07db06642d..f7ab5b1db342 100644
--- a/drivers/clk/meson/clk-audio-divider.c
+++ b/drivers/clk/meson/clk-audio-divider.c
@@ -28,8 +28,11 @@
#include <linux/clk-provider.h>
#include "clkc.h"
-#define to_meson_clk_audio_divider(_hw) container_of(_hw, \
- struct meson_clk_audio_divider, hw)
+static inline struct meson_clk_audio_div_data *
+meson_clk_audio_div_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_audio_div_data *)clk->data;
+}
static int _div_round(unsigned long parent_rate, unsigned long rate,
unsigned long flags)
@@ -45,15 +48,9 @@ static int _get_val(unsigned long parent_rate, unsigned long rate)
return DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1;
}
-static int _valid_divider(struct clk_hw *hw, int divider)
+static int _valid_divider(unsigned int width, int divider)
{
- struct meson_clk_audio_divider *adiv =
- to_meson_clk_audio_divider(hw);
- int max_divider;
- u8 width;
-
- width = adiv->div.width;
- max_divider = 1 << width;
+ int max_divider = 1 << width;
return clamp(divider, 1, max_divider);
}
@@ -61,14 +58,11 @@ static int _valid_divider(struct clk_hw *hw, int divider)
static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct meson_clk_audio_divider *adiv =
- to_meson_clk_audio_divider(hw);
- struct parm *p;
- unsigned long reg, divider;
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
+ unsigned long divider;
- p = &adiv->div;
- reg = readl(adiv->base + p->reg_off);
- divider = PARM_GET(p->width, p->shift, reg) + 1;
+ divider = meson_parm_read(clk->map, &adiv->div);
return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
}
@@ -77,14 +71,14 @@ static long audio_divider_round_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long *parent_rate)
{
- struct meson_clk_audio_divider *adiv =
- to_meson_clk_audio_divider(hw);
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
unsigned long max_prate;
int divider;
if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
divider = _div_round(*parent_rate, rate, adiv->flags);
- divider = _valid_divider(hw, divider);
+ divider = _valid_divider(adiv->div.width, divider);
return DIV_ROUND_UP_ULL((u64)*parent_rate, divider);
}
@@ -93,7 +87,7 @@ static long audio_divider_round_rate(struct clk_hw *hw,
/* Get the corresponding rounded down divider */
divider = max_prate / rate;
- divider = _valid_divider(hw, divider);
+ divider = _valid_divider(adiv->div.width, divider);
/* Get actual rate of the parent */
*parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
@@ -106,28 +100,11 @@ static int audio_divider_set_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
{
- struct meson_clk_audio_divider *adiv =
- to_meson_clk_audio_divider(hw);
- struct parm *p;
- unsigned long reg, flags = 0;
- int val;
-
- val = _get_val(parent_rate, rate);
-
- if (adiv->lock)
- spin_lock_irqsave(adiv->lock, flags);
- else
- __acquire(adiv->lock);
-
- p = &adiv->div;
- reg = readl(adiv->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, val);
- writel(reg, adiv->base + p->reg_off);
-
- if (adiv->lock)
- spin_unlock_irqrestore(adiv->lock, flags);
- else
- __release(adiv->lock);
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
+ int val = _get_val(parent_rate, rate);
+
+ meson_parm_write(clk->map, &adiv->div, val);
return 0;
}
diff --git a/drivers/clk/meson/clk-cpu.c b/drivers/clk/meson/clk-cpu.c
deleted file mode 100644
index f8b2b7efd016..000000000000
--- a/drivers/clk/meson/clk-cpu.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (c) 2015 Endless Mobile, Inc.
- * Author: Carlo Caione <carlo@endlessm.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-/*
- * CPU clock path:
- *
- * +-[/N]-----|3|
- * MUX2 +--[/3]-+----------|2| MUX1
- * [sys_pll]---|1| |--[/2]------------|1|-|1|
- * | |---+------------------|0| | |----- [a5_clk]
- * +--|0| | |
- * [xtal]---+-------------------------------|0|
- *
- *
- *
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-
-#define MESON_CPU_CLK_CNTL1 0x00
-#define MESON_CPU_CLK_CNTL 0x40
-
-#define MESON_CPU_CLK_MUX1 BIT(7)
-#define MESON_CPU_CLK_MUX2 BIT(0)
-
-#define MESON_N_WIDTH 9
-#define MESON_N_SHIFT 20
-#define MESON_SEL_WIDTH 2
-#define MESON_SEL_SHIFT 2
-
-#include "clkc.h"
-
-#define to_meson_clk_cpu_hw(_hw) container_of(_hw, struct meson_clk_cpu, hw)
-#define to_meson_clk_cpu_nb(_nb) container_of(_nb, struct meson_clk_cpu, clk_nb)
-
-static long meson_clk_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_hw(hw);
-
- return divider_round_rate(hw, rate, prate, clk_cpu->div_table,
- MESON_N_WIDTH, CLK_DIVIDER_ROUND_CLOSEST);
-}
-
-static int meson_clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_hw(hw);
- unsigned int div, sel, N = 0;
- u32 reg;
-
- div = DIV_ROUND_UP(parent_rate, rate);
-
- if (div <= 3) {
- sel = div - 1;
- } else {
- sel = 3;
- N = div / 2;
- }
-
- reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL1);
- reg = PARM_SET(MESON_N_WIDTH, MESON_N_SHIFT, reg, N);
- writel(reg, clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL1);
-
- reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL);
- reg = PARM_SET(MESON_SEL_WIDTH, MESON_SEL_SHIFT, reg, sel);
- writel(reg, clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL);
-
- return 0;
-}
-
-static unsigned long meson_clk_cpu_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_hw(hw);
- unsigned int N, sel;
- unsigned int div = 1;
- u32 reg;
-
- reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL1);
- N = PARM_GET(MESON_N_WIDTH, MESON_N_SHIFT, reg);
-
- reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL);
- sel = PARM_GET(MESON_SEL_WIDTH, MESON_SEL_SHIFT, reg);
-
- if (sel < 3)
- div = sel + 1;
- else
- div = 2 * N;
-
- return parent_rate / div;
-}
-
-/* FIXME MUX1 & MUX2 should be struct clk_hw objects */
-static int meson_clk_cpu_pre_rate_change(struct meson_clk_cpu *clk_cpu,
- struct clk_notifier_data *ndata)
-{
- u32 cpu_clk_cntl;
-
- /* switch MUX1 to xtal */
- cpu_clk_cntl = readl(clk_cpu->base + clk_cpu->reg_off
- + MESON_CPU_CLK_CNTL);
- cpu_clk_cntl &= ~MESON_CPU_CLK_MUX1;
- writel(cpu_clk_cntl, clk_cpu->base + clk_cpu->reg_off
- + MESON_CPU_CLK_CNTL);
- udelay(100);
-
- /* switch MUX2 to sys-pll */
- cpu_clk_cntl |= MESON_CPU_CLK_MUX2;
- writel(cpu_clk_cntl, clk_cpu->base + clk_cpu->reg_off
- + MESON_CPU_CLK_CNTL);
-
- return 0;
-}
-
-/* FIXME MUX1 & MUX2 should be struct clk_hw objects */
-static int meson_clk_cpu_post_rate_change(struct meson_clk_cpu *clk_cpu,
- struct clk_notifier_data *ndata)
-{
- u32 cpu_clk_cntl;
-
- /* switch MUX1 to divisors' output */
- cpu_clk_cntl = readl(clk_cpu->base + clk_cpu->reg_off
- + MESON_CPU_CLK_CNTL);
- cpu_clk_cntl |= MESON_CPU_CLK_MUX1;
- writel(cpu_clk_cntl, clk_cpu->base + clk_cpu->reg_off
- + MESON_CPU_CLK_CNTL);
- udelay(100);
-
- return 0;
-}
-
-/*
- * This clock notifier is called when the frequency of the of the parent
- * PLL clock is to be changed. We use the xtal input as temporary parent
- * while the PLL frequency is stabilized.
- */
-int meson_clk_cpu_notifier_cb(struct notifier_block *nb,
- unsigned long event, void *data)
-{
- struct clk_notifier_data *ndata = data;
- struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_nb(nb);
- int ret = 0;
-
- if (event == PRE_RATE_CHANGE)
- ret = meson_clk_cpu_pre_rate_change(clk_cpu, ndata);
- else if (event == POST_RATE_CHANGE)
- ret = meson_clk_cpu_post_rate_change(clk_cpu, ndata);
-
- return notifier_from_errno(ret);
-}
-
-const struct clk_ops meson_clk_cpu_ops = {
- .recalc_rate = meson_clk_cpu_recalc_rate,
- .round_rate = meson_clk_cpu_round_rate,
- .set_rate = meson_clk_cpu_set_rate,
-};
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 5144360e2c80..0df1227b65b3 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -68,11 +68,15 @@
#define N2_MIN 4
#define N2_MAX 511
-#define to_meson_clk_mpll(_hw) container_of(_hw, struct meson_clk_mpll, hw)
+static inline struct meson_clk_mpll_data *
+meson_clk_mpll_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_mpll_data *)clk->data;
+}
static long rate_from_params(unsigned long parent_rate,
- unsigned long sdm,
- unsigned long n2)
+ unsigned int sdm,
+ unsigned int n2)
{
unsigned long divisor = (SDM_DEN * n2) + sdm;
@@ -84,8 +88,8 @@ static long rate_from_params(unsigned long parent_rate,
static void params_from_rate(unsigned long requested_rate,
unsigned long parent_rate,
- unsigned long *sdm,
- unsigned long *n2)
+ unsigned int *sdm,
+ unsigned int *n2)
{
uint64_t div = parent_rate;
unsigned long rem = do_div(div, requested_rate);
@@ -105,31 +109,23 @@ static void params_from_rate(unsigned long requested_rate,
static unsigned long mpll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw);
- struct parm *p;
- unsigned long reg, sdm, n2;
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
+ unsigned int sdm, n2;
long rate;
- p = &mpll->sdm;
- reg = readl(mpll->base + p->reg_off);
- sdm = PARM_GET(p->width, p->shift, reg);
-
- p = &mpll->n2;
- reg = readl(mpll->base + p->reg_off);
- n2 = PARM_GET(p->width, p->shift, reg);
+ sdm = meson_parm_read(clk->map, &mpll->sdm);
+ n2 = meson_parm_read(clk->map, &mpll->n2);
rate = rate_from_params(parent_rate, sdm, n2);
- if (rate < 0)
- return 0;
-
- return rate;
+ return rate < 0 ? 0 : rate;
}
static long mpll_round_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long *parent_rate)
{
- unsigned long sdm, n2;
+ unsigned int sdm, n2;
params_from_rate(rate, *parent_rate, &sdm, &n2);
return rate_from_params(*parent_rate, sdm, n2);
@@ -139,9 +135,9 @@ static int mpll_set_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
{
- struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw);
- struct parm *p;
- unsigned long reg, sdm, n2;
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
+ unsigned int sdm, n2;
unsigned long flags = 0;
params_from_rate(rate, parent_rate, &sdm, &n2);
@@ -151,97 +147,36 @@ static int mpll_set_rate(struct clk_hw *hw,
else
__acquire(mpll->lock);
- p = &mpll->sdm;
- reg = readl(mpll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, sdm);
- writel(reg, mpll->base + p->reg_off);
-
- p = &mpll->sdm_en;
- reg = readl(mpll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, 1);
- writel(reg, mpll->base + p->reg_off);
-
- p = &mpll->ssen;
- if (p->width != 0) {
- reg = readl(mpll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, 1);
- writel(reg, mpll->base + p->reg_off);
- }
-
- p = &mpll->n2;
- reg = readl(mpll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, n2);
- writel(reg, mpll->base + p->reg_off);
-
- if (mpll->lock)
- spin_unlock_irqrestore(mpll->lock, flags);
- else
- __release(mpll->lock);
-
- return 0;
-}
+ /* Enable and set the fractional part */
+ meson_parm_write(clk->map, &mpll->sdm, sdm);
+ meson_parm_write(clk->map, &mpll->sdm_en, 1);
-static void mpll_enable_core(struct clk_hw *hw, int enable)
-{
- struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw);
- struct parm *p;
- unsigned long reg;
- unsigned long flags = 0;
+ /* Set additional fractional part enable if required */
+ if (MESON_PARM_APPLICABLE(&mpll->ssen))
+ meson_parm_write(clk->map, &mpll->ssen, 1);
- if (mpll->lock)
- spin_lock_irqsave(mpll->lock, flags);
- else
- __acquire(mpll->lock);
+ /* Set the integer divider part */
+ meson_parm_write(clk->map, &mpll->n2, n2);
- p = &mpll->en;
- reg = readl(mpll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, enable ? 1 : 0);
- writel(reg, mpll->base + p->reg_off);
+ /* Set the magic misc bit if required */
+ if (MESON_PARM_APPLICABLE(&mpll->misc))
+ meson_parm_write(clk->map, &mpll->misc, 1);
if (mpll->lock)
spin_unlock_irqrestore(mpll->lock, flags);
else
__release(mpll->lock);
-}
-
-
-static int mpll_enable(struct clk_hw *hw)
-{
- mpll_enable_core(hw, 1);
return 0;
}
-static void mpll_disable(struct clk_hw *hw)
-{
- mpll_enable_core(hw, 0);
-}
-
-static int mpll_is_enabled(struct clk_hw *hw)
-{
- struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw);
- struct parm *p;
- unsigned long reg;
- int en;
-
- p = &mpll->en;
- reg = readl(mpll->base + p->reg_off);
- en = PARM_GET(p->width, p->shift, reg);
-
- return en;
-}
-
const struct clk_ops meson_clk_mpll_ro_ops = {
.recalc_rate = mpll_recalc_rate,
.round_rate = mpll_round_rate,
- .is_enabled = mpll_is_enabled,
};
const struct clk_ops meson_clk_mpll_ops = {
.recalc_rate = mpll_recalc_rate,
.round_rate = mpll_round_rate,
.set_rate = mpll_set_rate,
- .enable = mpll_enable,
- .disable = mpll_disable,
- .is_enabled = mpll_is_enabled,
};
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index 01341553f50b..65a7bd903551 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -2,6 +2,9 @@
* Copyright (c) 2015 Endless Mobile, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
*
+ * Copyright (c) 2018 Baylibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -27,13 +30,14 @@
* | |
* FREF VCO
*
- * out = (in * M / N) >> OD
+ * out = in * (m + frac / frac_max) / (n << sum(ods))
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/slab.h>
@@ -41,209 +45,213 @@
#include "clkc.h"
-#define MESON_PLL_RESET BIT(29)
-#define MESON_PLL_LOCK BIT(31)
-
-#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw)
-
-static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static inline struct meson_clk_pll_data *
+meson_clk_pll_data(struct clk_regmap *clk)
{
- struct meson_clk_pll *pll = to_meson_clk_pll(hw);
- struct parm *p;
- unsigned long parent_rate_mhz = parent_rate / 1000000;
- unsigned long rate_mhz;
- u16 n, m, frac = 0, od, od2 = 0;
- u32 reg;
-
- p = &pll->n;
- reg = readl(pll->base + p->reg_off);
- n = PARM_GET(p->width, p->shift, reg);
-
- p = &pll->m;
- reg = readl(pll->base + p->reg_off);
- m = PARM_GET(p->width, p->shift, reg);
-
- p = &pll->od;
- reg = readl(pll->base + p->reg_off);
- od = PARM_GET(p->width, p->shift, reg);
-
- p = &pll->od2;
- if (p->width) {
- reg = readl(pll->base + p->reg_off);
- od2 = PARM_GET(p->width, p->shift, reg);
- }
-
- p = &pll->frac;
- if (p->width) {
- reg = readl(pll->base + p->reg_off);
- frac = PARM_GET(p->width, p->shift, reg);
- rate_mhz = (parent_rate_mhz * m + \
- (parent_rate_mhz * frac >> 12)) * 2 / n;
- rate_mhz = rate_mhz >> od >> od2;
- } else
- rate_mhz = (parent_rate_mhz * m / n) >> od >> od2;
-
- return rate_mhz * 1000000;
+ return (struct meson_clk_pll_data *)clk->data;
}
-static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static unsigned long __pll_params_to_rate(unsigned long parent_rate,
+ const struct pll_rate_table *pllt,
+ u16 frac,
+ struct meson_clk_pll_data *pll)
{
- struct meson_clk_pll *pll = to_meson_clk_pll(hw);
- const struct pll_rate_table *rate_table = pll->rate_table;
- int i;
+ u64 rate = (u64)parent_rate * pllt->m;
+ unsigned int od = pllt->od + pllt->od2 + pllt->od3;
- for (i = 0; i < pll->rate_count; i++) {
- if (rate <= rate_table[i].rate)
- return rate_table[i].rate;
+ if (frac && MESON_PARM_APPLICABLE(&pll->frac)) {
+ u64 frac_rate = (u64)parent_rate * frac;
+
+ rate += DIV_ROUND_UP_ULL(frac_rate,
+ (1 << pll->frac.width));
}
- /* else return the smallest value */
- return rate_table[0].rate;
+ return DIV_ROUND_UP_ULL(rate, pllt->n << od);
}
-static const struct pll_rate_table *meson_clk_get_pll_settings(struct meson_clk_pll *pll,
- unsigned long rate)
+static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
- const struct pll_rate_table *rate_table = pll->rate_table;
- int i;
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+ struct pll_rate_table pllt;
+ u16 frac;
- for (i = 0; i < pll->rate_count; i++) {
- if (rate == rate_table[i].rate)
- return &rate_table[i];
- }
- return NULL;
+ pllt.n = meson_parm_read(clk->map, &pll->n);
+ pllt.m = meson_parm_read(clk->map, &pll->m);
+ pllt.od = meson_parm_read(clk->map, &pll->od);
+
+ pllt.od2 = MESON_PARM_APPLICABLE(&pll->od2) ?
+ meson_parm_read(clk->map, &pll->od2) :
+ 0;
+
+ pllt.od3 = MESON_PARM_APPLICABLE(&pll->od3) ?
+ meson_parm_read(clk->map, &pll->od3) :
+ 0;
+
+ frac = MESON_PARM_APPLICABLE(&pll->frac) ?
+ meson_parm_read(clk->map, &pll->frac) :
+ 0;
+
+ return __pll_params_to_rate(parent_rate, &pllt, frac, pll);
}
-/* Specific wait loop for GXL/GXM GP0 PLL */
-static int meson_clk_pll_wait_lock_reset(struct meson_clk_pll *pll,
- struct parm *p_n)
+static u16 __pll_params_with_frac(unsigned long rate,
+ unsigned long parent_rate,
+ const struct pll_rate_table *pllt,
+ struct meson_clk_pll_data *pll)
{
- int delay = 100;
- u32 reg;
+ u16 frac_max = (1 << pll->frac.width);
+ u64 val = (u64)rate * pllt->n;
- while (delay > 0) {
- reg = readl(pll->base + p_n->reg_off);
- writel(reg | MESON_PLL_RESET, pll->base + p_n->reg_off);
- udelay(10);
- writel(reg & ~MESON_PLL_RESET, pll->base + p_n->reg_off);
+ val <<= pllt->od + pllt->od2 + pllt->od3;
- /* This delay comes from AMLogic tree clk-gp0-gxl driver */
- mdelay(1);
+ if (pll->flags & CLK_MESON_PLL_ROUND_CLOSEST)
+ val = DIV_ROUND_CLOSEST_ULL(val * frac_max, parent_rate);
+ else
+ val = div_u64(val * frac_max, parent_rate);
- reg = readl(pll->base + p_n->reg_off);
- if (reg & MESON_PLL_LOCK)
- return 0;
- delay--;
+ val -= pllt->m * frac_max;
+
+ return min((u16)val, (u16)(frac_max - 1));
+}
+
+static const struct pll_rate_table *
+meson_clk_get_pll_settings(unsigned long rate,
+ struct meson_clk_pll_data *pll)
+{
+ const struct pll_rate_table *table = pll->table;
+ unsigned int i = 0;
+
+ if (!table)
+ return NULL;
+
+ /* Find the first table element exceeding rate */
+ while (table[i].rate && table[i].rate <= rate)
+ i++;
+
+ if (i != 0) {
+ if (MESON_PARM_APPLICABLE(&pll->frac) ||
+ !(pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) ||
+ (abs(rate - table[i - 1].rate) <
+ abs(rate - table[i].rate)))
+ i--;
}
- return -ETIMEDOUT;
+
+ return (struct pll_rate_table *)&table[i];
}
-static int meson_clk_pll_wait_lock(struct meson_clk_pll *pll,
- struct parm *p_n)
+static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
{
- int delay = 24000000;
- u32 reg;
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+ const struct pll_rate_table *pllt =
+ meson_clk_get_pll_settings(rate, pll);
+ u16 frac;
+
+ if (!pllt)
+ return meson_clk_pll_recalc_rate(hw, *parent_rate);
+
+ if (!MESON_PARM_APPLICABLE(&pll->frac)
+ || rate == pllt->rate)
+ return pllt->rate;
+
+ /*
+ * The rate provided by the setting is not an exact match, let's
+ * try to improve the result using the fractional parameter
+ */
+ frac = __pll_params_with_frac(rate, *parent_rate, pllt, pll);
+
+ return __pll_params_to_rate(*parent_rate, pllt, frac, pll);
+}
- while (delay > 0) {
- reg = readl(pll->base + p_n->reg_off);
+static int meson_clk_pll_wait_lock(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+ int delay = 24000000;
- if (reg & MESON_PLL_LOCK)
+ do {
+ /* Is the clock locked now ? */
+ if (meson_parm_read(clk->map, &pll->l))
return 0;
+
delay--;
- }
+ } while (delay > 0);
+
return -ETIMEDOUT;
}
-static void meson_clk_pll_init_params(struct meson_clk_pll *pll)
+static void meson_clk_pll_init(struct clk_hw *hw)
{
- int i;
-
- for (i = 0 ; i < pll->params.params_count ; ++i)
- writel(pll->params.params_table[i].value,
- pll->base + pll->params.params_table[i].reg_off);
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+
+ if (pll->init_count) {
+ meson_parm_write(clk->map, &pll->rst, 1);
+ regmap_multi_reg_write(clk->map, pll->init_regs,
+ pll->init_count);
+ meson_parm_write(clk->map, &pll->rst, 0);
+ }
}
static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- struct meson_clk_pll *pll = to_meson_clk_pll(hw);
- struct parm *p;
- const struct pll_rate_table *rate_set;
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+ const struct pll_rate_table *pllt;
unsigned long old_rate;
- int ret = 0;
- u32 reg;
+ u16 frac = 0;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
old_rate = rate;
- rate_set = meson_clk_get_pll_settings(pll, rate);
- if (!rate_set)
+ pllt = meson_clk_get_pll_settings(rate, pll);
+ if (!pllt)
return -EINVAL;
- /* Initialize the PLL in a clean state if specified */
- if (pll->params.params_count)
- meson_clk_pll_init_params(pll);
-
- /* PLL reset */
- p = &pll->n;
- reg = readl(pll->base + p->reg_off);
- /* If no_init_reset is provided, avoid resetting at this point */
- if (!pll->params.no_init_reset)
- writel(reg | MESON_PLL_RESET, pll->base + p->reg_off);
-
- reg = PARM_SET(p->width, p->shift, reg, rate_set->n);
- writel(reg, pll->base + p->reg_off);
-
- p = &pll->m;
- reg = readl(pll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, rate_set->m);
- writel(reg, pll->base + p->reg_off);
-
- p = &pll->od;
- reg = readl(pll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, rate_set->od);
- writel(reg, pll->base + p->reg_off);
-
- p = &pll->od2;
- if (p->width) {
- reg = readl(pll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, rate_set->od2);
- writel(reg, pll->base + p->reg_off);
- }
+ /* Put the pll in reset to write the params */
+ meson_parm_write(clk->map, &pll->rst, 1);
- p = &pll->frac;
- if (p->width) {
- reg = readl(pll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, rate_set->frac);
- writel(reg, pll->base + p->reg_off);
- }
+ meson_parm_write(clk->map, &pll->n, pllt->n);
+ meson_parm_write(clk->map, &pll->m, pllt->m);
+ meson_parm_write(clk->map, &pll->od, pllt->od);
+
+ if (MESON_PARM_APPLICABLE(&pll->od2))
+ meson_parm_write(clk->map, &pll->od2, pllt->od2);
+
+ if (MESON_PARM_APPLICABLE(&pll->od3))
+ meson_parm_write(clk->map, &pll->od3, pllt->od3);
- p = &pll->n;
- /* If clear_reset_for_lock is provided, remove the reset bit here */
- if (pll->params.clear_reset_for_lock) {
- reg = readl(pll->base + p->reg_off);
- writel(reg & ~MESON_PLL_RESET, pll->base + p->reg_off);
+ if (MESON_PARM_APPLICABLE(&pll->frac)) {
+ frac = __pll_params_with_frac(rate, parent_rate, pllt, pll);
+ meson_parm_write(clk->map, &pll->frac, frac);
}
- /* If reset_lock_loop, use a special loop including resetting */
- if (pll->params.reset_lock_loop)
- ret = meson_clk_pll_wait_lock_reset(pll, p);
- else
- ret = meson_clk_pll_wait_lock(pll, p);
- if (ret) {
+ /* make sure the reset is cleared at this point */
+ meson_parm_write(clk->map, &pll->rst, 0);
+
+ if (meson_clk_pll_wait_lock(hw)) {
pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
__func__, old_rate);
+ /*
+ * FIXME: Do we really need/want this HACK?
+ * It looks unsafe. What happens if the clock gets into a
+ * broken state and we can't lock back on the old_rate? It
+ * looks like infinite recursion is possible.
+ */
meson_clk_pll_set_rate(hw, old_rate, parent_rate);
}
- return ret;
+ return 0;
}
const struct clk_ops meson_clk_pll_ops = {
+ .init = meson_clk_pll_init,
.recalc_rate = meson_clk_pll_recalc_rate,
.round_rate = meson_clk_pll_round_rate,
.set_rate = meson_clk_pll_set_rate,
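
The new __pll_params_with_frac() above is simply the PLL rate relation rate = parent_rate * (m + frac / 2^frac_width) / (n << (od + od2 + od3)) solved for frac and clamped to the width of the fractional field. A minimal user-space sketch of that arithmetic follows; the helper name and the 24 MHz / 12-bit example values are illustrative, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* Solve rate = parent * (m + frac / 2^frac_width) / (n << od_sum) for frac */
static uint64_t frac_for_rate(uint64_t rate, uint64_t parent,
			      unsigned int m, unsigned int n,
			      unsigned int od_sum, unsigned int frac_width)
{
	uint64_t frac_max = 1ULL << frac_width;
	uint64_t val = (rate * n) << od_sum;

	val = (val * frac_max) / parent;	/* m * frac_max + frac */
	val -= (uint64_t)m * frac_max;		/* isolate frac */

	return val < frac_max - 1 ? val : frac_max - 1;
}

int main(void)
{
	/* 24 MHz parent, m = 61, n = 1, od = 1, 12-bit frac field */
	printf("frac = %llu\n", (unsigned long long)
	       frac_for_rate(738000000, 24000000, 61, 1, 1, 12));
	return 0;
}

With m = 61 the integer part alone gives 732 MHz and m = 62 gives 744 MHz, so the 738 MHz request above comes back as frac = 2048, exactly half of the 12-bit range.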
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
new file mode 100644
index 000000000000..ab7a3556f5b2
--- /dev/null
+++ b/drivers/clk/meson/clk-regmap.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include "clk-regmap.h"
+
+static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_gate_data *gate = clk_get_regmap_gate_data(clk);
+ int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
+
+ set ^= enable;
+
+ return regmap_update_bits(clk->map, gate->offset, BIT(gate->bit_idx),
+ set ? BIT(gate->bit_idx) : 0);
+}
+
+static int clk_regmap_gate_enable(struct clk_hw *hw)
+{
+ return clk_regmap_gate_endisable(hw, 1);
+}
+
+static void clk_regmap_gate_disable(struct clk_hw *hw)
+{
+ clk_regmap_gate_endisable(hw, 0);
+}
+
+static int clk_regmap_gate_is_enabled(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_gate_data *gate = clk_get_regmap_gate_data(clk);
+ unsigned int val;
+
+ regmap_read(clk->map, gate->offset, &val);
+ if (gate->flags & CLK_GATE_SET_TO_DISABLE)
+ val ^= BIT(gate->bit_idx);
+
+ val &= BIT(gate->bit_idx);
+
+ return val ? 1 : 0;
+}
+
+const struct clk_ops clk_regmap_gate_ops = {
+ .enable = clk_regmap_gate_enable,
+ .disable = clk_regmap_gate_disable,
+ .is_enabled = clk_regmap_gate_is_enabled,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_gate_ops);
+
+static unsigned long clk_regmap_div_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_div_data *div = clk_get_regmap_div_data(clk);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(clk->map, div->offset, &val);
+ if (ret)
+ /* Gives a hint that something is wrong */
+ return 0;
+
+ val >>= div->shift;
+ val &= clk_div_mask(div->width);
+ return divider_recalc_rate(hw, prate, val, div->table, div->flags,
+ div->width);
+}
+
+static long clk_regmap_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_div_data *div = clk_get_regmap_div_data(clk);
+ unsigned int val;
+ int ret;
+
+ /* if read only, just return current value */
+ if (div->flags & CLK_DIVIDER_READ_ONLY) {
+ ret = regmap_read(clk->map, div->offset, &val);
+ if (ret)
+ /* Gives a hint that something is wrong */
+ return 0;
+
+ val >>= div->shift;
+ val &= clk_div_mask(div->width);
+
+ return divider_ro_round_rate(hw, rate, prate, div->table,
+ div->width, div->flags, val);
+ }
+
+ return divider_round_rate(hw, rate, prate, div->table, div->width,
+ div->flags);
+}
+
+static int clk_regmap_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_div_data *div = clk_get_regmap_div_data(clk);
+ unsigned int val;
+ int ret;
+
+ ret = divider_get_val(rate, parent_rate, div->table, div->width,
+ div->flags);
+ if (ret < 0)
+ return ret;
+
+ val = (unsigned int)ret << div->shift;
+ return regmap_update_bits(clk->map, div->offset,
+ clk_div_mask(div->width) << div->shift, val);
+};
+
+/* Would prefer clk_regmap_div_ro_ops but clashes with qcom */
+
+const struct clk_ops clk_regmap_divider_ops = {
+ .recalc_rate = clk_regmap_div_recalc_rate,
+ .round_rate = clk_regmap_div_round_rate,
+ .set_rate = clk_regmap_div_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_divider_ops);
+
+const struct clk_ops clk_regmap_divider_ro_ops = {
+ .recalc_rate = clk_regmap_div_recalc_rate,
+ .round_rate = clk_regmap_div_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_divider_ro_ops);
+
+static u8 clk_regmap_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_mux_data *mux = clk_get_regmap_mux_data(clk);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(clk->map, mux->offset, &val);
+ if (ret)
+ return ret;
+
+ val >>= mux->shift;
+ val &= mux->mask;
+ return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
+}
+
+static int clk_regmap_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_mux_data *mux = clk_get_regmap_mux_data(clk);
+ unsigned int val = clk_mux_index_to_val(mux->table, mux->flags, index);
+
+ return regmap_update_bits(clk->map, mux->offset,
+ mux->mask << mux->shift,
+ val << mux->shift);
+}
+
+static int clk_regmap_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_mux_data *mux = clk_get_regmap_mux_data(clk);
+
+ return clk_mux_determine_rate_flags(hw, req, mux->flags);
+}
+
+const struct clk_ops clk_regmap_mux_ops = {
+ .get_parent = clk_regmap_mux_get_parent,
+ .set_parent = clk_regmap_mux_set_parent,
+ .determine_rate = clk_regmap_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_mux_ops);
+
+const struct clk_ops clk_regmap_mux_ro_ops = {
+ .get_parent = clk_regmap_mux_get_parent,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops);
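
Taken together, these ops let a controller describe every gate, divider and mux as a clk_regmap whose behaviour is chosen purely by its data pointer; probe code then only has to fill in the shared regmap before registering each hw. A rough sketch of that pattern, assuming an MMIO-backed regmap and using hypothetical names, offsets and bit positions (none of them from this patch):

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "clk-regmap.h"

/* Hypothetical gate: bit 3 of a register at offset 0x140, parented to clk81 */
static struct clk_regmap example_gate = {
	.data = &(struct clk_regmap_gate_data){
		.offset = 0x140,
		.bit_idx = 3,
	},
	.hw.init = &(struct clk_init_data){
		.name = "example_gate",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "clk81" },
		.num_parents = 1,
	},
};

static const struct regmap_config example_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int example_clkc_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *base = devm_ioremap_resource(&pdev->dev, res);
	struct regmap *map;

	if (IS_ERR(base))
		return PTR_ERR(base);

	map = devm_regmap_init_mmio(&pdev->dev, base, &example_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* The same regmap is shared by every clk_regmap of the controller */
	example_gate.map = map;

	return devm_clk_hw_register(&pdev->dev, &example_gate.hw);
}

Muxes and dividers follow the same shape, pairing clk_regmap_mux_data or clk_regmap_div_data with the matching ops declared above.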
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
new file mode 100644
index 000000000000..627c888026d7
--- /dev/null
+++ b/drivers/clk/meson/clk-regmap.h
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#ifndef __CLK_REGMAP_H
+#define __CLK_REGMAP_H
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+/**
+ * struct clk_regmap - regmap backed clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @map: pointer to the regmap structure controlling the clock
+ * @data: data specific to the clock type
+ *
+ * Clock which is controlled by regmap backed registers. The actual type
+ * of the clock is controlled by the clock_ops and data.
+ */
+struct clk_regmap {
+ struct clk_hw hw;
+ struct regmap *map;
+ void *data;
+};
+
+#define to_clk_regmap(_hw) container_of(_hw, struct clk_regmap, hw)
+
+/**
+ * struct clk_regmap_gate_data - regmap backed gate specific data
+ *
+ * @offset: offset of the register controlling gate
+ * @bit_idx: single bit controlling gate
+ * @flags: hardware-specific flags
+ *
+ * Flags:
+ * Same as clk_gate except CLK_GATE_HIWORD_MASK which is ignored
+ */
+struct clk_regmap_gate_data {
+ unsigned int offset;
+ u8 bit_idx;
+ u8 flags;
+};
+
+static inline struct clk_regmap_gate_data *
+clk_get_regmap_gate_data(struct clk_regmap *clk)
+{
+ return (struct clk_regmap_gate_data *)clk->data;
+}
+
+extern const struct clk_ops clk_regmap_gate_ops;
+
+/**
+ * struct clk_regmap_div_data - regmap backed adjustable divider specific data
+ *
+ * @offset: offset of the register controlling the divider
+ * @shift: shift to the divider bit field
+ * @width: width of the divider bit field
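+ * @flags: hardware-specific flags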
+ * @table: array of value/divider pairs, last entry should have div = 0
+ *
+ * Flags:
+ * Same as clk_divider except CLK_DIVIDER_HIWORD_MASK which is ignored
+ */
+struct clk_regmap_div_data {
+ unsigned int offset;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ const struct clk_div_table *table;
+};
+
+static inline struct clk_regmap_div_data *
+clk_get_regmap_div_data(struct clk_regmap *clk)
+{
+ return (struct clk_regmap_div_data *)clk->data;
+}
+
+extern const struct clk_ops clk_regmap_divider_ops;
+extern const struct clk_ops clk_regmap_divider_ro_ops;
+
+/**
+ * struct clk_regmap_mux_data - regmap backed multiplexer clock specific data
+ *
+ * @offset: offset of the register controlling the multiplexer
+ * @table: array of parent indexed register values
+ * @shift: shift to the multiplexer bit field
+ * @mask: mask of the multiplexer bit field
+ * @flags: hardware-specific flags
+ *
+ * Flags:
+ * Same as clk_mux except CLK_MUX_HIWORD_MASK which is ignored
+ */
+struct clk_regmap_mux_data {
+ unsigned int offset;
+ u32 *table;
+ u32 mask;
+ u8 shift;
+ u8 flags;
+};
+
+static inline struct clk_regmap_mux_data *
+clk_get_regmap_mux_data(struct clk_regmap *clk)
+{
+ return (struct clk_regmap_mux_data *)clk->data;
+}
+
+extern const struct clk_ops clk_regmap_mux_ops;
+extern const struct clk_ops clk_regmap_mux_ro_ops;
+
+#endif /* __CLK_REGMAP_H */
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index c2ff0520ce53..8fe73c4edca8 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -18,6 +18,9 @@
#ifndef __CLKC_H
#define __CLKC_H
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
#define PMASK(width) GENMASK(width - 1, 0)
#define SETPMASK(width, shift) GENMASK(shift + width - 1, shift)
#define CLRPMASK(width, shift) (~SETPMASK(width, shift))
@@ -35,13 +38,29 @@ struct parm {
u8 width;
};
+static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p)
+{
+ unsigned int val;
+
+ regmap_read(map, p->reg_off, &val);
+ return PARM_GET(p->width, p->shift, val);
+}
+
+static inline void meson_parm_write(struct regmap *map, struct parm *p,
+ unsigned int val)
+{
+ regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift),
+ val << p->shift);
+}
+
+
struct pll_rate_table {
unsigned long rate;
u16 m;
u16 n;
u16 od;
u16 od2;
- u16 frac;
+ u16 od3;
};
#define PLL_RATE(_r, _m, _n, _od) \
@@ -50,97 +69,53 @@ struct pll_rate_table {
.m = (_m), \
.n = (_n), \
.od = (_od), \
- } \
-
-#define PLL_FRAC_RATE(_r, _m, _n, _od, _od2, _frac) \
- { \
- .rate = (_r), \
- .m = (_m), \
- .n = (_n), \
- .od = (_od), \
- .od2 = (_od2), \
- .frac = (_frac), \
- } \
-
-struct pll_params_table {
- unsigned int reg_off;
- unsigned int value;
-};
-
-#define PLL_PARAM(_reg, _val) \
- { \
- .reg_off = (_reg), \
- .value = (_val), \
}
-struct pll_setup_params {
- struct pll_params_table *params_table;
- unsigned int params_count;
- /* Workaround for GP0, do not reset before configuring */
- bool no_init_reset;
- /* Workaround for GP0, unreset right before checking for lock */
- bool clear_reset_for_lock;
- /* Workaround for GXL GP0, reset in the lock checking loop */
- bool reset_lock_loop;
-};
+#define CLK_MESON_PLL_ROUND_CLOSEST BIT(0)
-struct meson_clk_pll {
- struct clk_hw hw;
- void __iomem *base;
+struct meson_clk_pll_data {
struct parm m;
struct parm n;
struct parm frac;
struct parm od;
struct parm od2;
- const struct pll_setup_params params;
- const struct pll_rate_table *rate_table;
- unsigned int rate_count;
- spinlock_t *lock;
+ struct parm od3;
+ struct parm l;
+ struct parm rst;
+ const struct reg_sequence *init_regs;
+ unsigned int init_count;
+ const struct pll_rate_table *table;
+ u8 flags;
};
#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw)
-struct meson_clk_cpu {
- struct clk_hw hw;
- void __iomem *base;
- u16 reg_off;
- struct notifier_block clk_nb;
- const struct clk_div_table *div_table;
-};
-
-int meson_clk_cpu_notifier_cb(struct notifier_block *nb, unsigned long event,
- void *data);
-
-struct meson_clk_mpll {
- struct clk_hw hw;
- void __iomem *base;
+struct meson_clk_mpll_data {
struct parm sdm;
struct parm sdm_en;
struct parm n2;
- struct parm en;
struct parm ssen;
+ struct parm misc;
spinlock_t *lock;
};
-struct meson_clk_audio_divider {
- struct clk_hw hw;
- void __iomem *base;
+struct meson_clk_audio_div_data {
struct parm div;
u8 flags;
- spinlock_t *lock;
};
#define MESON_GATE(_name, _reg, _bit) \
-struct clk_gate _name = { \
- .reg = (void __iomem *) _reg, \
- .bit_idx = (_bit), \
- .lock = &meson_clk_lock, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = &clk_gate_ops, \
+struct clk_regmap _name = { \
+ .data = &(struct clk_regmap_gate_data){ \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &clk_regmap_gate_ops, \
.parent_names = (const char *[]){ "clk81" }, \
.num_parents = 1, \
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
+ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
}, \
};
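
As an illustration of the updated macro, a hypothetical peripheral gate (the name, register and bit below are made up, not part of this change) would be declared as:

static MESON_GATE(example_periph, HHI_GCLK_MPEG0, 5);

which now expands to a clk_regmap rather than a clk_gate:

static struct clk_regmap example_periph = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_GCLK_MPEG0,
		.bit_idx = 5,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "example_periph",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "clk81" },
		.num_parents = 1,
		.flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
	},
};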
diff --git a/drivers/clk/meson/gxbb-aoclk-regmap.c b/drivers/clk/meson/gxbb-aoclk-regmap.c
deleted file mode 100644
index 2515fbfa0467..000000000000
--- a/drivers/clk/meson/gxbb-aoclk-regmap.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2017 BayLibre, SAS.
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * SPDX-License-Identifier: GPL-2.0+
- */
-
-#include <linux/clk-provider.h>
-#include <linux/bitfield.h>
-#include <linux/regmap.h>
-#include "gxbb-aoclk.h"
-
-static int aoclk_gate_regmap_enable(struct clk_hw *hw)
-{
- struct aoclk_gate_regmap *gate = to_aoclk_gate_regmap(hw);
-
- return regmap_update_bits(gate->regmap, AO_RTI_GEN_CNTL_REG0,
- BIT(gate->bit_idx), BIT(gate->bit_idx));
-}
-
-static void aoclk_gate_regmap_disable(struct clk_hw *hw)
-{
- struct aoclk_gate_regmap *gate = to_aoclk_gate_regmap(hw);
-
- regmap_update_bits(gate->regmap, AO_RTI_GEN_CNTL_REG0,
- BIT(gate->bit_idx), 0);
-}
-
-static int aoclk_gate_regmap_is_enabled(struct clk_hw *hw)
-{
- struct aoclk_gate_regmap *gate = to_aoclk_gate_regmap(hw);
- unsigned int val;
- int ret;
-
- ret = regmap_read(gate->regmap, AO_RTI_GEN_CNTL_REG0, &val);
- if (ret)
- return ret;
-
- return (val & BIT(gate->bit_idx)) != 0;
-}
-
-const struct clk_ops meson_aoclk_gate_regmap_ops = {
- .enable = aoclk_gate_regmap_enable,
- .disable = aoclk_gate_regmap_disable,
- .is_enabled = aoclk_gate_regmap_is_enabled,
-};
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 6c161e0a8e59..9ec23ae9a219 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -62,10 +62,9 @@
#include <linux/delay.h>
#include <dt-bindings/clock/gxbb-aoclkc.h>
#include <dt-bindings/reset/gxbb-aoclkc.h>
+#include "clk-regmap.h"
#include "gxbb-aoclk.h"
-static DEFINE_SPINLOCK(gxbb_aoclk_lock);
-
struct gxbb_aoclk_reset_controller {
struct reset_controller_dev reset;
unsigned int *data;
@@ -87,12 +86,14 @@ static const struct reset_control_ops gxbb_aoclk_reset_ops = {
};
#define GXBB_AO_GATE(_name, _bit) \
-static struct aoclk_gate_regmap _name##_ao = { \
- .bit_idx = (_bit), \
- .lock = &gxbb_aoclk_lock, \
+static struct clk_regmap _name##_ao = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = AO_RTI_GEN_CNTL_REG0, \
+ .bit_idx = (_bit), \
+ }, \
.hw.init = &(struct clk_init_data) { \
.name = #_name "_ao", \
- .ops = &meson_aoclk_gate_regmap_ops, \
+ .ops = &clk_regmap_gate_ops, \
.parent_names = (const char *[]){ "clk81" }, \
.num_parents = 1, \
.flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
@@ -107,7 +108,6 @@ GXBB_AO_GATE(uart2, 5);
GXBB_AO_GATE(ir_blaster, 6);
static struct aoclk_cec_32k cec_32k_ao = {
- .lock = &gxbb_aoclk_lock,
.hw.init = &(struct clk_init_data) {
.name = "cec_32k_ao",
.ops = &meson_aoclk_cec_32k_ops,
@@ -126,7 +126,7 @@ static unsigned int gxbb_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct aoclk_gate_regmap *gxbb_aoclk_gate[] = {
+static struct clk_regmap *gxbb_aoclk_gate[] = {
[CLKID_AO_REMOTE] = &remote_ao,
[CLKID_AO_I2C_MASTER] = &i2c_master_ao,
[CLKID_AO_I2C_SLAVE] = &i2c_slave_ao,
@@ -177,10 +177,10 @@ static int gxbb_aoclkc_probe(struct platform_device *pdev)
* Populate regmap and register all clks
*/
for (clkid = 0; clkid < ARRAY_SIZE(gxbb_aoclk_gate); clkid++) {
- gxbb_aoclk_gate[clkid]->regmap = regmap;
+ gxbb_aoclk_gate[clkid]->map = regmap;
ret = devm_clk_hw_register(dev,
- gxbb_aoclk_onecell_data.hws[clkid]);
+ gxbb_aoclk_onecell_data.hws[clkid]);
if (ret)
return ret;
}
diff --git a/drivers/clk/meson/gxbb-aoclk.h b/drivers/clk/meson/gxbb-aoclk.h
index e8604c8f7eee..badc4c22b4ee 100644
--- a/drivers/clk/meson/gxbb-aoclk.h
+++ b/drivers/clk/meson/gxbb-aoclk.h
@@ -17,22 +17,9 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
-struct aoclk_gate_regmap {
- struct clk_hw hw;
- unsigned bit_idx;
- struct regmap *regmap;
- spinlock_t *lock;
-};
-
-#define to_aoclk_gate_regmap(_hw) \
- container_of(_hw, struct aoclk_gate_regmap, hw)
-
-extern const struct clk_ops meson_aoclk_gate_regmap_ops;
-
struct aoclk_cec_32k {
struct clk_hw hw;
struct regmap *regmap;
- spinlock_t *lock;
};
#define to_aoclk_cec_32k(_hw) container_of(_hw, struct aoclk_cec_32k, hw)
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index af24455af5b4..b1e4d9557610 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -19,108 +19,19 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
+#include <linux/regmap.h>
#include "clkc.h"
#include "gxbb.h"
+#include "clk-regmap.h"
static DEFINE_SPINLOCK(meson_clk_lock);
-static const struct pll_rate_table sys_pll_rate_table[] = {
- PLL_RATE(24000000, 56, 1, 2),
- PLL_RATE(48000000, 64, 1, 2),
- PLL_RATE(72000000, 72, 1, 2),
- PLL_RATE(96000000, 64, 1, 2),
- PLL_RATE(120000000, 80, 1, 2),
- PLL_RATE(144000000, 96, 1, 2),
- PLL_RATE(168000000, 56, 1, 1),
- PLL_RATE(192000000, 64, 1, 1),
- PLL_RATE(216000000, 72, 1, 1),
- PLL_RATE(240000000, 80, 1, 1),
- PLL_RATE(264000000, 88, 1, 1),
- PLL_RATE(288000000, 96, 1, 1),
- PLL_RATE(312000000, 52, 1, 2),
- PLL_RATE(336000000, 56, 1, 2),
- PLL_RATE(360000000, 60, 1, 2),
- PLL_RATE(384000000, 64, 1, 2),
- PLL_RATE(408000000, 68, 1, 2),
- PLL_RATE(432000000, 72, 1, 2),
- PLL_RATE(456000000, 76, 1, 2),
- PLL_RATE(480000000, 80, 1, 2),
- PLL_RATE(504000000, 84, 1, 2),
- PLL_RATE(528000000, 88, 1, 2),
- PLL_RATE(552000000, 92, 1, 2),
- PLL_RATE(576000000, 96, 1, 2),
- PLL_RATE(600000000, 50, 1, 1),
- PLL_RATE(624000000, 52, 1, 1),
- PLL_RATE(648000000, 54, 1, 1),
- PLL_RATE(672000000, 56, 1, 1),
- PLL_RATE(696000000, 58, 1, 1),
- PLL_RATE(720000000, 60, 1, 1),
- PLL_RATE(744000000, 62, 1, 1),
- PLL_RATE(768000000, 64, 1, 1),
- PLL_RATE(792000000, 66, 1, 1),
- PLL_RATE(816000000, 68, 1, 1),
- PLL_RATE(840000000, 70, 1, 1),
- PLL_RATE(864000000, 72, 1, 1),
- PLL_RATE(888000000, 74, 1, 1),
- PLL_RATE(912000000, 76, 1, 1),
- PLL_RATE(936000000, 78, 1, 1),
- PLL_RATE(960000000, 80, 1, 1),
- PLL_RATE(984000000, 82, 1, 1),
- PLL_RATE(1008000000, 84, 1, 1),
- PLL_RATE(1032000000, 86, 1, 1),
- PLL_RATE(1056000000, 88, 1, 1),
- PLL_RATE(1080000000, 90, 1, 1),
- PLL_RATE(1104000000, 92, 1, 1),
- PLL_RATE(1128000000, 94, 1, 1),
- PLL_RATE(1152000000, 96, 1, 1),
- PLL_RATE(1176000000, 98, 1, 1),
- PLL_RATE(1200000000, 50, 1, 0),
- PLL_RATE(1224000000, 51, 1, 0),
- PLL_RATE(1248000000, 52, 1, 0),
- PLL_RATE(1272000000, 53, 1, 0),
- PLL_RATE(1296000000, 54, 1, 0),
- PLL_RATE(1320000000, 55, 1, 0),
- PLL_RATE(1344000000, 56, 1, 0),
- PLL_RATE(1368000000, 57, 1, 0),
- PLL_RATE(1392000000, 58, 1, 0),
- PLL_RATE(1416000000, 59, 1, 0),
- PLL_RATE(1440000000, 60, 1, 0),
- PLL_RATE(1464000000, 61, 1, 0),
- PLL_RATE(1488000000, 62, 1, 0),
- PLL_RATE(1512000000, 63, 1, 0),
- PLL_RATE(1536000000, 64, 1, 0),
- PLL_RATE(1560000000, 65, 1, 0),
- PLL_RATE(1584000000, 66, 1, 0),
- PLL_RATE(1608000000, 67, 1, 0),
- PLL_RATE(1632000000, 68, 1, 0),
- PLL_RATE(1656000000, 68, 1, 0),
- PLL_RATE(1680000000, 68, 1, 0),
- PLL_RATE(1704000000, 68, 1, 0),
- PLL_RATE(1728000000, 69, 1, 0),
- PLL_RATE(1752000000, 69, 1, 0),
- PLL_RATE(1776000000, 69, 1, 0),
- PLL_RATE(1800000000, 69, 1, 0),
- PLL_RATE(1824000000, 70, 1, 0),
- PLL_RATE(1848000000, 70, 1, 0),
- PLL_RATE(1872000000, 70, 1, 0),
- PLL_RATE(1896000000, 70, 1, 0),
- PLL_RATE(1920000000, 71, 1, 0),
- PLL_RATE(1944000000, 71, 1, 0),
- PLL_RATE(1968000000, 71, 1, 0),
- PLL_RATE(1992000000, 71, 1, 0),
- PLL_RATE(2016000000, 72, 1, 0),
- PLL_RATE(2040000000, 72, 1, 0),
- PLL_RATE(2064000000, 72, 1, 0),
- PLL_RATE(2088000000, 72, 1, 0),
- PLL_RATE(2112000000, 73, 1, 0),
- { /* sentinel */ },
-};
-
static const struct pll_rate_table gxbb_gp0_pll_rate_table[] = {
PLL_RATE(96000000, 32, 1, 3),
PLL_RATE(99000000, 33, 1, 3),
@@ -278,23 +189,39 @@ static const struct pll_rate_table gxl_gp0_pll_rate_table[] = {
{ /* sentinel */ },
};
-static struct meson_clk_pll gxbb_fixed_pll = {
- .m = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 0,
- .width = 9,
+static struct clk_regmap gxbb_fixed_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_MPLL_CNTL2,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .n = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 16,
- .width = 2,
- },
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -304,33 +231,118 @@ static struct meson_clk_pll gxbb_fixed_pll = {
},
};
-static struct meson_clk_pll gxbb_hdmi_pll = {
- .m = {
- .reg_off = HHI_HDMI_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_HDMI_PLL_CNTL,
- .shift = 9,
- .width = 5,
+static struct clk_fixed_factor gxbb_hdmi_pll_pre_mult = {
+ .mult = 2,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_pre_mult",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
},
- .frac = {
- .reg_off = HHI_HDMI_PLL_CNTL2,
- .shift = 0,
- .width = 12,
+};
+
+static struct clk_regmap gxbb_hdmi_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HDMI_PLL_CNTL2,
+ .shift = 0,
+ .width = 12,
+ },
+ .od = {
+ .reg_off = HHI_HDMI_PLL_CNTL2,
+ .shift = 16,
+ .width = 2,
+ },
+ .od2 = {
+ .reg_off = HHI_HDMI_PLL_CNTL2,
+ .shift = 22,
+ .width = 2,
+ },
+ .od3 = {
+ .reg_off = HHI_HDMI_PLL_CNTL2,
+ .shift = 18,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 28,
+ .width = 1,
+ },
},
- .od = {
- .reg_off = HHI_HDMI_PLL_CNTL2,
- .shift = 16,
- .width = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_pre_mult" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
},
- .od2 = {
- .reg_off = HHI_HDMI_PLL_CNTL2,
- .shift = 22,
- .width = 2,
+};
+
+static struct clk_regmap gxl_hdmi_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .frac = {
+ /*
+ * On gxl, there is a register shift due to
+ * HHI_HDMI_PLL_CNTL1 which does not exist on gxbb,
+ * so we compute the register offset based on the PLL
+ * base to get it right
+ */
+ .reg_off = HHI_HDMI_PLL_CNTL + 4,
+ .shift = 0,
+ .width = 12,
+ },
+ .od = {
+ .reg_off = HHI_HDMI_PLL_CNTL + 8,
+ .shift = 21,
+ .width = 2,
+ },
+ .od2 = {
+ .reg_off = HHI_HDMI_PLL_CNTL + 8,
+ .shift = 23,
+ .width = 2,
+ },
+ .od3 = {
+ .reg_off = HHI_HDMI_PLL_CNTL + 8,
+ .shift = 19,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -340,25 +352,34 @@ static struct meson_clk_pll gxbb_hdmi_pll = {
},
};
-static struct meson_clk_pll gxbb_sys_pll = {
- .m = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 9,
- .width = 5,
+static struct clk_regmap gxbb_sys_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 10,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .od = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 10,
- .width = 2,
- },
- .rate_table = sys_pll_rate_table,
- .rate_count = ARRAY_SIZE(sys_pll_rate_table),
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -368,38 +389,44 @@ static struct meson_clk_pll gxbb_sys_pll = {
},
};
-struct pll_params_table gxbb_gp0_params_table[] = {
- PLL_PARAM(HHI_GP0_PLL_CNTL, 0x6a000228),
- PLL_PARAM(HHI_GP0_PLL_CNTL2, 0x69c80000),
- PLL_PARAM(HHI_GP0_PLL_CNTL3, 0x0a5590c4),
- PLL_PARAM(HHI_GP0_PLL_CNTL4, 0x0000500d),
-};
-
-static struct meson_clk_pll gxbb_gp0_pll = {
- .m = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
- .params = {
- .params_table = gxbb_gp0_params_table,
- .params_count = ARRAY_SIZE(gxbb_gp0_params_table),
- .no_init_reset = true,
- .clear_reset_for_lock = true,
+static const struct reg_sequence gxbb_gp0_init_regs[] = {
+ { .reg = HHI_GP0_PLL_CNTL2, .def = 0x69c80000 },
+ { .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a5590c4 },
+ { .reg = HHI_GP0_PLL_CNTL4, .def = 0x0000500d },
+ { .reg = HHI_GP0_PLL_CNTL, .def = 0x4a000228 },
+};
+
+static struct clk_regmap gxbb_gp0_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = gxbb_gp0_pll_rate_table,
+ .init_regs = gxbb_gp0_init_regs,
+ .init_count = ARRAY_SIZE(gxbb_gp0_init_regs),
},
- .rate_table = gxbb_gp0_pll_rate_table,
- .rate_count = ARRAY_SIZE(gxbb_gp0_pll_rate_table),
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "gp0_pll",
.ops = &meson_clk_pll_ops,
@@ -409,40 +436,51 @@ static struct meson_clk_pll gxbb_gp0_pll = {
},
};
-struct pll_params_table gxl_gp0_params_table[] = {
- PLL_PARAM(HHI_GP0_PLL_CNTL, 0x40010250),
- PLL_PARAM(HHI_GP0_PLL_CNTL1, 0xc084a000),
- PLL_PARAM(HHI_GP0_PLL_CNTL2, 0xb75020be),
- PLL_PARAM(HHI_GP0_PLL_CNTL3, 0x0a59a288),
- PLL_PARAM(HHI_GP0_PLL_CNTL4, 0xc000004d),
- PLL_PARAM(HHI_GP0_PLL_CNTL5, 0x00078000),
-};
-
-static struct meson_clk_pll gxl_gp0_pll = {
- .m = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 16,
- .width = 2,
+static const struct reg_sequence gxl_gp0_init_regs[] = {
+ { .reg = HHI_GP0_PLL_CNTL1, .def = 0xc084b000 },
+ { .reg = HHI_GP0_PLL_CNTL2, .def = 0xb75020be },
+ { .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a59a288 },
+ { .reg = HHI_GP0_PLL_CNTL4, .def = 0xc000004d },
+ { .reg = HHI_GP0_PLL_CNTL5, .def = 0x00078000 },
+ { .reg = HHI_GP0_PLL_CNTL, .def = 0x40010250 },
+};
+
+static struct clk_regmap gxl_gp0_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_GP0_PLL_CNTL1,
+ .shift = 0,
+ .width = 10,
+ },
+ .l = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = gxl_gp0_pll_rate_table,
+ .init_regs = gxl_gp0_init_regs,
+ .init_count = ARRAY_SIZE(gxl_gp0_init_regs),
},
- .params = {
- .params_table = gxl_gp0_params_table,
- .params_count = ARRAY_SIZE(gxl_gp0_params_table),
- .no_init_reset = true,
- .reset_lock_loop = true,
- },
- .rate_table = gxl_gp0_pll_rate_table,
- .rate_count = ARRAY_SIZE(gxl_gp0_pll_rate_table),
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "gp0_pll",
.ops = &meson_clk_pll_ops,
@@ -452,161 +490,267 @@ static struct meson_clk_pll gxl_gp0_pll = {
},
};
-static struct clk_fixed_factor gxbb_fclk_div2 = {
+static struct clk_fixed_factor gxbb_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div2",
+ .name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor gxbb_fclk_div3 = {
+static struct clk_regmap gxbb_fclk_div2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 27,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div2_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div3",
+ .name = "fclk_div3_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor gxbb_fclk_div4 = {
+static struct clk_regmap gxbb_fclk_div3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 28,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div3_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div4",
+ .name = "fclk_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor gxbb_fclk_div5 = {
+static struct clk_regmap gxbb_fclk_div4 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 29,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div4_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div5",
+ .name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor gxbb_fclk_div7 = {
+static struct clk_regmap gxbb_fclk_div5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div5_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div7",
+ .name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll gxbb_mpll0 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 0,
- .width = 14,
+static struct clk_regmap gxbb_fclk_div7 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 31,
},
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 15,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div7_div" },
+ .num_parents = 1,
},
- .n2 = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 16,
- .width = 9,
+};
+
+static struct clk_regmap gxbb_mpll_prediv = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPLL_CNTL5,
+ .shift = 12,
+ .width = 1,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 14,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_prediv",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
},
- .ssen = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 25,
- .width = 1,
+};
+
+static struct clk_regmap gxbb_mpll0_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 16,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 25,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll0",
+ .name = "mpll0_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll gxbb_mpll1 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 0,
- .width = 14,
- },
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 15,
- .width = 1,
+static struct clk_regmap gxbb_mpll0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL7,
+ .bit_idx = 14,
},
- .n2 = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 16,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 14,
- .width = 1,
+};
+
+static struct clk_regmap gxbb_mpll1_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll1",
+ .name = "mpll1_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll gxbb_mpll2 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 0,
- .width = 14,
- },
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 15,
- .width = 1,
+static struct clk_regmap gxbb_mpll1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL8,
+ .bit_idx = 14,
},
- .n2 = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 16,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 14,
- .width = 1,
+};
+
+static struct clk_regmap gxbb_mpll2_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll2",
+ .name = "mpll2_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-/*
- * FIXME The legacy composite clocks (e.g. clk81) are both PLL post-dividers
- * and should be modeled with their respective PLLs via the forthcoming
- * coordinated clock rates feature
- */
+static struct clk_regmap gxbb_mpll2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL9,
+ .bit_idx = 14,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
static const char * const clk81_parent_names[] = {
@@ -614,16 +758,16 @@ static const char * const clk81_parent_names[] = {
"fclk_div3", "fclk_div5"
};
-static struct clk_mux gxbb_mpeg_clk_sel = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .mask = 0x7,
- .shift = 12,
- .flags = CLK_MUX_READ_ONLY,
- .table = mux_table_clk81,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mpeg_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 12,
+ .table = mux_table_clk81,
+ },
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_sel",
- .ops = &clk_mux_ro_ops,
+ .ops = &clk_regmap_mux_ro_ops,
/*
* bits 14:12 selects from 8 possible parents:
* xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
@@ -631,72 +775,75 @@ static struct clk_mux gxbb_mpeg_clk_sel = {
*/
.parent_names = clk81_parent_names,
.num_parents = ARRAY_SIZE(clk81_parent_names),
- .flags = (CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED),
},
};
-static struct clk_divider gxbb_mpeg_clk_div = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mpeg_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ro_ops,
.parent_names = (const char *[]){ "mpeg_clk_sel" },
.num_parents = 1,
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
},
};
-/* the mother of dragons^W gates */
-static struct clk_gate gxbb_clk81 = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .bit_idx = 7,
- .lock = &meson_clk_lock,
+/* the mother of dragons gates */
+static struct clk_regmap gxbb_clk81 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .bit_idx = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "clk81",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "mpeg_clk_div" },
.num_parents = 1,
- .flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
+ .flags = CLK_IS_CRITICAL,
},
};
-static struct clk_mux gxbb_sar_adc_clk_sel = {
- .reg = (void *)HHI_SAR_CLK_CNTL,
- .mask = 0x3,
- .shift = 9,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sar_adc_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SAR_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data){
.name = "sar_adc_clk_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/* NOTE: The datasheet doesn't list the parents for bit 10 */
.parent_names = (const char *[]){ "xtal", "clk81", },
.num_parents = 2,
},
};
-static struct clk_divider gxbb_sar_adc_clk_div = {
- .reg = (void *)HHI_SAR_CLK_CNTL,
- .shift = 0,
- .width = 8,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sar_adc_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SAR_CLK_CNTL,
+ .shift = 0,
+ .width = 8,
+ },
.hw.init = &(struct clk_init_data){
.name = "sar_adc_clk_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sar_adc_clk_sel" },
.num_parents = 1,
},
};
-static struct clk_gate gxbb_sar_adc_clk = {
- .reg = (void *)HHI_SAR_CLK_CNTL,
- .bit_idx = 8,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sar_adc_clk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SAR_CLK_CNTL,
+ .bit_idx = 8,
+ },
.hw.init = &(struct clk_init_data){
.name = "sar_adc_clk",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "sar_adc_clk_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -708,21 +855,20 @@ static struct clk_gate gxbb_sar_adc_clk = {
* muxed by a glitch-free switch.
*/
-static u32 mux_table_mali_0_1[] = {0, 1, 2, 3, 4, 5, 6, 7};
static const char * const gxbb_mali_0_1_parent_names[] = {
"xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7",
"fclk_div4", "fclk_div3", "fclk_div5"
};
-static struct clk_mux gxbb_mali_0_sel = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .mask = 0x7,
- .shift = 9,
- .table = mux_table_mali_0_1,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bits 10:9 selects from 8 possible parents:
* xtal, gp0_pll, mpll2, mpll1, fclk_div7,
@@ -734,42 +880,44 @@ static struct clk_mux gxbb_mali_0_sel = {
},
};
-static struct clk_divider gxbb_mali_0_div = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali_0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "mali_0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
-static struct clk_gate gxbb_mali_0 = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .bit_idx = 8,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .bit_idx = 8,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali_0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "mali_0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_mux gxbb_mali_1_sel = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .mask = 0x7,
- .shift = 25,
- .table = mux_table_mali_0_1,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bits 10:9 selects from 8 possible parents:
* xtal, gp0_pll, mpll2, mpll1, fclk_div7,
@@ -781,77 +929,79 @@ static struct clk_mux gxbb_mali_1_sel = {
},
};
-static struct clk_divider gxbb_mali_1_div = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .shift = 16,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali_1_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "mali_1_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
-static struct clk_gate gxbb_mali_1 = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .bit_idx = 24,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .bit_idx = 24,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali_1",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "mali_1_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static u32 mux_table_mali[] = {0, 1};
static const char * const gxbb_mali_parent_names[] = {
"mali_0", "mali_1"
};
-static struct clk_mux gxbb_mali = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .mask = 1,
- .shift = 31,
- .table = mux_table_mali,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = gxbb_mali_parent_names,
.num_parents = 2,
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
-static struct clk_mux gxbb_cts_amclk_sel = {
- .reg = (void *) HHI_AUD_CLK_CNTL,
- .mask = 0x3,
- .shift = 9,
- /* Default parent unknown (register reset value: 0) */
- .table = (u32[]){ 1, 2, 3 },
- .lock = &meson_clk_lock,
- .hw.init = &(struct clk_init_data){
+static struct clk_regmap gxbb_cts_amclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_AUD_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .table = (u32[]){ 1, 2, 3 },
+ },
+ .hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
.num_parents = 3,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct meson_clk_audio_divider gxbb_cts_amclk_div = {
- .div = {
- .reg_off = HHI_AUD_CLK_CNTL,
- .shift = 0,
- .width = 8,
+static struct clk_regmap gxbb_cts_amclk_div = {
+ .data = &(struct meson_clk_audio_div_data){
+ .div = {
+ .reg_off = HHI_AUD_CLK_CNTL,
+ .shift = 0,
+ .width = 8,
+ },
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
},
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_div",
.ops = &meson_clk_audio_divider_ops,
@@ -861,71 +1011,75 @@ static struct meson_clk_audio_divider gxbb_cts_amclk_div = {
},
};
-static struct clk_gate gxbb_cts_amclk = {
- .reg = (void *) HHI_AUD_CLK_CNTL,
- .bit_idx = 8,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_cts_amclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_AUD_CLK_CNTL,
+ .bit_idx = 8,
+ },
.hw.init = &(struct clk_init_data){
.name = "cts_amclk",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "cts_amclk_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_mux gxbb_cts_mclk_i958_sel = {
- .reg = (void *)HHI_AUD_CLK_CNTL2,
- .mask = 0x3,
- .shift = 25,
- /* Default parent unknown (register reset value: 0) */
- .table = (u32[]){ 1, 2, 3 },
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_cts_mclk_i958_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_AUD_CLK_CNTL2,
+ .mask = 0x3,
+ .shift = 25,
+ .table = (u32[]){ 1, 2, 3 },
+ },
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
.num_parents = 3,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_divider gxbb_cts_mclk_i958_div = {
- .reg = (void *)HHI_AUD_CLK_CNTL2,
- .shift = 16,
- .width = 8,
- .lock = &meson_clk_lock,
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
+static struct clk_regmap gxbb_cts_mclk_i958_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_AUD_CLK_CNTL2,
+ .shift = 16,
+ .width = 8,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "cts_mclk_i958_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_cts_mclk_i958 = {
- .reg = (void *)HHI_AUD_CLK_CNTL2,
- .bit_idx = 24,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_cts_mclk_i958 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_AUD_CLK_CNTL2,
+ .bit_idx = 24,
+ },
.hw.init = &(struct clk_init_data){
.name = "cts_mclk_i958",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "cts_mclk_i958_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_mux gxbb_cts_i958 = {
- .reg = (void *)HHI_AUD_CLK_CNTL2,
- .mask = 0x1,
- .shift = 27,
- .lock = &meson_clk_lock,
- .hw.init = &(struct clk_init_data){
+static struct clk_regmap gxbb_cts_i958 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_AUD_CLK_CNTL2,
+ .mask = 0x1,
+ .shift = 27,
+ },
+ .hw.init = &(struct clk_init_data){
.name = "cts_i958",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "cts_amclk", "cts_mclk_i958" },
.num_parents = 2,
/*
@@ -936,27 +1090,29 @@ static struct clk_mux gxbb_cts_i958 = {
},
};
-static struct clk_divider gxbb_32k_clk_div = {
- .reg = (void *)HHI_32K_CLK_CNTL,
- .shift = 0,
- .width = 14,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_32k_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_32K_CLK_CNTL,
+ .shift = 0,
+ .width = 14,
+ },
.hw.init = &(struct clk_init_data){
.name = "32k_clk_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "32k_clk_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST,
},
};
-static struct clk_gate gxbb_32k_clk = {
- .reg = (void *)HHI_32K_CLK_CNTL,
- .bit_idx = 15,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_32k_clk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_32K_CLK_CNTL,
+ .bit_idx = 15,
+ },
.hw.init = &(struct clk_init_data){
.name = "32k_clk",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "32k_clk_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -967,14 +1123,15 @@ static const char * const gxbb_32k_clk_parent_names[] = {
"xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5"
};
-static struct clk_mux gxbb_32k_clk_sel = {
- .reg = (void *)HHI_32K_CLK_CNTL,
- .mask = 0x3,
- .shift = 16,
- .lock = &meson_clk_lock,
- .hw.init = &(struct clk_init_data){
+static struct clk_regmap gxbb_32k_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_32K_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
.name = "32k_clk_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = gxbb_32k_clk_parent_names,
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
@@ -993,42 +1150,45 @@ static const char * const gxbb_sd_emmc_clk0_parent_names[] = {
};
/* SDIO clock */
-static struct clk_mux gxbb_sd_emmc_a_clk0_sel = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .mask = 0x7,
- .shift = 9,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sd_emmc_a_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = gxbb_sd_emmc_clk0_parent_names,
.num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_divider gxbb_sd_emmc_a_clk0_div = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
+static struct clk_regmap gxbb_sd_emmc_a_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sd_emmc_a_clk0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_sd_emmc_a_clk0 = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .bit_idx = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sd_emmc_a_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .bit_idx = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_a_clk0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_a_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1036,42 +1196,45 @@ static struct clk_gate gxbb_sd_emmc_a_clk0 = {
};
/* SDcard clock */
-static struct clk_mux gxbb_sd_emmc_b_clk0_sel = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .mask = 0x7,
- .shift = 25,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sd_emmc_b_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = gxbb_sd_emmc_clk0_parent_names,
.num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_divider gxbb_sd_emmc_b_clk0_div = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .shift = 16,
- .width = 7,
- .lock = &meson_clk_lock,
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
+static struct clk_regmap gxbb_sd_emmc_b_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_sd_emmc_b_clk0 = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .bit_idx = 23,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sd_emmc_b_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .bit_idx = 23,
+ },
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_b_clk0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1079,42 +1242,45 @@ static struct clk_gate gxbb_sd_emmc_b_clk0 = {
};
/* EMMC/NAND clock */
-static struct clk_mux gxbb_sd_emmc_c_clk0_sel = {
- .reg = (void *)HHI_NAND_CLK_CNTL,
- .mask = 0x7,
- .shift = 9,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sd_emmc_c_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = gxbb_sd_emmc_clk0_parent_names,
.num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_divider gxbb_sd_emmc_c_clk0_div = {
- .reg = (void *)HHI_NAND_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
+static struct clk_regmap gxbb_sd_emmc_c_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_sd_emmc_c_clk0 = {
- .reg = (void *)HHI_NAND_CLK_CNTL,
- .bit_idx = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sd_emmc_c_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .bit_idx = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_c_clk0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1123,20 +1289,19 @@ static struct clk_gate gxbb_sd_emmc_c_clk0 = {
/* VPU Clock */
-static u32 mux_table_vpu[] = {0, 1, 2, 3};
static const char * const gxbb_vpu_parent_names[] = {
"fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
};
-static struct clk_mux gxbb_vpu_0_sel = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .mask = 0x3,
- .shift = 9,
- .lock = &meson_clk_lock,
- .table = mux_table_vpu,
+static struct clk_regmap gxbb_vpu_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
@@ -1147,42 +1312,44 @@ static struct clk_mux gxbb_vpu_0_sel = {
},
};
-static struct clk_divider gxbb_vpu_0_div = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vpu_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "vpu_0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "vpu_0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_vpu_0 = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .bit_idx = 8,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vpu_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 8,
+ },
.hw.init = &(struct clk_init_data) {
.name = "vpu_0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "vpu_0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
};
-static struct clk_mux gxbb_vpu_1_sel = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .mask = 0x3,
- .shift = 25,
- .lock = &meson_clk_lock,
- .table = mux_table_vpu,
+static struct clk_regmap gxbb_vpu_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
@@ -1193,41 +1360,44 @@ static struct clk_mux gxbb_vpu_1_sel = {
},
};
-static struct clk_divider gxbb_vpu_1_div = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .shift = 16,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vpu_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "vpu_1_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "vpu_1_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_vpu_1 = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .bit_idx = 24,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vpu_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 24,
+ },
.hw.init = &(struct clk_init_data) {
.name = "vpu_1",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "vpu_1_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
};
-static struct clk_mux gxbb_vpu = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .mask = 1,
- .shift = 31,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vpu = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
.hw.init = &(struct clk_init_data){
.name = "vpu",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bit 31 selects from 2 possible parents:
* vpu_0 or vpu_1
@@ -1240,20 +1410,19 @@ static struct clk_mux gxbb_vpu = {
/* VAPB Clock */
-static u32 mux_table_vapb[] = {0, 1, 2, 3};
static const char * const gxbb_vapb_parent_names[] = {
"fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
};
-static struct clk_mux gxbb_vapb_0_sel = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .mask = 0x3,
- .shift = 9,
- .lock = &meson_clk_lock,
- .table = mux_table_vapb,
+static struct clk_regmap gxbb_vapb_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
@@ -1264,42 +1433,44 @@ static struct clk_mux gxbb_vapb_0_sel = {
},
};
-static struct clk_divider gxbb_vapb_0_div = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vapb_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "vapb_0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "vapb_0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_vapb_0 = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .bit_idx = 8,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vapb_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 8,
+ },
.hw.init = &(struct clk_init_data) {
.name = "vapb_0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "vapb_0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
};
-static struct clk_mux gxbb_vapb_1_sel = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .mask = 0x3,
- .shift = 25,
- .lock = &meson_clk_lock,
- .table = mux_table_vapb,
+static struct clk_regmap gxbb_vapb_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
@@ -1310,41 +1481,44 @@ static struct clk_mux gxbb_vapb_1_sel = {
},
};
-static struct clk_divider gxbb_vapb_1_div = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .shift = 16,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vapb_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "vapb_1_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "vapb_1_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_vapb_1 = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .bit_idx = 24,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vapb_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 24,
+ },
.hw.init = &(struct clk_init_data) {
.name = "vapb_1",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "vapb_1_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
};
-static struct clk_mux gxbb_vapb_sel = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .mask = 1,
- .shift = 31,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vapb_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
.hw.init = &(struct clk_init_data){
.name = "vapb_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bit 31 selects from 2 possible parents:
* vapb_0 or vapb_1
@@ -1355,13 +1529,14 @@ static struct clk_mux gxbb_vapb_sel = {
},
};
-static struct clk_gate gxbb_vapb = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .bit_idx = 30,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vapb = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 30,
+ },
.hw.init = &(struct clk_init_data) {
.name = "vapb",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "vapb_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
@@ -1601,6 +1776,16 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_VAPB_1] = &gxbb_vapb_1.hw,
[CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw,
[CLKID_VAPB] = &gxbb_vapb.hw,
+ [CLKID_HDMI_PLL_PRE_MULT] = &gxbb_hdmi_pll_pre_mult.hw,
+ [CLKID_MPLL0_DIV] = &gxbb_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &gxbb_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &gxbb_mpll2_div.hw,
+ [CLKID_MPLL_PREDIV] = &gxbb_mpll_prediv.hw,
+ [CLKID_FCLK_DIV2_DIV] = &gxbb_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &gxbb_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &gxbb_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &gxbb_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &gxbb_fclk_div7_div.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -1609,7 +1794,7 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
static struct clk_hw_onecell_data gxl_hw_onecell_data = {
.hws = {
[CLKID_SYS_PLL] = &gxbb_sys_pll.hw,
- [CLKID_HDMI_PLL] = &gxbb_hdmi_pll.hw,
+ [CLKID_HDMI_PLL] = &gxl_hdmi_pll.hw,
[CLKID_FIXED_PLL] = &gxbb_fixed_pll.hw,
[CLKID_FCLK_DIV2] = &gxbb_fclk_div2.hw,
[CLKID_FCLK_DIV3] = &gxbb_fclk_div3.hw,
@@ -1748,34 +1933,31 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_VAPB_1] = &gxbb_vapb_1.hw,
[CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw,
[CLKID_VAPB] = &gxbb_vapb.hw,
+ [CLKID_MPLL0_DIV] = &gxbb_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &gxbb_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &gxbb_mpll2_div.hw,
+ [CLKID_MPLL_PREDIV] = &gxbb_mpll_prediv.hw,
+ [CLKID_FCLK_DIV2_DIV] = &gxbb_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &gxbb_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &gxbb_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &gxbb_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &gxbb_fclk_div7_div.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
};
-/* Convenience tables to populate base addresses in .probe */
-
-static struct meson_clk_pll *const gxbb_clk_plls[] = {
- &gxbb_fixed_pll,
- &gxbb_hdmi_pll,
- &gxbb_sys_pll,
+static struct clk_regmap *const gxbb_clk_regmaps[] = {
&gxbb_gp0_pll,
-};
-
-static struct meson_clk_pll *const gxl_clk_plls[] = {
- &gxbb_fixed_pll,
&gxbb_hdmi_pll,
- &gxbb_sys_pll,
- &gxl_gp0_pll,
};
-static struct meson_clk_mpll *const gxbb_clk_mplls[] = {
- &gxbb_mpll0,
- &gxbb_mpll1,
- &gxbb_mpll2,
+static struct clk_regmap *const gxl_clk_regmaps[] = {
+ &gxl_gp0_pll,
+ &gxl_hdmi_pll,
};
-static struct clk_gate *const gxbb_clk_gates[] = {
+static struct clk_regmap *const gx_clk_regmaps[] = {
&gxbb_clk81,
&gxbb_ddr,
&gxbb_dos,
@@ -1872,9 +2054,19 @@ static struct clk_gate *const gxbb_clk_gates[] = {
&gxbb_vapb_0,
&gxbb_vapb_1,
&gxbb_vapb,
-};
-
-static struct clk_mux *const gxbb_clk_muxes[] = {
+ &gxbb_mpeg_clk_div,
+ &gxbb_sar_adc_clk_div,
+ &gxbb_mali_0_div,
+ &gxbb_mali_1_div,
+ &gxbb_cts_mclk_i958_div,
+ &gxbb_32k_clk_div,
+ &gxbb_sd_emmc_a_clk0_div,
+ &gxbb_sd_emmc_b_clk0_div,
+ &gxbb_sd_emmc_c_clk0_div,
+ &gxbb_vpu_0_div,
+ &gxbb_vpu_1_div,
+ &gxbb_vapb_0_div,
+ &gxbb_vapb_1_div,
&gxbb_mpeg_clk_sel,
&gxbb_sar_adc_clk_sel,
&gxbb_mali_0_sel,
@@ -1893,73 +2085,38 @@ static struct clk_mux *const gxbb_clk_muxes[] = {
&gxbb_vapb_0_sel,
&gxbb_vapb_1_sel,
&gxbb_vapb_sel,
-};
-
-static struct clk_divider *const gxbb_clk_dividers[] = {
- &gxbb_mpeg_clk_div,
- &gxbb_sar_adc_clk_div,
- &gxbb_mali_0_div,
- &gxbb_mali_1_div,
- &gxbb_cts_mclk_i958_div,
- &gxbb_32k_clk_div,
- &gxbb_sd_emmc_a_clk0_div,
- &gxbb_sd_emmc_b_clk0_div,
- &gxbb_sd_emmc_c_clk0_div,
- &gxbb_vpu_0_div,
- &gxbb_vpu_1_div,
- &gxbb_vapb_0_div,
- &gxbb_vapb_1_div,
-};
-
-static struct meson_clk_audio_divider *const gxbb_audio_dividers[] = {
+ &gxbb_mpll0,
+ &gxbb_mpll1,
+ &gxbb_mpll2,
+ &gxbb_mpll0_div,
+ &gxbb_mpll1_div,
+ &gxbb_mpll2_div,
&gxbb_cts_amclk_div,
+ &gxbb_fixed_pll,
+ &gxbb_sys_pll,
+ &gxbb_mpll_prediv,
+ &gxbb_fclk_div2,
+ &gxbb_fclk_div3,
+ &gxbb_fclk_div4,
+ &gxbb_fclk_div5,
+ &gxbb_fclk_div7,
};
struct clkc_data {
- struct clk_gate *const *clk_gates;
- unsigned int clk_gates_count;
- struct meson_clk_mpll *const *clk_mplls;
- unsigned int clk_mplls_count;
- struct meson_clk_pll *const *clk_plls;
- unsigned int clk_plls_count;
- struct clk_mux *const *clk_muxes;
- unsigned int clk_muxes_count;
- struct clk_divider *const *clk_dividers;
- unsigned int clk_dividers_count;
- struct meson_clk_audio_divider *const *clk_audio_dividers;
- unsigned int clk_audio_dividers_count;
+ struct clk_regmap *const *regmap_clks;
+ unsigned int regmap_clks_count;
struct clk_hw_onecell_data *hw_onecell_data;
};
static const struct clkc_data gxbb_clkc_data = {
- .clk_gates = gxbb_clk_gates,
- .clk_gates_count = ARRAY_SIZE(gxbb_clk_gates),
- .clk_mplls = gxbb_clk_mplls,
- .clk_mplls_count = ARRAY_SIZE(gxbb_clk_mplls),
- .clk_plls = gxbb_clk_plls,
- .clk_plls_count = ARRAY_SIZE(gxbb_clk_plls),
- .clk_muxes = gxbb_clk_muxes,
- .clk_muxes_count = ARRAY_SIZE(gxbb_clk_muxes),
- .clk_dividers = gxbb_clk_dividers,
- .clk_dividers_count = ARRAY_SIZE(gxbb_clk_dividers),
- .clk_audio_dividers = gxbb_audio_dividers,
- .clk_audio_dividers_count = ARRAY_SIZE(gxbb_audio_dividers),
+ .regmap_clks = gxbb_clk_regmaps,
+ .regmap_clks_count = ARRAY_SIZE(gxbb_clk_regmaps),
.hw_onecell_data = &gxbb_hw_onecell_data,
};
static const struct clkc_data gxl_clkc_data = {
- .clk_gates = gxbb_clk_gates,
- .clk_gates_count = ARRAY_SIZE(gxbb_clk_gates),
- .clk_mplls = gxbb_clk_mplls,
- .clk_mplls_count = ARRAY_SIZE(gxbb_clk_mplls),
- .clk_plls = gxl_clk_plls,
- .clk_plls_count = ARRAY_SIZE(gxl_clk_plls),
- .clk_muxes = gxbb_clk_muxes,
- .clk_muxes_count = ARRAY_SIZE(gxbb_clk_muxes),
- .clk_dividers = gxbb_clk_dividers,
- .clk_dividers_count = ARRAY_SIZE(gxbb_clk_dividers),
- .clk_audio_dividers = gxbb_audio_dividers,
- .clk_audio_dividers_count = ARRAY_SIZE(gxbb_audio_dividers),
+ .regmap_clks = gxl_clk_regmaps,
+ .regmap_clks_count = ARRAY_SIZE(gxl_clk_regmaps),
.hw_onecell_data = &gxl_hw_onecell_data,
};
@@ -1969,71 +2126,79 @@ static const struct of_device_id clkc_match_table[] = {
{},
};
+static const struct regmap_config clkc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int gxbb_clkc_probe(struct platform_device *pdev)
{
const struct clkc_data *clkc_data;
+ struct resource *res;
void __iomem *clk_base;
- int ret, clkid, i;
+ struct regmap *map;
+ int ret, i;
struct device *dev = &pdev->dev;
- clkc_data = of_device_get_match_data(&pdev->dev);
+ clkc_data = of_device_get_match_data(dev);
if (!clkc_data)
return -EINVAL;
- /* Generic clocks and PLLs */
- clk_base = of_iomap(dev->of_node, 0);
- if (!clk_base) {
- pr_err("%s: Unable to map clk base\n", __func__);
- return -ENXIO;
- }
-
- /* Populate base address for PLLs */
- for (i = 0; i < clkc_data->clk_plls_count; i++)
- clkc_data->clk_plls[i]->base = clk_base;
-
- /* Populate base address for MPLLs */
- for (i = 0; i < clkc_data->clk_mplls_count; i++)
- clkc_data->clk_mplls[i]->base = clk_base;
+ /* Get the hhi system controller node if available */
+ map = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "failed to get HHI regmap - Trying obsolete regs\n");
- /* Populate base address for gates */
- for (i = 0; i < clkc_data->clk_gates_count; i++)
- clkc_data->clk_gates[i]->reg = clk_base +
- (u64)clkc_data->clk_gates[i]->reg;
-
- /* Populate base address for muxes */
- for (i = 0; i < clkc_data->clk_muxes_count; i++)
- clkc_data->clk_muxes[i]->reg = clk_base +
- (u64)clkc_data->clk_muxes[i]->reg;
+ /*
+ * FIXME: HHI registers should be accessed through
+ * the appropriate system controller. This is required because
+ * there is more than just clocks in this register space
+ *
+ * This fallback method is only provided temporarily until
+ * all the platform DTs are properly using the syscon node
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ clk_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!clk_base) {
+ dev_err(dev, "Unable to map clk base\n");
+ return -ENXIO;
+ }
+
+ map = devm_regmap_init_mmio(dev, clk_base,
+ &clkc_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+ }
- /* Populate base address for dividers */
- for (i = 0; i < clkc_data->clk_dividers_count; i++)
- clkc_data->clk_dividers[i]->reg = clk_base +
- (u64)clkc_data->clk_dividers[i]->reg;
+ /* Populate regmap for the common regmap backed clocks */
+ for (i = 0; i < ARRAY_SIZE(gx_clk_regmaps); i++)
+ gx_clk_regmaps[i]->map = map;
- /* Populate base address for the audio dividers */
- for (i = 0; i < clkc_data->clk_audio_dividers_count; i++)
- clkc_data->clk_audio_dividers[i]->base = clk_base;
+ /* Populate regmap for soc specific clocks */
+ for (i = 0; i < clkc_data->regmap_clks_count; i++)
+ clkc_data->regmap_clks[i]->map = map;
- /*
- * register all clks
- */
- for (clkid = 0; clkid < clkc_data->hw_onecell_data->num; clkid++) {
+ /* Register all clks */
+ for (i = 0; i < clkc_data->hw_onecell_data->num; i++) {
/* array might be sparse */
- if (!clkc_data->hw_onecell_data->hws[clkid])
+ if (!clkc_data->hw_onecell_data->hws[i])
continue;
ret = devm_clk_hw_register(dev,
- clkc_data->hw_onecell_data->hws[clkid]);
- if (ret)
- goto iounmap;
+ clkc_data->hw_onecell_data->hws[i]);
+ if (ret) {
+ dev_err(dev, "Clock registration failed\n");
+ return ret;
+ }
}
- return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
- clkc_data->hw_onecell_data);
-
-iounmap:
- iounmap(clk_base);
- return ret;
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ clkc_data->hw_onecell_data);
}
static struct platform_driver gxbb_driver = {
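Every gxbb.c hunk above applies the same transformation: the per-clock iomem pointer, spinlock and generic clk_{gate,mux,divider} ops are replaced by a clk_regmap that only records offsets, and probe hands out one shared regmap (the HHI syscon when the DT provides it, an MMIO regmap as a fallback). Condensed to a single hypothetical gate — the offset, clock names and parent below are placeholders, only the structure follows this diff:

/* Sketch only: uses the meson clk-regmap helpers introduced by this series. */
#define EXAMPLE_CLK_CNTL	0x100	/* hypothetical HHI register offset */

static struct clk_regmap example_gate = {
	.data = &(struct clk_regmap_gate_data){
		.offset = EXAMPLE_CLK_CNTL,
		.bit_idx = 7,
	},
	.hw.init = &(struct clk_init_data){
		.name = "example_gate",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "example_div" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
	},
};

/*
 * At probe time the shared map is assigned once per clock, instead of
 * patching each clock's .reg pointer with the ioremapped base:
 *
 *	example_gate.map = map;
 */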
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index aee6fbba2004..9febf3f03739 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -194,8 +194,18 @@
#define CLKID_VPU_1_DIV 130
#define CLKID_VAPB_0_DIV 134
#define CLKID_VAPB_1_DIV 137
-
-#define NR_CLKS 141
+#define CLKID_HDMI_PLL_PRE_MULT 141
+#define CLKID_MPLL0_DIV 142
+#define CLKID_MPLL1_DIV 143
+#define CLKID_MPLL2_DIV 144
+#define CLKID_MPLL_PREDIV 145
+#define CLKID_FCLK_DIV2_DIV 146
+#define CLKID_FCLK_DIV3_DIV 147
+#define CLKID_FCLK_DIV4_DIV 148
+#define CLKID_FCLK_DIV5_DIV 149
+#define CLKID_FCLK_DIV7_DIV 150
+
+#define NR_CLKS 151
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 3ffea80c1308..d0524ec71aad 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -23,14 +23,16 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
-#include <linux/init.h>
+#include <linux/regmap.h>
#include "clkc.h"
#include "meson8b.h"
+#include "clk-regmap.h"
static DEFINE_SPINLOCK(meson_clk_lock);
@@ -97,20 +99,6 @@ static const struct pll_rate_table sys_pll_rate_table[] = {
{ /* sentinel */ },
};
-static const struct clk_div_table cpu_div_table[] = {
- { .val = 1, .div = 1 },
- { .val = 2, .div = 2 },
- { .val = 3, .div = 3 },
- { .val = 2, .div = 4 },
- { .val = 3, .div = 6 },
- { .val = 4, .div = 8 },
- { .val = 5, .div = 10 },
- { .val = 6, .div = 12 },
- { .val = 7, .div = 14 },
- { .val = 8, .div = 16 },
- { /* sentinel */ },
-};
-
static struct clk_fixed_rate meson8b_xtal = {
.fixed_rate = 24000000,
.hw.init = &(struct clk_init_data){
@@ -120,23 +108,39 @@ static struct clk_fixed_rate meson8b_xtal = {
},
};
-static struct meson_clk_pll meson8b_fixed_pll = {
- .m = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 16,
- .width = 2,
+static struct clk_regmap meson8b_fixed_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_MPLL_CNTL2,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -146,23 +150,34 @@ static struct meson_clk_pll meson8b_fixed_pll = {
},
};
-static struct meson_clk_pll meson8b_vid_pll = {
- .m = {
- .reg_off = HHI_VID_PLL_CNTL,
- .shift = 0,
- .width = 9,
+static struct clk_regmap meson8b_vid_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_VID_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_VID_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_VID_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_VID_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_VID_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .n = {
- .reg_off = HHI_VID_PLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_VID_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "vid_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -172,213 +187,317 @@ static struct meson_clk_pll meson8b_vid_pll = {
},
};
-static struct meson_clk_pll meson8b_sys_pll = {
- .m = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 9,
- .width = 5,
+static struct clk_regmap meson8b_sys_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = sys_pll_rate_table,
},
- .od = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
- .rate_table = sys_pll_rate_table,
- .rate_count = ARRAY_SIZE(sys_pll_rate_table),
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
- .ops = &meson_clk_pll_ops,
+ .ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
},
};
-static struct clk_fixed_factor meson8b_fclk_div2 = {
+static struct clk_fixed_factor meson8b_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div2",
+ .name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor meson8b_fclk_div3 = {
+static struct clk_regmap meson8b_fclk_div2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 27,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div2_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div3",
+ .name = "fclk_div3_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor meson8b_fclk_div4 = {
+static struct clk_regmap meson8b_fclk_div3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 28,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div3_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div4",
+ .name = "fclk_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor meson8b_fclk_div5 = {
+static struct clk_regmap meson8b_fclk_div4 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 29,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div4_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div5",
+ .name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor meson8b_fclk_div7 = {
+static struct clk_regmap meson8b_fclk_div5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div5_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div7",
+ .name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll meson8b_mpll0 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 0,
- .width = 14,
+static struct clk_regmap meson8b_fclk_div7 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 31,
},
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 15,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div7_div" },
+ .num_parents = 1,
},
- .n2 = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 16,
- .width = 9,
+};
+
+static struct clk_regmap meson8b_mpll_prediv = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPLL_CNTL5,
+ .shift = 12,
+ .width = 1,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 14,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_prediv",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
},
- .ssen = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 25,
- .width = 1,
+};
+
+static struct clk_regmap meson8b_mpll0_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 16,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 25,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll0",
+ .name = "mpll0_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll meson8b_mpll1 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 0,
- .width = 14,
+static struct clk_regmap meson8b_mpll0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL7,
+ .bit_idx = 14,
},
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 15,
- .width = 1,
- },
- .n2 = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 16,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 14,
- .width = 1,
+};
+
+static struct clk_regmap meson8b_mpll1_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll1",
+ .name = "mpll1_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll meson8b_mpll2 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 0,
- .width = 14,
- },
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 15,
- .width = 1,
+static struct clk_regmap meson8b_mpll1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL8,
+ .bit_idx = 14,
},
- .n2 = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 16,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 14,
- .width = 1,
+};
+
+static struct clk_regmap meson8b_mpll2_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll2",
+ .name = "mpll2_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-/*
- * FIXME cpu clocks and the legacy composite clocks (e.g. clk81) are both PLL
- * post-dividers and should be modeled with their respective PLLs via the
- * forthcoming coordinated clock rates feature
- */
-static struct meson_clk_cpu meson8b_cpu_clk = {
- .reg_off = HHI_SYS_CPU_CLK_CNTL1,
- .div_table = cpu_div_table,
- .clk_nb.notifier_call = meson_clk_cpu_notifier_cb,
+static struct clk_regmap meson8b_mpll2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL9,
+ .bit_idx = 14,
+ },
.hw.init = &(struct clk_init_data){
- .name = "cpu_clk",
- .ops = &meson_clk_cpu_ops,
- .parent_names = (const char *[]){ "sys_pll" },
+ .name = "mpll2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll2_div" },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
static u32 mux_table_clk81[] = { 6, 5, 7 };
-
-struct clk_mux meson8b_mpeg_clk_sel = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .mask = 0x7,
- .shift = 12,
- .flags = CLK_MUX_READ_ONLY,
- .table = mux_table_clk81,
- .lock = &meson_clk_lock,
+static struct clk_regmap meson8b_mpeg_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 12,
+ .table = mux_table_clk81,
+ },
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_sel",
- .ops = &clk_mux_ro_ops,
+ .ops = &clk_regmap_mux_ro_ops,
/*
* FIXME bits 14:12 selects from 8 possible parents:
* xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
@@ -387,34 +506,137 @@ struct clk_mux meson8b_mpeg_clk_sel = {
.parent_names = (const char *[]){ "fclk_div3", "fclk_div4",
"fclk_div5" },
.num_parents = 3,
- .flags = (CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED),
},
};
-struct clk_divider meson8b_mpeg_clk_div = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap meson8b_mpeg_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ro_ops,
.parent_names = (const char *[]){ "mpeg_clk_sel" },
.num_parents = 1,
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
},
};
-struct clk_gate meson8b_clk81 = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .bit_idx = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap meson8b_clk81 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .bit_idx = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "clk81",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "mpeg_clk_div" },
.num_parents = 1,
- .flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
+ .flags = CLK_IS_CRITICAL,
+ },
+};
+
+static struct clk_regmap meson8b_cpu_in_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x1,
+ .shift = 0,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_in_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "xtal", "sys_pll" },
+ .num_parents = 2,
+ .flags = (CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT),
+ },
+};
+
+static struct clk_fixed_factor meson8b_cpu_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_in_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor meson8b_cpu_div3 = {
+ .mult = 1,
+ .div = 3,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_div3",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_in_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct clk_div_table cpu_scale_table[] = {
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 6 },
+ { .val = 4, .div = 8 },
+ { .val = 5, .div = 10 },
+ { .val = 6, .div = 12 },
+ { .val = 7, .div = 14 },
+ { .val = 8, .div = 16 },
+ { /* sentinel */ },
+};
+
+static struct clk_regmap meson8b_cpu_scale_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .shift = 20,
+ .width = 9,
+ .table = cpu_scale_table,
+ .flags = CLK_DIVIDER_ALLOW_ZERO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_scale_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_in_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_cpu_scale_out_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x3,
+ .shift = 2,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_scale_out_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]) { "cpu_in_sel",
+ "cpu_div2",
+ "cpu_div3",
+ "cpu_scale_div" },
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_cpu_clk = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x1,
+ .shift = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "xtal",
+ "cpu_scale_out_sel" },
+ .num_parents = 2,
+ .flags = (CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT),
},
};
@@ -599,24 +821,26 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_MPLL0] = &meson8b_mpll0.hw,
[CLKID_MPLL1] = &meson8b_mpll1.hw,
[CLKID_MPLL2] = &meson8b_mpll2.hw,
+ [CLKID_MPLL0_DIV] = &meson8b_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &meson8b_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &meson8b_mpll2_div.hw,
+ [CLKID_CPU_IN_SEL] = &meson8b_cpu_in_sel.hw,
+ [CLKID_CPU_DIV2] = &meson8b_cpu_div2.hw,
+ [CLKID_CPU_DIV3] = &meson8b_cpu_div3.hw,
+ [CLKID_CPU_SCALE_DIV] = &meson8b_cpu_scale_div.hw,
+ [CLKID_CPU_SCALE_OUT_SEL] = &meson8b_cpu_scale_out_sel.hw,
+ [CLKID_MPLL_PREDIV] = &meson8b_mpll_prediv.hw,
+ [CLKID_FCLK_DIV2_DIV] = &meson8b_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &meson8b_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &meson8b_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &meson8b_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
};
-static struct meson_clk_pll *const meson8b_clk_plls[] = {
- &meson8b_fixed_pll,
- &meson8b_vid_pll,
- &meson8b_sys_pll,
-};
-
-static struct meson_clk_mpll *const meson8b_clk_mplls[] = {
- &meson8b_mpll0,
- &meson8b_mpll1,
- &meson8b_mpll2,
-};
-
-static struct clk_gate *const meson8b_clk_gates[] = {
+static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_clk81,
&meson8b_ddr,
&meson8b_dos,
@@ -695,14 +919,27 @@ static struct clk_gate *const meson8b_clk_gates[] = {
&meson8b_ao_ahb_sram,
&meson8b_ao_ahb_bus,
&meson8b_ao_iface,
-};
-
-static struct clk_mux *const meson8b_clk_muxes[] = {
- &meson8b_mpeg_clk_sel,
-};
-
-static struct clk_divider *const meson8b_clk_dividers[] = {
&meson8b_mpeg_clk_div,
+ &meson8b_mpeg_clk_sel,
+ &meson8b_mpll0,
+ &meson8b_mpll1,
+ &meson8b_mpll2,
+ &meson8b_mpll0_div,
+ &meson8b_mpll1_div,
+ &meson8b_mpll2_div,
+ &meson8b_fixed_pll,
+ &meson8b_vid_pll,
+ &meson8b_sys_pll,
+ &meson8b_cpu_in_sel,
+ &meson8b_cpu_scale_div,
+ &meson8b_cpu_scale_out_sel,
+ &meson8b_cpu_clk,
+ &meson8b_mpll_prediv,
+ &meson8b_fclk_div2,
+ &meson8b_fclk_div3,
+ &meson8b_fclk_div4,
+ &meson8b_fclk_div5,
+ &meson8b_fclk_div7,
};
static const struct meson8b_clk_reset_line {
@@ -804,82 +1041,45 @@ static const struct reset_control_ops meson8b_clk_reset_ops = {
.deassert = meson8b_clk_reset_deassert,
};
+static const struct regmap_config clkc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int meson8b_clkc_probe(struct platform_device *pdev)
{
- int ret, clkid, i;
- struct clk_hw *parent_hw;
- struct clk *parent_clk;
+ int ret, i;
struct device *dev = &pdev->dev;
+ struct regmap *map;
if (!clk_base)
return -ENXIO;
- /* Populate base address for PLLs */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_plls); i++)
- meson8b_clk_plls[i]->base = clk_base;
-
- /* Populate base address for MPLLs */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_mplls); i++)
- meson8b_clk_mplls[i]->base = clk_base;
-
- /* Populate the base address for CPU clk */
- meson8b_cpu_clk.base = clk_base;
-
- /* Populate base address for gates */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_gates); i++)
- meson8b_clk_gates[i]->reg = clk_base +
- (u32)meson8b_clk_gates[i]->reg;
-
- /* Populate base address for muxes */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_muxes); i++)
- meson8b_clk_muxes[i]->reg = clk_base +
- (u32)meson8b_clk_muxes[i]->reg;
+ map = devm_regmap_init_mmio(dev, clk_base, &clkc_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
- /* Populate base address for dividers */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_dividers); i++)
- meson8b_clk_dividers[i]->reg = clk_base +
- (u32)meson8b_clk_dividers[i]->reg;
+ /* Populate regmap for the regmap backed clocks */
+ for (i = 0; i < ARRAY_SIZE(meson8b_clk_regmaps); i++)
+ meson8b_clk_regmaps[i]->map = map;
/*
* register all clks
* CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1
*/
- for (clkid = CLKID_XTAL; clkid < CLK_NR_CLKS; clkid++) {
+ for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) {
/* array might be sparse */
- if (!meson8b_hw_onecell_data.hws[clkid])
+ if (!meson8b_hw_onecell_data.hws[i])
continue;
- /* FIXME convert to devm_clk_register */
- ret = devm_clk_hw_register(dev, meson8b_hw_onecell_data.hws[clkid]);
+ ret = devm_clk_hw_register(dev, meson8b_hw_onecell_data.hws[i]);
if (ret)
return ret;
}
- /*
- * Register CPU clk notifier
- *
- * FIXME this is wrong for a lot of reasons. First, the muxes should be
- * struct clk_hw objects. Second, we shouldn't program the muxes in
- * notifier handlers. The tricky programming sequence will be handled
- * by the forthcoming coordinated clock rates mechanism once that
- * feature is released.
- *
- * Furthermore, looking up the parent this way is terrible. At some
- * point we will stop allocating a default struct clk when registering
- * a new clk_hw, and this hack will no longer work. Releasing the ccr
- * feature before that time solves the problem :-)
- */
- parent_hw = clk_hw_get_parent(&meson8b_cpu_clk.hw);
- parent_clk = parent_hw->clk;
- ret = clk_notifier_register(parent_clk, &meson8b_cpu_clk.clk_nb);
- if (ret) {
- pr_err("%s: failed to register clock notifier for cpu_clk\n",
- __func__);
- return ret;
- }
-
- return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
- &meson8b_hw_onecell_data);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &meson8b_hw_onecell_data);
}
static const struct of_device_id meson8b_clkc_match_table[] = {
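The new meson8b CPU clock hierarchy above is registered with read-only ops, so its shape is easiest to see as plain arithmetic. The sketch below is a stand-alone user-space model of that path, not driver code; the field positions come from HHI_SYS_CPU_CLK_CNTL0/1 as used in the hunks, and the register values in main() are made up for illustration:

#include <stdio.h>

static unsigned long long cpu_rate(unsigned int cntl0, unsigned int cntl1,
				   unsigned long long xtal,
				   unsigned long long sys_pll)
{
	/* cpu_in_sel: CNTL0 bit 0 selects xtal (0) or sys_pll (1) */
	unsigned long long in = (cntl0 & 0x1) ? sys_pll : xtal;
	/* cpu_scale_div: CNTL1 bits 28:20; cpu_scale_table maps val -> 2 * val */
	unsigned int scale_val = (cntl1 >> 20) & 0x1ff;
	unsigned long long scaled;

	switch ((cntl0 >> 2) & 0x3) {		/* cpu_scale_out_sel */
	case 0:  scaled = in; break;		/* cpu_in_sel    */
	case 1:  scaled = in / 2; break;	/* cpu_div2      */
	case 2:  scaled = in / 3; break;	/* cpu_div3      */
	default: scaled = scale_val >= 2 ?	/* cpu_scale_div */
			  in / (2 * scale_val) : 0;
		 break;
	}

	/* cpu_clk: CNTL0 bit 7 selects xtal (0) or cpu_scale_out_sel (1) */
	return (cntl0 & 0x80) ? scaled : xtal;
}

int main(void)
{
	/* bit 0 = sys_pll, bits 3:2 = 0 (pass-through), bit 7 = scaled path */
	printf("%llu Hz\n", cpu_rate(0x81, 0, 24000000ULL, 1536000000ULL));
	return 0;
}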
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index 2eaf8a52e7dd..6e414bd36981 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -69,7 +69,22 @@
* will remain defined here.
*/
-#define CLK_NR_CLKS 96
+#define CLKID_MPLL0_DIV 96
+#define CLKID_MPLL1_DIV 97
+#define CLKID_MPLL2_DIV 98
+#define CLKID_CPU_IN_SEL 99
+#define CLKID_CPU_DIV2 100
+#define CLKID_CPU_DIV3 101
+#define CLKID_CPU_SCALE_DIV 102
+#define CLKID_CPU_SCALE_OUT_SEL 103
+#define CLKID_MPLL_PREDIV 104
+#define CLKID_FCLK_DIV2_DIV 105
+#define CLKID_FCLK_DIV3_DIV 106
+#define CLKID_FCLK_DIV4_DIV 107
+#define CLKID_FCLK_DIV5_DIV 108
+#define CLKID_FCLK_DIV7_DIV 109
+
+#define CLK_NR_CLKS 110
/*
* include the CLKID and RESETID that have
diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
index 394aa6f03f01..9ff4ea63932d 100644
--- a/drivers/clk/mvebu/armada-38x.c
+++ b/drivers/clk/mvebu/armada-38x.c
@@ -46,11 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
}
static const u32 armada_38x_cpu_frequencies[] __initconst = {
- 0, 0, 0, 0,
- 1066 * 1000 * 1000, 0, 0, 0,
+ 666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0,
+ 1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
1332 * 1000 * 1000, 0, 0, 0,
1600 * 1000 * 1000, 0, 0, 0,
- 1866 * 1000 * 1000,
+ 1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
};
static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
@@ -76,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
};
static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
- {1, 2}, {0, 1}, {0, 1}, {0, 1},
- {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {1, 2}, {0, 1},
+ {1, 2}, {0, 1}, {1, 2}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {1, 2},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
@@ -91,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
- {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {7, 15},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
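The armada-38x change only widens the SAR-decoded tables; the lookup itself stays a plain index into parallel arrays. A stand-alone illustration of how one SAR value resolves, using only entries visible in the hunks above and assuming, as in the mvebu core-clock code, that each ratio scales the CPU rate:

#include <stdio.h>

/* CPU frequencies indexed by the SAR field, entries as extended above (Hz). */
static const unsigned long cpu_freq[] = {
	 666000000, 0,  800000000, 0,
	1066000000, 0, 1200000000, 0,
	1332000000, 0, 0, 0,
	1600000000, 0, 0, 0,
	1866000000, 0, 0, 2000000000,
};

/* CPU-to-L2 ratios for the same indexes; only the first two rows shown here. */
static const int l2_ratio[][2] = {
	{1, 2}, {0, 1}, {1, 2}, {0, 1},
	{1, 2}, {0, 1}, {1, 2}, {0, 1},
};

int main(void)
{
	unsigned int sar_idx = 6;	/* example SAR selection */
	unsigned long cpu = cpu_freq[sar_idx];
	unsigned long l2 = cpu * l2_ratio[sar_idx][0] / l2_ratio[sar_idx][1];

	printf("cpu %lu Hz, l2 %lu Hz\n", cpu, l2);	/* 1200 MHz, 600 MHz */
	return 0;
}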
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index ca9a0a536174..75bf7b8f282f 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -13,18 +13,17 @@
/*
* CP110 has 6 core clocks:
*
- * - APLL (1 Ghz)
- * - PPv2 core (1/3 APLL)
- * - EIP (1/2 APLL)
- * - Core (1/2 EIP)
- * - SDIO (2/5 APLL)
+ * - PLL0 (1 Ghz)
+ * - PPv2 core (1/3 PLL0)
+ * - x2 Core (1/2 PLL0)
+ * - Core (1/2 x2 Core)
+ * - SDIO (2/5 PLL0)
*
* - NAND clock, which is either:
* - Equal to SDIO clock
- * - 2/5 APLL
+ * - 2/5 PLL0
*
- * CP110 has 32 gatable clocks, for the various peripherals in the
- * IP. They have fairly complicated parent/child relationships.
+ * CP110 has 32 gatable clocks, for the various peripherals in the IP.
*/
#define pr_fmt(fmt) "cp110-system-controller: " fmt
@@ -53,9 +52,9 @@ enum {
#define CP110_CLK_NUM \
(CP110_MAX_CORE_CLOCKS + CP110_MAX_GATABLE_CLOCKS)
-#define CP110_CORE_APLL 0
+#define CP110_CORE_PLL0 0
#define CP110_CORE_PPV2 1
-#define CP110_CORE_EIP 2
+#define CP110_CORE_X2CORE 2
#define CP110_CORE_CORE 3
#define CP110_CORE_NAND 4
#define CP110_CORE_SDIO 5
@@ -237,7 +236,7 @@ static int cp110_syscon_common_probe(struct platform_device *pdev,
struct regmap *regmap;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const char *ppv2_name, *apll_name, *core_name, *eip_name, *nand_name,
+ const char *ppv2_name, *pll0_name, *core_name, *x2core_name, *nand_name,
*sdio_name;
struct clk_hw_onecell_data *cp110_clk_data;
struct clk_hw *hw, **cp110_clks;
@@ -263,20 +262,20 @@ static int cp110_syscon_common_probe(struct platform_device *pdev,
cp110_clks = cp110_clk_data->hws;
cp110_clk_data->num = CP110_CLK_NUM;
- /* Register the APLL which is the root of the hw tree */
- apll_name = cp110_unique_name(dev, syscon_node, "apll");
- hw = clk_hw_register_fixed_rate(NULL, apll_name, NULL, 0,
+ /* Register the PLL0 which is the root of the hw tree */
+ pll0_name = cp110_unique_name(dev, syscon_node, "pll0");
+ hw = clk_hw_register_fixed_rate(NULL, pll0_name, NULL, 0,
1000 * 1000 * 1000);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
- goto fail_apll;
+ goto fail_pll0;
}
- cp110_clks[CP110_CORE_APLL] = hw;
+ cp110_clks[CP110_CORE_PLL0] = hw;
- /* PPv2 is APLL/3 */
+ /* PPv2 is PLL0/3 */
ppv2_name = cp110_unique_name(dev, syscon_node, "ppv2-core");
- hw = clk_hw_register_fixed_factor(NULL, ppv2_name, apll_name, 0, 1, 3);
+ hw = clk_hw_register_fixed_factor(NULL, ppv2_name, pll0_name, 0, 1, 3);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail_ppv2;
@@ -284,30 +283,32 @@ static int cp110_syscon_common_probe(struct platform_device *pdev,
cp110_clks[CP110_CORE_PPV2] = hw;
- /* EIP clock is APLL/2 */
- eip_name = cp110_unique_name(dev, syscon_node, "eip");
- hw = clk_hw_register_fixed_factor(NULL, eip_name, apll_name, 0, 1, 2);
+ /* X2CORE clock is PLL0/2 */
+ x2core_name = cp110_unique_name(dev, syscon_node, "x2core");
+ hw = clk_hw_register_fixed_factor(NULL, x2core_name, pll0_name,
+ 0, 1, 2);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail_eip;
}
- cp110_clks[CP110_CORE_EIP] = hw;
+ cp110_clks[CP110_CORE_X2CORE] = hw;
- /* Core clock is EIP/2 */
+ /* Core clock is X2CORE/2 */
core_name = cp110_unique_name(dev, syscon_node, "core");
- hw = clk_hw_register_fixed_factor(NULL, core_name, eip_name, 0, 1, 2);
+ hw = clk_hw_register_fixed_factor(NULL, core_name, x2core_name,
+ 0, 1, 2);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail_core;
}
cp110_clks[CP110_CORE_CORE] = hw;
- /* NAND can be either APLL/2.5 or core clock */
+ /* NAND can be either PLL0/2.5 or core clock */
nand_name = cp110_unique_name(dev, syscon_node, "nand-core");
if (nand_clk_ctrl & NF_CLOCK_SEL_400_MASK)
hw = clk_hw_register_fixed_factor(NULL, nand_name,
- apll_name, 0, 2, 5);
+ pll0_name, 0, 2, 5);
else
hw = clk_hw_register_fixed_factor(NULL, nand_name,
core_name, 0, 1, 1);
@@ -318,10 +319,10 @@ static int cp110_syscon_common_probe(struct platform_device *pdev,
cp110_clks[CP110_CORE_NAND] = hw;
- /* SDIO clock is APLL/2.5 */
+ /* SDIO clock is PLL0/2.5 */
sdio_name = cp110_unique_name(dev, syscon_node, "sdio-core");
hw = clk_hw_register_fixed_factor(NULL, sdio_name,
- apll_name, 0, 2, 5);
+ pll0_name, 0, 2, 5);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail_sdio;
@@ -341,40 +342,23 @@ static int cp110_syscon_common_probe(struct platform_device *pdev,
continue;
switch (i) {
- case CP110_GATE_AUDIO:
- case CP110_GATE_COMM_UNIT:
- case CP110_GATE_EIP150:
- case CP110_GATE_EIP197:
- case CP110_GATE_SLOW_IO:
- parent = gate_name[CP110_GATE_MAIN];
- break;
- case CP110_GATE_MG:
- parent = gate_name[CP110_GATE_MG_CORE];
- break;
case CP110_GATE_NAND:
parent = nand_name;
break;
+ case CP110_GATE_MG:
+ case CP110_GATE_GOP_DP:
case CP110_GATE_PPV2:
parent = ppv2_name;
break;
case CP110_GATE_SDIO:
parent = sdio_name;
break;
- case CP110_GATE_GOP_DP:
- parent = gate_name[CP110_GATE_SDMMC_GOP];
- break;
- case CP110_GATE_XOR1:
- case CP110_GATE_XOR0:
- case CP110_GATE_PCIE_X1_0:
- case CP110_GATE_PCIE_X1_1:
+ case CP110_GATE_MAIN:
+ case CP110_GATE_PCIE_XOR:
case CP110_GATE_PCIE_X4:
- parent = gate_name[CP110_GATE_PCIE_XOR];
- break;
- case CP110_GATE_SATA:
- case CP110_GATE_USB3H0:
- case CP110_GATE_USB3H1:
- case CP110_GATE_USB3DEV:
- parent = gate_name[CP110_GATE_SATA_USB];
+ case CP110_GATE_EIP150:
+ case CP110_GATE_EIP197:
+ parent = x2core_name;
break;
default:
parent = core_name;
@@ -413,12 +397,12 @@ fail_sdio:
fail_nand:
clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_CORE]);
fail_core:
- clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_EIP]);
+ clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_X2CORE]);
fail_eip:
clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_PPV2]);
fail_ppv2:
- clk_hw_unregister_fixed_rate(cp110_clks[CP110_CORE_APLL]);
-fail_apll:
+ clk_hw_unregister_fixed_rate(cp110_clks[CP110_CORE_PLL0]);
+fail_pll0:
return ret;
}
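After the renaming above, the CP110 core tree is still a chain of fixed-rate and fixed-factor clocks, so the resulting rates follow directly from the ratios in the hunk. A stand-alone check of that arithmetic, assuming the 400 MHz NAND mode (NF_CLOCK_SEL_400 set), where NAND uses the same 2/5 ratio as SDIO:

#include <stdio.h>

int main(void)
{
	unsigned long pll0   = 1000000000UL;	/* fixed 1 GHz root          */
	unsigned long ppv2   = pll0 / 3;	/* PPv2 core = PLL0 / 3      */
	unsigned long x2core = pll0 / 2;	/* x2 Core   = PLL0 / 2      */
	unsigned long core   = x2core / 2;	/* Core      = x2 Core / 2   */
	unsigned long sdio   = pll0 * 2 / 5;	/* SDIO      = PLL0 * 2 / 5  */
	unsigned long nand   = sdio;		/* 400 MHz mode: same ratio  */

	printf("ppv2 %lu, x2core %lu, core %lu, sdio %lu, nand %lu\n",
	       ppv2, x2core, core, sdio, nand);
	return 0;
}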
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index f5d815f577e0..5eeecee17b69 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -67,6 +67,7 @@
#define LPC32XX_USB_CLK_STS 0xF8
static struct regmap_config lpc32xx_scb_regmap_config = {
+ .name = "scb",
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
index 4e9b8c2c8980..1ee75a5e93f4 100644
--- a/drivers/clk/qcom/clk-regmap-divider.c
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -28,22 +28,14 @@ static long div_round_ro_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
struct clk_regmap *clkr = &divider->clkr;
- u32 div;
- struct clk_hw *hw_parent = clk_hw_get_parent(hw);
-
- regmap_read(clkr->regmap, divider->reg, &div);
- div >>= divider->shift;
- div &= BIT(divider->width) - 1;
- div += 1;
-
- if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
- if (!hw_parent)
- return -EINVAL;
+ u32 val;
- *prate = clk_hw_round_rate(hw_parent, rate * div);
- }
+ regmap_read(clkr->regmap, divider->reg, &val);
+ val >>= divider->shift;
+ val &= BIT(divider->width) - 1;
- return DIV_ROUND_UP_ULL((u64)*prate, div);
+ return divider_ro_round_rate(hw, rate, prate, NULL, divider->width,
+ CLK_DIVIDER_ROUND_CLOSEST, val);
}
static long div_round_rate(struct clk_hw *hw, unsigned long rate,
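The read-only rounding above now goes through the generic divider_ro_round_rate() helper, but the effective behaviour is unchanged: with a fixed field value, the only achievable rate is the parent rate divided by the decoded divisor. A stand-alone model of the removed open-coded path, assuming the default encoding where the divisor is the field value plus one; the parent rate and register value are examples only:

#include <stdio.h>

/* Decoded divisor for the default (zero-based) divider encoding. */
static unsigned int decode_div(unsigned int reg, unsigned int shift,
			       unsigned int width)
{
	return ((reg >> shift) & ((1u << width) - 1)) + 1;
}

int main(void)
{
	unsigned long parent = 19200000;	/* example parent rate, Hz */
	unsigned int reg = 0x3;			/* example register value  */
	unsigned int div = decode_div(reg, 0, 4);

	/* Same result as the removed DIV_ROUND_UP_ULL(*prate, div). */
	printf("div %u -> %lu Hz\n", div, (parent + div - 1) / div);
	return 0;
}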
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index c60f61b10c7f..b94981447664 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -29,6 +29,7 @@
#define QCOM_RPM_MISC_CLK_TYPE 0x306b6c63
#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+#define QCOM_RPM_XO_MODE_ON 0x2
#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \
static struct clk_rpm _platform##_##_active; \
@@ -56,6 +57,18 @@
}, \
}
+#define DEFINE_CLK_RPM_XO_BUFFER(_platform, _name, _active, offset) \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = QCOM_RPM_CXO_BUFFERS, \
+ .xo_offset = (offset), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_xo_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "cxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
#define DEFINE_CLK_RPM_FIXED(_platform, _name, _active, r_id, r) \
static struct clk_rpm _platform##_##_name = { \
.rpm_clk_id = (r_id), \
@@ -126,8 +139,11 @@
#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)
+struct rpm_cc;
+
struct clk_rpm {
const int rpm_clk_id;
+ const int xo_offset;
const bool active_only;
unsigned long rate;
bool enabled;
@@ -135,12 +151,15 @@ struct clk_rpm {
struct clk_rpm *peer;
struct clk_hw hw;
struct qcom_rpm *rpm;
+ struct rpm_cc *rpm_cc;
};
struct rpm_cc {
struct qcom_rpm *rpm;
struct clk_rpm **clks;
size_t num_clks;
+ u32 xo_buffer_value;
+ struct mutex xo_lock;
};
struct rpm_clk_desc {
@@ -159,7 +178,8 @@ static int clk_rpm_handoff(struct clk_rpm *r)
* The vendor tree simply reads the status for this
* RPM clock.
*/
- if (r->rpm_clk_id == QCOM_RPM_PLL_4)
+ if (r->rpm_clk_id == QCOM_RPM_PLL_4 ||
+ r->rpm_clk_id == QCOM_RPM_CXO_BUFFERS)
return 0;
ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
@@ -288,6 +308,46 @@ out:
mutex_unlock(&rpm_clk_lock);
}
+static int clk_rpm_xo_prepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct rpm_cc *rcc = r->rpm_cc;
+ int ret, clk_id = r->rpm_clk_id;
+ u32 value;
+
+ mutex_lock(&rcc->xo_lock);
+
+ value = rcc->xo_buffer_value | (QCOM_RPM_XO_MODE_ON << r->xo_offset);
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
+ if (!ret) {
+ r->enabled = true;
+ rcc->xo_buffer_value = value;
+ }
+
+ mutex_unlock(&rcc->xo_lock);
+
+ return ret;
+}
+
+static void clk_rpm_xo_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct rpm_cc *rcc = r->rpm_cc;
+ int ret, clk_id = r->rpm_clk_id;
+ u32 value;
+
+ mutex_lock(&rcc->xo_lock);
+
+ value = rcc->xo_buffer_value & ~(QCOM_RPM_XO_MODE_ON << r->xo_offset);
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
+ if (!ret) {
+ r->enabled = false;
+ rcc->xo_buffer_value = value;
+ }
+
+ mutex_unlock(&rcc->xo_lock);
+}
+
static int clk_rpm_fixed_prepare(struct clk_hw *hw)
{
struct clk_rpm *r = to_clk_rpm(hw);
@@ -378,6 +438,11 @@ static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
return r->rate;
}
+static const struct clk_ops clk_rpm_xo_ops = {
+ .prepare = clk_rpm_xo_prepare,
+ .unprepare = clk_rpm_xo_unprepare,
+};
+
static const struct clk_ops clk_rpm_fixed_ops = {
.prepare = clk_rpm_fixed_prepare,
.unprepare = clk_rpm_fixed_unprepare,
@@ -449,6 +514,11 @@ DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK);
+DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_d0_clk, xo_d0_a_clk, 0);
+DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_d1_clk, xo_d1_a_clk, 8);
+DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a0_clk, xo_a0_a_clk, 16);
+DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a1_clk, xo_a1_a_clk, 24);
+DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a2_clk, xo_a2_a_clk, 28);
static struct clk_rpm *apq8064_clks[] = {
[RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk,
@@ -469,6 +539,11 @@ static struct clk_rpm *apq8064_clks[] = {
[RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk,
[RPM_QDSS_CLK] = &apq8064_qdss_clk,
[RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk,
+ [RPM_XO_D0] = &apq8064_xo_d0_clk,
+ [RPM_XO_D1] = &apq8064_xo_d1_clk,
+ [RPM_XO_A0] = &apq8064_xo_a0_clk,
+ [RPM_XO_A1] = &apq8064_xo_a1_clk,
+ [RPM_XO_A2] = &apq8064_xo_a2_clk,
};
static const struct rpm_clk_desc rpm_clk_apq8064 = {
@@ -526,12 +601,14 @@ static int rpm_clk_probe(struct platform_device *pdev)
rcc->clks = rpm_clks;
rcc->num_clks = num_clks;
+ mutex_init(&rcc->xo_lock);
for (i = 0; i < num_clks; i++) {
if (!rpm_clks[i])
continue;
rpm_clks[i]->rpm = rpm;
+ rpm_clks[i]->rpm_cc = rcc;
ret = clk_rpm_handoff(rpm_clks[i]);
if (ret)
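The XO buffer support added above shares one RPM request word between all buffers: each buffer owns a field at xo_offset, writing QCOM_RPM_XO_MODE_ON into that field turns it on, and xo_buffer_value caches the last written word under xo_lock. The bit arithmetic in isolation, with the mode value and offsets taken from the hunks above:

#include <stdio.h>

#define QCOM_RPM_XO_MODE_ON	0x2

int main(void)
{
	unsigned int value = 0;

	/* Enable xo_d1 (offset 8) and xo_a0 (offset 16), as the prepare op does. */
	value |= QCOM_RPM_XO_MODE_ON << 8;
	value |= QCOM_RPM_XO_MODE_ON << 16;
	printf("buffer word after enable:  0x%08x\n", value);	/* 0x00020200 */

	/* Disable xo_d1 again, as the unprepare op does. */
	value &= ~(QCOM_RPM_XO_MODE_ON << 8);
	printf("buffer word after disable: 0x%08x\n", value);	/* 0x00020000 */
	return 0;
}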
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index c26d9007bfc4..850c02a52248 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -686,7 +686,7 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
goto err;
}
- ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_smdrpm_clk_hw_get,
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, qcom_smdrpm_clk_hw_get,
rcc);
if (ret)
goto err;
@@ -697,19 +697,12 @@ err:
return ret;
}
-static int rpm_smd_clk_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
- return 0;
-}
-
static struct platform_driver rpm_smd_clk_driver = {
.driver = {
.name = "qcom-clk-smd-rpm",
.of_match_table = rpm_smd_clk_match_table,
},
.probe = rpm_smd_clk_probe,
- .remove = rpm_smd_clk_remove,
};
static int __init rpm_smd_clk_init(void)
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 5d7451209206..3d6452932797 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2895,7 +2895,7 @@ static struct clk_branch gcc_aggre0_snoc_axi_clk = {
.name = "gcc_aggre0_snoc_axi_clk",
.parent_names = (const char *[]){ "system_noc_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -2910,7 +2910,7 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
.name = "gcc_aggre0_cnoc_ahb_clk",
.parent_names = (const char *[]){ "config_noc_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -2925,7 +2925,7 @@ static struct clk_branch gcc_smmu_aggre0_axi_clk = {
.name = "gcc_smmu_aggre0_axi_clk",
.parent_names = (const char *[]){ "system_noc_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -2940,7 +2940,7 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
.name = "gcc_smmu_aggre0_ahb_clk",
.parent_names = (const char *[]){ "config_noc_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 43b5a89c4b28..ef76c861ec84 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -15,7 +15,9 @@ config CLK_RENESAS
select CLK_R8A7794 if ARCH_R8A7794
select CLK_R8A7795 if ARCH_R8A7795
select CLK_R8A7796 if ARCH_R8A7796
+ select CLK_R8A77965 if ARCH_R8A77965
select CLK_R8A77970 if ARCH_R8A77970
+ select CLK_R8A77980 if ARCH_R8A77980
select CLK_R8A77995 if ARCH_R8A77995
select CLK_SH73A0 if ARCH_SH73A0
@@ -24,12 +26,13 @@ if CLK_RENESAS
config CLK_RENESAS_LEGACY
bool "Legacy DT clock support"
depends on CLK_R8A7790 || CLK_R8A7791 || CLK_R8A7792 || CLK_R8A7794
- default y
help
Enable backward compatibility with old device trees describing a
hierarchical representation of the various CPG and MSTP clocks.
Say Y if you want your kernel to work with old DTBs.
+ It is safe to say N if you use the DTS that is supplied with the
+ current kernel source tree.
# SoC
config CLK_EMEV2
@@ -96,10 +99,18 @@ config CLK_R8A7796
bool "R-Car M3-W clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R8A77965
+ bool "R-Car M3-N clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
config CLK_R8A77970
bool "R-Car V3M clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R8A77980
+ bool "R-Car V3H clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
config CLK_R8A77995
bool "R-Car D3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 34c4e0b37afa..6c0f19636e3e 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -14,7 +14,9 @@ obj-$(CONFIG_CLK_R8A7792) += r8a7792-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7794) += r8a7794-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7795) += r8a7795-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7796) += r8a7796-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77965) += r8a77965-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77970) += r8a77970-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77980) += r8a77980-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 151336d2ba59..9febbf42c3df 100644
--- a/drivers/clk/renesas/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -53,9 +53,9 @@ static int cpg_div6_clock_enable(struct clk_hw *hw)
struct div6_clock *clock = to_div6_clock(hw);
u32 val;
- val = (clk_readl(clock->reg) & ~(CPG_DIV6_DIV_MASK | CPG_DIV6_CKSTP))
+ val = (readl(clock->reg) & ~(CPG_DIV6_DIV_MASK | CPG_DIV6_CKSTP))
| CPG_DIV6_DIV(clock->div - 1);
- clk_writel(val, clock->reg);
+ writel(val, clock->reg);
return 0;
}
@@ -65,7 +65,7 @@ static void cpg_div6_clock_disable(struct clk_hw *hw)
struct div6_clock *clock = to_div6_clock(hw);
u32 val;
- val = clk_readl(clock->reg);
+ val = readl(clock->reg);
val |= CPG_DIV6_CKSTP;
/*
* DIV6 clocks require the divisor field to be non-zero when stopping
@@ -75,14 +75,14 @@ static void cpg_div6_clock_disable(struct clk_hw *hw)
*/
if (!(val & CPG_DIV6_DIV_MASK))
val |= CPG_DIV6_DIV_MASK;
- clk_writel(val, clock->reg);
+ writel(val, clock->reg);
}
static int cpg_div6_clock_is_enabled(struct clk_hw *hw)
{
struct div6_clock *clock = to_div6_clock(hw);
- return !(clk_readl(clock->reg) & CPG_DIV6_CKSTP);
+ return !(readl(clock->reg) & CPG_DIV6_CKSTP);
}
static unsigned long cpg_div6_clock_recalc_rate(struct clk_hw *hw,
@@ -122,10 +122,10 @@ static int cpg_div6_clock_set_rate(struct clk_hw *hw, unsigned long rate,
clock->div = div;
- val = clk_readl(clock->reg) & ~CPG_DIV6_DIV_MASK;
+ val = readl(clock->reg) & ~CPG_DIV6_DIV_MASK;
/* Only program the new divisor if the clock isn't stopped. */
if (!(val & CPG_DIV6_CKSTP))
- clk_writel(val | CPG_DIV6_DIV(clock->div - 1), clock->reg);
+ writel(val | CPG_DIV6_DIV(clock->div - 1), clock->reg);
return 0;
}
@@ -139,7 +139,7 @@ static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
if (clock->src_width == 0)
return 0;
- hw_index = (clk_readl(clock->reg) >> clock->src_shift) &
+ hw_index = (readl(clock->reg) >> clock->src_shift) &
(BIT(clock->src_width) - 1);
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
if (clock->parents[i] == hw_index)
@@ -163,8 +163,8 @@ static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
mask = ~((BIT(clock->src_width) - 1) << clock->src_shift);
hw_index = clock->parents[index];
- clk_writel((clk_readl(clock->reg) & mask) |
- (hw_index << clock->src_shift), clock->reg);
+ writel((readl(clock->reg) & mask) | (hw_index << clock->src_shift),
+ clock->reg);
return 0;
}
@@ -241,7 +241,7 @@ struct clk * __init cpg_div6_register(const char *name,
* Read the divisor. Disabling the clock overwrites the divisor, so we
* need to cache its value for the enable operation.
*/
- clock->div = (clk_readl(clock->reg) & CPG_DIV6_DIV_MASK) + 1;
+ clock->div = (readl(clock->reg) & CPG_DIV6_DIV_MASK) + 1;
switch (num_parents) {
case 1:
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 858c24d4da8f..e82adcb16a52 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -64,13 +64,13 @@ struct mstp_clock {
static inline u32 cpg_mstp_read(struct mstp_clock_group *group,
u32 __iomem *reg)
{
- return group->width_8bit ? readb(reg) : clk_readl(reg);
+ return group->width_8bit ? readb(reg) : readl(reg);
}
static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val,
u32 __iomem *reg)
{
- group->width_8bit ? writeb(val, reg) : clk_writel(val, reg);
+ group->width_8bit ? writeb(val, reg) : writel(val, reg);
}
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index 28d204bb659e..7b903ce4c901 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -71,7 +71,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
if (!strcmp(name, "main")) {
- u32 ckscr = clk_readl(cpg->reg + CPG_CKSCR);
+ u32 ckscr = readl(cpg->reg + CPG_CKSCR);
switch ((ckscr >> 28) & 3) {
case 0: /* extal1 */
@@ -95,14 +95,14 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
* clock implementation and we currently have no need to change
* the multiplier value.
*/
- u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ u32 value = readl(cpg->reg + CPG_PLL0CR);
parent_name = "main";
mult = ((value >> 24) & 0x7f) + 1;
if (value & BIT(20))
div = 2;
} else if (!strcmp(name, "pll1")) {
- u32 value = clk_readl(cpg->reg + CPG_PLL1CR);
+ u32 value = readl(cpg->reg + CPG_PLL1CR);
parent_name = "main";
/* XXX: enable bit? */
@@ -125,7 +125,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
default:
return ERR_PTR(-EINVAL);
}
- value = clk_readl(cpg->reg + cr);
+ value = readl(cpg->reg + cr);
switch ((value >> 5) & 7) {
case 0:
parent_name = "main";
@@ -161,8 +161,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
shift = 0;
}
div *= 32;
- mult = 0x20 - ((clk_readl(cpg->reg + CPG_FRQCRC) >> shift)
- & 0x1f);
+ mult = 0x20 - ((readl(cpg->reg + CPG_FRQCRC) >> shift) & 0x1f);
} else {
struct div4_clk *c;
diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index 2f7ce6696b6c..d074f8e982d0 100644
--- a/drivers/clk/renesas/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -98,20 +98,20 @@ r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
* clock implementation and we currently have no need to change
* the multiplier value.
*/
- u32 value = clk_readl(cpg->reg + CPG_FRQCRC);
+ u32 value = readl(cpg->reg + CPG_FRQCRC);
parent_name = "system";
mult = ((value >> 24) & 0x7f) + 1;
} else if (!strcmp(name, "pllc1")) {
- u32 value = clk_readl(cpg->reg + CPG_FRQCRA);
+ u32 value = readl(cpg->reg + CPG_FRQCRA);
parent_name = "system";
mult = ((value >> 24) & 0x7f) + 1;
div = 2;
} else if (!strcmp(name, "pllc2")) {
- u32 value = clk_readl(cpg->reg + CPG_PLLC2CR);
+ u32 value = readl(cpg->reg + CPG_PLLC2CR);
parent_name = "system";
mult = ((value >> 24) & 0x3f) + 1;
} else if (!strcmp(name, "usb24s")) {
- u32 value = clk_readl(cpg->reg + CPG_USBCKCR);
+ u32 value = readl(cpg->reg + CPG_USBCKCR);
if (value & BIT(7))
/* extal2 */
parent_name = of_clk_get_parent_name(np, 1);
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index d14cbe1ca29a..ee32a022e6da 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -62,8 +62,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
unsigned int mult;
unsigned int val;
- val = (clk_readl(zclk->reg) & CPG_FRQCRC_ZFC_MASK)
- >> CPG_FRQCRC_ZFC_SHIFT;
+ val = (readl(zclk->reg) & CPG_FRQCRC_ZFC_MASK) >> CPG_FRQCRC_ZFC_SHIFT;
mult = 32 - val;
return div_u64((u64)parent_rate * mult, 32);
@@ -95,21 +94,21 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
mult = div_u64((u64)rate * 32, parent_rate);
mult = clamp(mult, 1U, 32U);
- if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- val = clk_readl(zclk->reg);
+ val = readl(zclk->reg);
val &= ~CPG_FRQCRC_ZFC_MASK;
val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
- clk_writel(val, zclk->reg);
+ writel(val, zclk->reg);
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
* clock change completion.
*/
- kick = clk_readl(zclk->kick_reg);
+ kick = readl(zclk->kick_reg);
kick |= CPG_FRQCRB_KICK;
- clk_writel(kick, zclk->kick_reg);
+ writel(kick, zclk->kick_reg);
/*
* Note: There is no HW information about the worst case latency.
@@ -121,7 +120,7 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
* "super" safe value.
*/
for (i = 1000; i; i--) {
- if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
return 0;
cpu_relax();
@@ -332,7 +331,7 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
mult = config->pll0_mult;
div = 3;
} else {
- u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ u32 value = readl(cpg->reg + CPG_PLL0CR);
mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
}
parent_name = "main";
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 127c58135c8f..67dd712aa723 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -75,9 +75,9 @@ rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *na
* let them run at fixed current speed and implement the details later.
*/
if (strcmp(name, "i") == 0)
- val = (clk_readl(cpg->reg + CPG_FRQCR) >> 8) & 3;
+ val = (readl(cpg->reg + CPG_FRQCR) >> 8) & 3;
else if (strcmp(name, "g") == 0)
- val = clk_readl(cpg->reg + CPG_FRQCR2) & 3;
+ val = readl(cpg->reg + CPG_FRQCR2) & 3;
else
return ERR_PTR(-EINVAL);
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index eea38f6ea77e..bab33610eb6c 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -46,7 +46,7 @@ struct div4_clk {
unsigned int shift;
};
-static struct div4_clk div4_clks[] = {
+static const struct div4_clk div4_clks[] = {
{ "zg", "pll0", CPG_FRQCRA, 16 },
{ "m3", "pll1", CPG_FRQCRA, 12 },
{ "b", "pll1", CPG_FRQCRA, 8 },
@@ -79,13 +79,13 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
{
const struct clk_div_table *table = NULL;
unsigned int shift, reg, width;
- const char *parent_name;
+ const char *parent_name = NULL;
unsigned int mult = 1;
unsigned int div = 1;
if (!strcmp(name, "main")) {
/* extal1, extal1_div2, extal2, extal2_div2 */
- u32 parent_idx = (clk_readl(cpg->reg + CPG_CKSCR) >> 28) & 3;
+ u32 parent_idx = (readl(cpg->reg + CPG_CKSCR) >> 28) & 3;
parent_name = of_clk_get_parent_name(np, parent_idx >> 1);
div = (parent_idx & 1) + 1;
@@ -110,11 +110,11 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
default:
return ERR_PTR(-EINVAL);
}
- if (clk_readl(cpg->reg + CPG_PLLECR) & BIT(enable_bit)) {
- mult = ((clk_readl(enable_reg) >> 24) & 0x3f) + 1;
+ if (readl(cpg->reg + CPG_PLLECR) & BIT(enable_bit)) {
+ mult = ((readl(enable_reg) >> 24) & 0x3f) + 1;
/* handle CFG bit for PLL1 and PLL2 */
if (enable_bit == 1 || enable_bit == 2)
- if (clk_readl(enable_reg) & BIT(20))
+ if (readl(enable_reg) & BIT(20))
mult *= 2;
}
} else if (!strcmp(name, "dsi0phy") || !strcmp(name, "dsi1phy")) {
@@ -135,7 +135,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
shift = 24;
width = 5;
} else {
- struct div4_clk *c;
+ const struct div4_clk *c;
for (c = div4_clks; c->name; c++) {
if (!strcmp(name, c->name)) {
@@ -193,9 +193,9 @@ static void __init sh73a0_cpg_clocks_init(struct device_node *np)
return;
/* Set SDHI clocks to a known state */
- clk_writel(0x108, cpg->reg + CPG_SD0CKCR);
- clk_writel(0x108, cpg->reg + CPG_SD1CKCR);
- clk_writel(0x108, cpg->reg + CPG_SD2CKCR);
+ writel(0x108, cpg->reg + CPG_SD0CKCR);
+ writel(0x108, cpg->reg + CPG_SD1CKCR);
+ writel(0x108, cpg->reg + CPG_SD2CKCR);
for (i = 0; i < num_clks; ++i) {
const char *name;
diff --git a/drivers/clk/renesas/r8a7743-cpg-mssr.c b/drivers/clk/renesas/r8a7743-cpg-mssr.c
index 6dc0b3082aa6..d3c8b1e2969f 100644
--- a/drivers/clk/renesas/r8a7743-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7743-cpg-mssr.c
@@ -117,6 +117,7 @@ static const struct mssr_mod_clk r8a7743_mod_clks[] __initconst = {
DEF_MOD("cmt1", 329, R8A7743_CLK_R),
DEF_MOD("usbhs-dmac0", 330, R8A7743_CLK_HP),
DEF_MOD("usbhs-dmac1", 331, R8A7743_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A7743_CLK_R),
DEF_MOD("irqc", 407, R8A7743_CLK_CP),
DEF_MOD("intc-sys", 408, R8A7743_CLK_ZS),
DEF_MOD("audio-dmac1", 501, R8A7743_CLK_HP),
@@ -195,6 +196,7 @@ static const struct mssr_mod_clk r8a7743_mod_clks[] __initconst = {
};
static const unsigned int r8a7743_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
MOD_CLK_ID(408), /* INTC-SYS (GIC) */
};
diff --git a/drivers/clk/renesas/r8a7745-cpg-mssr.c b/drivers/clk/renesas/r8a7745-cpg-mssr.c
index 2859504cc866..87f5a3619e4f 100644
--- a/drivers/clk/renesas/r8a7745-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7745-cpg-mssr.c
@@ -114,6 +114,7 @@ static const struct mssr_mod_clk r8a7745_mod_clks[] __initconst = {
DEF_MOD("cmt1", 329, R8A7745_CLK_R),
DEF_MOD("usbhs-dmac0", 330, R8A7745_CLK_HP),
DEF_MOD("usbhs-dmac1", 331, R8A7745_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A7745_CLK_R),
DEF_MOD("irqc", 407, R8A7745_CLK_CP),
DEF_MOD("intc-sys", 408, R8A7745_CLK_ZS),
DEF_MOD("audio-dmac0", 502, R8A7745_CLK_HP),
@@ -180,6 +181,7 @@ static const struct mssr_mod_clk r8a7745_mod_clks[] __initconst = {
};
static const unsigned int r8a7745_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
MOD_CLK_ID(408), /* INTC-SYS (GIC) */
};
diff --git a/drivers/clk/renesas/r8a7790-cpg-mssr.c b/drivers/clk/renesas/r8a7790-cpg-mssr.c
index 46bb55bb223d..f936cb74b681 100644
--- a/drivers/clk/renesas/r8a7790-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7790-cpg-mssr.c
@@ -140,6 +140,7 @@ static const struct mssr_mod_clk r8a7790_mod_clks[] __initconst = {
DEF_MOD("cmt1", 329, R8A7790_CLK_R),
DEF_MOD("usbhs-dmac0", 330, R8A7790_CLK_HP),
DEF_MOD("usbhs-dmac1", 331, R8A7790_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A7790_CLK_R),
DEF_MOD("irqc", 407, R8A7790_CLK_CP),
DEF_MOD("intc-sys", 408, R8A7790_CLK_ZS),
DEF_MOD("audio-dmac1", 501, R8A7790_CLK_HP),
@@ -211,6 +212,7 @@ static const struct mssr_mod_clk r8a7790_mod_clks[] __initconst = {
};
static const unsigned int r8a7790_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
MOD_CLK_ID(408), /* INTC-SYS (GIC) */
};
diff --git a/drivers/clk/renesas/r8a7791-cpg-mssr.c b/drivers/clk/renesas/r8a7791-cpg-mssr.c
index c0b51f9bb278..820b220b09cc 100644
--- a/drivers/clk/renesas/r8a7791-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7791-cpg-mssr.c
@@ -128,6 +128,7 @@ static const struct mssr_mod_clk r8a7791_mod_clks[] __initconst = {
DEF_MOD("cmt1", 329, R8A7791_CLK_R),
DEF_MOD("usbhs-dmac0", 330, R8A7791_CLK_HP),
DEF_MOD("usbhs-dmac1", 331, R8A7791_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A7791_CLK_R),
DEF_MOD("irqc", 407, R8A7791_CLK_CP),
DEF_MOD("intc-sys", 408, R8A7791_CLK_ZS),
DEF_MOD("audio-dmac1", 501, R8A7791_CLK_HP),
@@ -209,6 +210,7 @@ static const struct mssr_mod_clk r8a7791_mod_clks[] __initconst = {
};
static const unsigned int r8a7791_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
MOD_CLK_ID(408), /* INTC-SYS (GIC) */
};
diff --git a/drivers/clk/renesas/r8a7792-cpg-mssr.c b/drivers/clk/renesas/r8a7792-cpg-mssr.c
index 7f85bbf20bf7..609a54080496 100644
--- a/drivers/clk/renesas/r8a7792-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7792-cpg-mssr.c
@@ -98,6 +98,7 @@ static const struct mssr_mod_clk r8a7792_mod_clks[] __initconst = {
DEF_MOD("tpu0", 304, R8A7792_CLK_CP),
DEF_MOD("sdhi0", 314, R8A7792_CLK_SD),
DEF_MOD("cmt1", 329, R8A7792_CLK_R),
+ DEF_MOD("rwdt", 402, R8A7792_CLK_R),
DEF_MOD("irqc", 407, R8A7792_CLK_CP),
DEF_MOD("intc-sys", 408, R8A7792_CLK_ZS),
DEF_MOD("audio-dmac0", 502, R8A7792_CLK_HP),
@@ -154,6 +155,7 @@ static const struct mssr_mod_clk r8a7792_mod_clks[] __initconst = {
};
static const unsigned int r8a7792_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
MOD_CLK_ID(408), /* INTC-SYS (GIC) */
};
diff --git a/drivers/clk/renesas/r8a7794-cpg-mssr.c b/drivers/clk/renesas/r8a7794-cpg-mssr.c
index ec091a42da54..2a40bbeaeeaf 100644
--- a/drivers/clk/renesas/r8a7794-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7794-cpg-mssr.c
@@ -121,6 +121,7 @@ static const struct mssr_mod_clk r8a7794_mod_clks[] __initconst = {
DEF_MOD("cmt1", 329, R8A7794_CLK_R),
DEF_MOD("usbhs-dmac0", 330, R8A7794_CLK_HP),
DEF_MOD("usbhs-dmac1", 331, R8A7794_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A7794_CLK_R),
DEF_MOD("irqc", 407, R8A7794_CLK_CP),
DEF_MOD("intc-sys", 408, R8A7794_CLK_ZS),
DEF_MOD("audio-dmac0", 502, R8A7794_CLK_HP),
@@ -190,6 +191,7 @@ static const struct mssr_mod_clk r8a7794_mod_clks[] __initconst = {
};
static const unsigned int r8a7794_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
MOD_CLK_ID(408), /* INTC-SYS (GIC) */
};
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index b1d9f48eae9e..775b0ceaa337 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -74,6 +74,8 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
/* Core Clock Outputs */
+ DEF_BASE("z", R8A7795_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
+ DEF_BASE("z2", R8A7795_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
DEF_FIXED("ztr", R8A7795_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A7795_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A7795_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 41e29734126b..dfb267a92f2a 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -74,6 +74,8 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
/* Core Clock Outputs */
+ DEF_BASE("z", R8A7796_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
+ DEF_BASE("z2", R8A7796_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
DEF_FIXED("ztr", R8A7796_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A7796_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A7796_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
new file mode 100644
index 000000000000..b1acfb60351c
--- /dev/null
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a77965 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ *
+ * Based on r8a7795-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a77965-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A77965_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL4,
+ CLK_PLL1_DIV2,
+ CLK_PLL1_DIV4,
+ CLK_S0,
+ CLK_S1,
+ CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+ CLK_SSPSRC,
+ CLK_RINT,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+ DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
+ DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("z", R8A77965_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
+ DEF_FIXED("ztr", R8A77965_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED("ztrd2", R8A77965_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
+ DEF_FIXED("zt", R8A77965_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("zx", R8A77965_CLK_ZX, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED("s0d1", R8A77965_CLK_S0D1, CLK_S0, 1, 1),
+ DEF_FIXED("s0d2", R8A77965_CLK_S0D2, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3", R8A77965_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4", R8A77965_CLK_S0D4, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6", R8A77965_CLK_S0D6, CLK_S0, 6, 1),
+ DEF_FIXED("s0d8", R8A77965_CLK_S0D8, CLK_S0, 8, 1),
+ DEF_FIXED("s0d12", R8A77965_CLK_S0D12, CLK_S0, 12, 1),
+ DEF_FIXED("s1d1", R8A77965_CLK_S1D1, CLK_S1, 1, 1),
+ DEF_FIXED("s1d2", R8A77965_CLK_S1D2, CLK_S1, 2, 1),
+ DEF_FIXED("s1d4", R8A77965_CLK_S1D4, CLK_S1, 4, 1),
+ DEF_FIXED("s2d1", R8A77965_CLK_S2D1, CLK_S2, 1, 1),
+ DEF_FIXED("s2d2", R8A77965_CLK_S2D2, CLK_S2, 2, 1),
+ DEF_FIXED("s2d4", R8A77965_CLK_S2D4, CLK_S2, 4, 1),
+ DEF_FIXED("s3d1", R8A77965_CLK_S3D1, CLK_S3, 1, 1),
+ DEF_FIXED("s3d2", R8A77965_CLK_S3D2, CLK_S3, 2, 1),
+ DEF_FIXED("s3d4", R8A77965_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_GEN3_SD("sd0", R8A77965_CLK_SD0, CLK_SDSRC, 0x074),
+ DEF_GEN3_SD("sd1", R8A77965_CLK_SD1, CLK_SDSRC, 0x078),
+ DEF_GEN3_SD("sd2", R8A77965_CLK_SD2, CLK_SDSRC, 0x268),
+ DEF_GEN3_SD("sd3", R8A77965_CLK_SD3, CLK_SDSRC, 0x26c),
+
+ DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cp", R8A77965_CLK_CP, CLK_EXTAL, 2, 1),
+
+ DEF_DIV6P1("canfd", R8A77965_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
+ DEF_DIV6P1("csi0", R8A77965_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
+ DEF_DIV6P1("mso", R8A77965_CLK_MSO, CLK_PLL1_DIV4, 0x014),
+ DEF_DIV6P1("hdmi", R8A77965_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
+
+ DEF_DIV6_RO("osc", R8A77965_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
+ DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
+
+ DEF_BASE("r", R8A77965_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
+};
+
+static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
+ DEF_MOD("scif5", 202, R8A77965_CLK_S3D4),
+ DEF_MOD("scif4", 203, R8A77965_CLK_S3D4),
+ DEF_MOD("scif3", 204, R8A77965_CLK_S3D4),
+ DEF_MOD("scif1", 206, R8A77965_CLK_S3D4),
+ DEF_MOD("scif0", 207, R8A77965_CLK_S3D4),
+ DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S0D3),
+ DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S0D3),
+ DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3),
+
+ DEF_MOD("cmt3", 300, R8A77965_CLK_R),
+ DEF_MOD("cmt2", 301, R8A77965_CLK_R),
+ DEF_MOD("cmt1", 302, R8A77965_CLK_R),
+ DEF_MOD("cmt0", 303, R8A77965_CLK_R),
+ DEF_MOD("scif2", 310, R8A77965_CLK_S3D4),
+ DEF_MOD("sdif3", 311, R8A77965_CLK_SD3),
+ DEF_MOD("sdif2", 312, R8A77965_CLK_SD2),
+ DEF_MOD("sdif1", 313, R8A77965_CLK_SD1),
+ DEF_MOD("sdif0", 314, R8A77965_CLK_SD0),
+ DEF_MOD("pcie1", 318, R8A77965_CLK_S3D1),
+ DEF_MOD("pcie0", 319, R8A77965_CLK_S3D1),
+ DEF_MOD("usb3-if0", 328, R8A77965_CLK_S3D1),
+ DEF_MOD("usb-dmac0", 330, R8A77965_CLK_S3D1),
+ DEF_MOD("usb-dmac1", 331, R8A77965_CLK_S3D1),
+
+ DEF_MOD("rwdt", 402, R8A77965_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A77965_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A77965_CLK_S0D3),
+
+ DEF_MOD("audmac1", 501, R8A77965_CLK_S0D3),
+ DEF_MOD("audmac0", 502, R8A77965_CLK_S0D3),
+ DEF_MOD("drif7", 508, R8A77965_CLK_S3D2),
+ DEF_MOD("drif6", 509, R8A77965_CLK_S3D2),
+ DEF_MOD("drif5", 510, R8A77965_CLK_S3D2),
+ DEF_MOD("drif4", 511, R8A77965_CLK_S3D2),
+ DEF_MOD("drif3", 512, R8A77965_CLK_S3D2),
+ DEF_MOD("drif2", 513, R8A77965_CLK_S3D2),
+ DEF_MOD("drif1", 514, R8A77965_CLK_S3D2),
+ DEF_MOD("drif0", 515, R8A77965_CLK_S3D2),
+ DEF_MOD("hscif4", 516, R8A77965_CLK_S3D1),
+ DEF_MOD("hscif3", 517, R8A77965_CLK_S3D1),
+ DEF_MOD("hscif2", 518, R8A77965_CLK_S3D1),
+ DEF_MOD("hscif1", 519, R8A77965_CLK_S3D1),
+ DEF_MOD("hscif0", 520, R8A77965_CLK_S3D1),
+ DEF_MOD("thermal", 522, R8A77965_CLK_CP),
+ DEF_MOD("pwm", 523, R8A77965_CLK_S0D12),
+
+ DEF_MOD("fcpvd1", 602, R8A77965_CLK_S0D2),
+ DEF_MOD("fcpvd0", 603, R8A77965_CLK_S0D2),
+ DEF_MOD("fcpvb0", 607, R8A77965_CLK_S0D1),
+ DEF_MOD("fcpvi0", 611, R8A77965_CLK_S0D1),
+ DEF_MOD("fcpf0", 615, R8A77965_CLK_S0D1),
+ DEF_MOD("fcpcs", 619, R8A77965_CLK_S0D2),
+ DEF_MOD("vspd1", 622, R8A77965_CLK_S0D2),
+ DEF_MOD("vspd0", 623, R8A77965_CLK_S0D2),
+ DEF_MOD("vspb", 626, R8A77965_CLK_S0D1),
+ DEF_MOD("vspi0", 631, R8A77965_CLK_S0D1),
+
+ DEF_MOD("ehci1", 702, R8A77965_CLK_S3D4),
+ DEF_MOD("ehci0", 703, R8A77965_CLK_S3D4),
+ DEF_MOD("hsusb", 704, R8A77965_CLK_S3D4),
+ DEF_MOD("csi20", 714, R8A77965_CLK_CSI0),
+ DEF_MOD("csi40", 716, R8A77965_CLK_CSI0),
+ DEF_MOD("du3", 721, R8A77965_CLK_S2D1),
+ DEF_MOD("du1", 723, R8A77965_CLK_S2D1),
+ DEF_MOD("du0", 724, R8A77965_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A77965_CLK_S2D1),
+ DEF_MOD("hdmi0", 729, R8A77965_CLK_HDMI),
+
+ DEF_MOD("vin7", 804, R8A77965_CLK_S0D2),
+ DEF_MOD("vin6", 805, R8A77965_CLK_S0D2),
+ DEF_MOD("vin5", 806, R8A77965_CLK_S0D2),
+ DEF_MOD("vin4", 807, R8A77965_CLK_S0D2),
+ DEF_MOD("vin3", 808, R8A77965_CLK_S0D2),
+ DEF_MOD("vin2", 809, R8A77965_CLK_S0D2),
+ DEF_MOD("vin1", 810, R8A77965_CLK_S0D2),
+ DEF_MOD("vin0", 811, R8A77965_CLK_S0D2),
+ DEF_MOD("etheravb", 812, R8A77965_CLK_S0D6),
+ DEF_MOD("imr1", 822, R8A77965_CLK_S0D2),
+ DEF_MOD("imr0", 823, R8A77965_CLK_S0D2),
+
+ DEF_MOD("gpio7", 905, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio6", 906, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio5", 907, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio4", 908, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio3", 909, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio2", 910, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio1", 911, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio0", 912, R8A77965_CLK_S3D4),
+ DEF_MOD("can-fd", 914, R8A77965_CLK_S3D2),
+ DEF_MOD("can-if1", 915, R8A77965_CLK_S3D4),
+ DEF_MOD("can-if0", 916, R8A77965_CLK_S3D4),
+ DEF_MOD("i2c6", 918, R8A77965_CLK_S0D6),
+ DEF_MOD("i2c5", 919, R8A77965_CLK_S0D6),
+ DEF_MOD("i2c-dvfs", 926, R8A77965_CLK_CP),
+ DEF_MOD("i2c4", 927, R8A77965_CLK_S0D6),
+ DEF_MOD("i2c3", 928, R8A77965_CLK_S0D6),
+ DEF_MOD("i2c2", 929, R8A77965_CLK_S3D2),
+ DEF_MOD("i2c1", 930, R8A77965_CLK_S3D2),
+ DEF_MOD("i2c0", 931, R8A77965_CLK_S3D2),
+
+ DEF_MOD("ssi-all", 1005, R8A77965_CLK_S3D4),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A77965_CLK_S3D4),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+};
+
+static const unsigned int r8a77965_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3 PLL4
+ * 14 13 19 17 (MHz)
+ *-----------------------------------------------------------
+ * 0 0 0 0 16.66 x 1 x180 x192 x192 x144
+ * 0 0 0 1 16.66 x 1 x180 x192 x128 x144
+ * 0 0 1 0 Prohibited setting
+ * 0 0 1 1 16.66 x 1 x180 x192 x192 x144
+ * 0 1 0 0 20 x 1 x150 x160 x160 x120
+ * 0 1 0 1 20 x 1 x150 x160 x106 x120
+ * 0 1 1 0 Prohibited setting
+ * 0 1 1 1 20 x 1 x150 x160 x160 x120
+ * 1 0 0 0 25 x 1 x120 x128 x128 x96
+ * 1 0 0 1 25 x 1 x120 x128 x84 x96
+ * 1 0 1 0 Prohibited setting
+ * 1 0 1 1 25 x 1 x120 x128 x128 x96
+ * 1 1 0 0 33.33 / 2 x180 x192 x192 x144
+ * 1 1 0 1 33.33 / 2 x180 x192 x128 x144
+ * 1 1 1 0 Prohibited setting
+ * 1 1 1 1 33.33 / 2 x180 x192 x192 x144
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \
+ (((md) & BIT(13)) >> 11) | \
+ (((md) & BIT(19)) >> 18) | \
+ (((md) & BIT(17)) >> 17))
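
As a sanity check of the mode-pin table above: the macro packs MD14/MD13/MD19/MD17 into index bits 3..0 of cpg_pll_configs[]. A small standalone sketch (BIT() redefined locally; the chosen mode pins are an illustrative example, not values read from hardware):

/* Standalone check of the MD-pin to PLL-config index packing above. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

static unsigned int cpg_pll_config_index(uint32_t md)
{
	return ((md & BIT(14)) >> 11) |		/* MD14 -> index bit 3 */
	       ((md & BIT(13)) >> 11) |		/* MD13 -> index bit 2 */
	       ((md & BIT(19)) >> 18) |		/* MD19 -> index bit 1 */
	       ((md & BIT(17)) >> 17);		/* MD17 -> index bit 0 */
}

int main(void)
{
	/* MD14=1 MD13=0 MD19=0 MD17=1: 25 MHz EXTAL, PLL1 x128, PLL3 x84 */
	uint32_t md = BIT(14) | BIT(17);

	printf("index = %u\n", cpg_pll_config_index(md));	/* prints 9 */
	return 0;
}
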
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL3 mult/div */
+ { 1, 192, 1, 192, 1, },
+ { 1, 192, 1, 128, 1, },
+ { 0, /* Prohibited setting */ },
+ { 1, 192, 1, 192, 1, },
+ { 1, 160, 1, 160, 1, },
+ { 1, 160, 1, 106, 1, },
+ { 0, /* Prohibited setting */ },
+ { 1, 160, 1, 160, 1, },
+ { 1, 128, 1, 128, 1, },
+ { 1, 128, 1, 84, 1, },
+ { 0, /* Prohibited setting */ },
+ { 1, 128, 1, 128, 1, },
+ { 2, 192, 1, 192, 1, },
+ { 2, 192, 1, 128, 1, },
+ { 0, /* Prohibited setting */ },
+ { 2, 192, 1, 192, 1, },
+};
+
+static int __init r8a77965_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+ if (!cpg_pll_config->extal_div) {
+ dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode);
+ return -EINVAL;
+ }
+
+ return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a77965_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a77965_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a77965_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a77965_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a77965_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a77965_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a77965_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a77965_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
new file mode 100644
index 000000000000..7aaae73a321a
--- /dev/null
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a77980 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018 Cogent Embedded, Inc.
+ *
+ * Based on r8a7795-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+#include <linux/sys_soc.h>
+
+#include <dt-bindings/clock/r8a77980-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A77980_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+ CLK_PLL1_DIV4,
+ CLK_S0,
+ CLK_S1,
+ CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN3_PLL2, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
+ DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED("ztrd2", R8A77980_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
+ DEF_FIXED("zt", R8A77980_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("zx", R8A77980_CLK_ZX, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED("s0d1", R8A77980_CLK_S0D1, CLK_S0, 1, 1),
+ DEF_FIXED("s0d2", R8A77980_CLK_S0D2, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3", R8A77980_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4", R8A77980_CLK_S0D4, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6", R8A77980_CLK_S0D6, CLK_S0, 6, 1),
+ DEF_FIXED("s0d12", R8A77980_CLK_S0D12, CLK_S0, 12, 1),
+ DEF_FIXED("s0d24", R8A77980_CLK_S0D24, CLK_S0, 24, 1),
+ DEF_FIXED("s1d1", R8A77980_CLK_S1D1, CLK_S1, 1, 1),
+ DEF_FIXED("s1d2", R8A77980_CLK_S1D2, CLK_S1, 2, 1),
+ DEF_FIXED("s1d4", R8A77980_CLK_S1D4, CLK_S1, 4, 1),
+ DEF_FIXED("s2d1", R8A77980_CLK_S2D1, CLK_S2, 1, 1),
+ DEF_FIXED("s2d2", R8A77980_CLK_S2D2, CLK_S2, 2, 1),
+ DEF_FIXED("s2d4", R8A77980_CLK_S2D4, CLK_S2, 4, 1),
+ DEF_FIXED("s3d1", R8A77980_CLK_S3D1, CLK_S3, 1, 1),
+ DEF_FIXED("s3d2", R8A77980_CLK_S3D2, CLK_S3, 2, 1),
+ DEF_FIXED("s3d4", R8A77980_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_GEN3_SD("sd0", R8A77980_CLK_SD0, CLK_SDSRC, 0x0074),
+
+ DEF_FIXED("cl", R8A77980_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cp", R8A77980_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A77980_CLK_CPEX, CLK_EXTAL, 2, 1),
+
+ DEF_DIV6P1("canfd", R8A77980_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
+ DEF_DIV6P1("csi0", R8A77980_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
+ DEF_DIV6P1("mso", R8A77980_CLK_MSO, CLK_PLL1_DIV4, 0x014),
+};
+
+static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
+ DEF_MOD("tmu4", 121, R8A77980_CLK_S0D6),
+ DEF_MOD("tmu3", 122, R8A77980_CLK_S0D6),
+ DEF_MOD("tmu2", 123, R8A77980_CLK_S0D6),
+ DEF_MOD("tmu1", 124, R8A77980_CLK_S0D6),
+ DEF_MOD("tmu0", 125, R8A77980_CLK_CP),
+ DEF_MOD("scif4", 203, R8A77980_CLK_S3D4),
+ DEF_MOD("scif3", 204, R8A77980_CLK_S3D4),
+ DEF_MOD("scif1", 206, R8A77980_CLK_S3D4),
+ DEF_MOD("scif0", 207, R8A77980_CLK_S3D4),
+ DEF_MOD("msiof3", 208, R8A77980_CLK_MSO),
+ DEF_MOD("msiof2", 209, R8A77980_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A77980_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A77980_CLK_MSO),
+ DEF_MOD("sys-dmac2", 217, R8A77980_CLK_S0D3),
+ DEF_MOD("sys-dmac1", 218, R8A77980_CLK_S0D3),
+ DEF_MOD("tpu0", 304, R8A77980_CLK_S3D4),
+ DEF_MOD("sdif", 314, R8A77980_CLK_SD0),
+ DEF_MOD("pciec0", 319, R8A77980_CLK_S3D1),
+ DEF_MOD("intc-ex", 407, R8A77980_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A77980_CLK_S0D3),
+ DEF_MOD("hscif3", 517, R8A77980_CLK_S3D1),
+ DEF_MOD("hscif2", 518, R8A77980_CLK_S3D1),
+ DEF_MOD("hscif1", 519, R8A77980_CLK_S3D1),
+ DEF_MOD("hscif0", 520, R8A77980_CLK_S3D1),
+ DEF_MOD("imp4", 521, R8A77980_CLK_S1D1),
+ DEF_MOD("thermal", 522, R8A77980_CLK_CP),
+ DEF_MOD("pwm", 523, R8A77980_CLK_S0D12),
+ DEF_MOD("impdma1", 526, R8A77980_CLK_S1D1),
+ DEF_MOD("impdma0", 527, R8A77980_CLK_S1D1),
+ DEF_MOD("imp-ocv4", 528, R8A77980_CLK_S1D1),
+ DEF_MOD("imp-ocv3", 529, R8A77980_CLK_S1D1),
+ DEF_MOD("imp-ocv2", 531, R8A77980_CLK_S1D1),
+ DEF_MOD("fcpvd0", 603, R8A77980_CLK_S3D1),
+ DEF_MOD("vspd0", 623, R8A77980_CLK_S3D1),
+ DEF_MOD("csi41", 715, R8A77980_CLK_CSI0),
+ DEF_MOD("csi40", 716, R8A77980_CLK_CSI0),
+ DEF_MOD("du0", 724, R8A77980_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A77980_CLK_S2D1),
+ DEF_MOD("etheravb", 812, R8A77980_CLK_S3D2),
+ DEF_MOD("gether", 813, R8A77980_CLK_S3D2),
+ DEF_MOD("imp3", 824, R8A77980_CLK_S1D1),
+ DEF_MOD("imp2", 825, R8A77980_CLK_S1D1),
+ DEF_MOD("imp1", 826, R8A77980_CLK_S1D1),
+ DEF_MOD("imp0", 827, R8A77980_CLK_S1D1),
+ DEF_MOD("imp-ocv1", 828, R8A77980_CLK_S1D1),
+ DEF_MOD("imp-ocv0", 829, R8A77980_CLK_S1D1),
+ DEF_MOD("impram", 830, R8A77980_CLK_S1D1),
+ DEF_MOD("impcnn", 831, R8A77980_CLK_S1D1),
+ DEF_MOD("gpio5", 907, R8A77980_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A77980_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A77980_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A77980_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A77980_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A77980_CLK_CP),
+ DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2),
+ DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6),
+ DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6),
+ DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2),
+ DEF_MOD("i2c1", 930, R8A77980_CLK_S3D2),
+ DEF_MOD("i2c0", 931, R8A77980_CLK_S3D2),
+};
+
+static const unsigned int r8a77980_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL2 PLL1 PLL3
+ * 14 13 (MHz)
+ * --------------------------------------------------
+ * 0 0 16.66 x 1 x240 x192 x192
+ * 0 1 20 x 1 x200 x160 x160
+ * 1 0 27 x 1 x148 x118 x118
+ * 1 1 33.33 / 2 x240 x192 x192
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
+ (((md) & BIT(13)) >> 13))
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[4] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL3 mult/div */
+ { 1, 192, 1, 192, 1, },
+ { 1, 160, 1, 160, 1, },
+ { 1, 118, 1, 118, 1, },
+ { 2, 192, 1, 192, 1, },
+};
+
+static int __init r8a77980_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a77980_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a77980_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a77980_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a77980_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a77980_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a77980_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a77980_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a77980_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 0904886f5501..628b63b85d3f 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -13,6 +13,7 @@
*/
#include <linux/bug.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
@@ -62,6 +63,140 @@ static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
}
/*
+ * Z Clock & Z2 Clock
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is adjustable. clk->rate = (parent->rate * mult / 32) / 2
+ * parent - fixed parent. No clk_set_parent support
+ */
+#define CPG_FRQCRB 0x00000004
+#define CPG_FRQCRB_KICK BIT(31)
+#define CPG_FRQCRC 0x000000e0
+#define CPG_FRQCRC_ZFC_MASK GENMASK(12, 8)
+#define CPG_FRQCRC_Z2FC_MASK GENMASK(4, 0)
+
+struct cpg_z_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+ void __iomem *kick_reg;
+ unsigned long mask;
+};
+
+#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
+
+static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ u32 val;
+
+ val = readl(zclk->reg) & zclk->mask;
+ mult = 32 - (val >> __ffs(zclk->mask));
+
+ /* Factor of 2 is for fixed divider */
+ return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, 32 * 2);
+}
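
A short worked example of the formula above may help: with the fixed divide-by-2 folded in, the Z clock runs at parent * (32 - ZFC) / 64. The 3 GHz parent rate and ZFC value of 8 below are illustrative numbers, not taken from the patch:

/* Worked example of the Z clock rate calculation (illustrative values). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t parent_rate = 3000000000ULL;	/* e.g. a 3 GHz PLL0 */
	unsigned int zfc = 8;			/* value of the ZFC register field */
	unsigned int mult = 32 - zfc;		/* 24, as in cpg_z_clk_recalc_rate() */

	/* factor of 2 is the fixed post-divider */
	uint64_t rate = parent_rate * mult / (32 * 2);

	printf("%llu Hz\n", (unsigned long long)rate);	/* 1125000000 Hz */
	return 0;
}
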
+
+static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /* Factor of 2 is for fixed divider */
+ unsigned long prate = *parent_rate / 2;
+ unsigned int mult;
+
+ mult = div_u64(rate * 32ULL, prate);
+ mult = clamp(mult, 1U, 32U);
+
+ return (u64)prate * mult / 32;
+}
+
+static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ unsigned int i;
+ u32 val, kick;
+
+ /* Factor of 2 is for fixed divider */
+ mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate);
+ mult = clamp(mult, 1U, 32U);
+
+ if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ return -EBUSY;
+
+ val = readl(zclk->reg) & ~zclk->mask;
+ val |= ((32 - mult) << __ffs(zclk->mask)) & zclk->mask;
+ writel(val, zclk->reg);
+
+ /*
+ * Set KICK bit in FRQCRB to update hardware setting and wait for
+ * clock change completion.
+ */
+ kick = readl(zclk->kick_reg);
+ kick |= CPG_FRQCRB_KICK;
+ writel(kick, zclk->kick_reg);
+
+ /*
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Using experimental measurements, it seems that no more than
+ * ~10 iterations are needed, independently of the CPU rate.
+ * Since this value might depend on the external xtal rate, the pll1
+ * rate or even the other emulation clocks' rate, use 1000 as a
+ * "super" safe value.
+ */
+ for (i = 1000; i; i--) {
+ if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct clk_ops cpg_z_clk_ops = {
+ .recalc_rate = cpg_z_clk_recalc_rate,
+ .round_rate = cpg_z_clk_round_rate,
+ .set_rate = cpg_z_clk_set_rate,
+};
+
+static struct clk * __init cpg_z_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *reg,
+ unsigned long mask)
+{
+ struct clk_init_data init;
+ struct cpg_z_clk *zclk;
+ struct clk *clk;
+
+ zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
+ if (!zclk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &cpg_z_clk_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ zclk->reg = reg + CPG_FRQCRC;
+ zclk->kick_reg = reg + CPG_FRQCRB;
+ zclk->hw.init = &init;
+ zclk->mask = mask;
+
+ clk = clk_register(NULL, &zclk->hw);
+ if (IS_ERR(clk))
+ kfree(zclk);
+
+ return clk;
+}
+
+/*
* SDn Clock
*/
#define CPG_SD_STP_HCK BIT(9)
@@ -420,6 +555,14 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
mult = 1;
break;
+ case CLK_TYPE_GEN3_Z:
+ return cpg_z_clk_register(core->name, __clk_get_name(parent),
+ base, CPG_FRQCRC_ZFC_MASK);
+
+ case CLK_TYPE_GEN3_Z2:
+ return cpg_z_clk_register(core->name, __clk_get_name(parent),
+ base, CPG_FRQCRC_Z2FC_MASK);
+
default:
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index 2e4284399f53..ea4f8fc3c4c9 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -21,6 +21,8 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_SD,
CLK_TYPE_GEN3_R,
CLK_TYPE_GEN3_PE,
+ CLK_TYPE_GEN3_Z,
+ CLK_TYPE_GEN3_Z2,
};
#define DEF_GEN3_SD(_name, _id, _parent, _offset) \
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index e3cc72c81311..4e88e980fb76 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -693,12 +693,24 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a7796_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A77965
+ {
+ .compatible = "renesas,r8a77965-cpg-mssr",
+ .data = &r8a77965_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A77970
{
.compatible = "renesas,r8a77970-cpg-mssr",
.data = &r8a77970_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A77980
+ {
+ .compatible = "renesas,r8a77980-cpg-mssr",
+ .data = &r8a77980_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A77995
{
.compatible = "renesas,r8a77995-cpg-mssr",
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 0745b0930308..97ccb093c10f 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -139,7 +139,9 @@ extern const struct cpg_mssr_info r8a7792_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7794_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7795_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7796_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a77965_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77970_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a77980_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 077fcdc7908b..026a26bb702d 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -25,6 +25,8 @@ struct rockchip_mmc_clock {
void __iomem *reg;
int id;
int shift;
+ int cached_phase;
+ struct notifier_block clk_rate_change_nb;
};
#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)
@@ -58,6 +60,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
u16 degrees;
u32 delay_num = 0;
+ /* See the comment for rockchip_mmc_set_phase below */
+ if (!rate) {
+ pr_err("%s: invalid clk rate\n", __func__);
+ return -EINVAL;
+ }
+
raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
@@ -84,6 +92,23 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
u32 raw_value;
u32 delay;
+ /*
+ * The calculation below is based on the output clock from the MMC
+ * host to the card, which expects the phase clock to inherit the
+ * clock rate from its parent, namely the output clock provider of
+ * the MMC host. However, things may go wrong if
+ * (1) it is an orphan, or
+ * (2) it is assigned to the wrong parent.
+ *
+ * This check helps debug case (1), which seems to be the most likely
+ * problem we face and which makes it difficult for people to debug
+ * unstable mmc tuning results.
+ */
+ if (!rate) {
+ pr_err("%s: invalid clk rate\n", __func__);
+ return -EINVAL;
+ }
+
nineties = degrees / 90;
remainder = (degrees % 90);
@@ -139,6 +164,41 @@ static const struct clk_ops rockchip_mmc_clk_ops = {
.set_phase = rockchip_mmc_set_phase,
};
+#define to_rockchip_mmc_clock(x) \
+ container_of(x, struct rockchip_mmc_clock, clk_rate_change_nb)
+static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct rockchip_mmc_clock *mmc_clock = to_rockchip_mmc_clock(nb);
+ struct clk_notifier_data *ndata = data;
+
+ /*
+ * rockchip_mmc_clk is mostly used by mmc controllers to sample the
+ * input data, which expects a fixed phase after the tuning process.
+ * However, if the clock rate is changed, the phase becomes stale and
+ * may break the data sampling. So here we try to restore the phase for
+ * that case, except when
+ * (1) cached_phase is invalid, since we inevitably cache it when the
+ * clock provider is reparented from orphan to its real parent in the
+ * first place. Otherwise we may mess up the initialization of MMC cards,
+ * since we only set the default sample phase and drive phase later on.
+ * (2) the new rate is higher than the old one, since the mmc driver sets
+ * the max-frequency to match the board's ability and we must not exceed
+ * it, otherwise the tests smoke out the issue.
+ */
+ if (ndata->old_rate <= ndata->new_rate)
+ return NOTIFY_DONE;
+
+ if (event == PRE_RATE_CHANGE)
+ mmc_clock->cached_phase =
+ rockchip_mmc_get_phase(&mmc_clock->hw);
+ else if (mmc_clock->cached_phase != -EINVAL &&
+ event == POST_RATE_CHANGE)
+ rockchip_mmc_set_phase(&mmc_clock->hw, mmc_clock->cached_phase);
+
+ return NOTIFY_DONE;
+}
+
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
void __iomem *reg, int shift)
@@ -146,6 +206,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
struct clk_init_data init;
struct rockchip_mmc_clock *mmc_clock;
struct clk *clk;
+ int ret;
mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
if (!mmc_clock)
@@ -162,8 +223,21 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock->shift = shift;
clk = clk_register(NULL, &mmc_clock->hw);
- if (IS_ERR(clk))
- kfree(mmc_clock);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err_register;
+ }
+
+ mmc_clock->clk_rate_change_nb.notifier_call =
+ &rockchip_mmc_clk_rate_notify;
+ ret = clk_notifier_register(clk, &mmc_clock->clk_rate_change_nb);
+ if (ret)
+ goto err_notifier;
return clk;
+err_notifier:
+ clk_unregister(clk);
+err_register:
+ kfree(mmc_clock);
+ return ERR_PTR(ret);
}
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index 11e7f2d1c054..7af48184b022 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -387,7 +387,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(23), 5, 2, MFLAGS, 0, 6, DFLAGS,
RK2928_CLKGATE_CON(2), 15, GFLAGS),
- COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
+ COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
RK2928_CLKSEL_CON(11), 8, 2, MFLAGS, 0, 8, DFLAGS,
RK2928_CLKGATE_CON(2), 11, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index b04f29774ee7..252366a5231f 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -304,7 +304,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED,
RK3328_CLKSEL_CON(1), 4, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3328_CLKGATE_CON(7), 1, GFLAGS),
- GATE(0, "aclk_core_niu", "aclk_core", CLK_IGNORE_UNUSED,
+ GATE(0, "aclk_core_niu", "aclk_core", 0,
RK3328_CLKGATE_CON(13), 0, GFLAGS),
GATE(0, "aclk_gic400", "aclk_core", CLK_IGNORE_UNUSED,
RK3328_CLKGATE_CON(13), 1, GFLAGS),
@@ -318,7 +318,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKGATE_CON(6), 6, GFLAGS),
GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", CLK_SET_RATE_PARENT,
RK3328_CLKGATE_CON(14), 0, GFLAGS),
- GATE(0, "aclk_gpu_niu", "aclk_gpu_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "aclk_gpu_niu", "aclk_gpu_pre", 0,
RK3328_CLKGATE_CON(14), 1, GFLAGS),
/* PD_DDR */
@@ -513,9 +513,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKGATE_CON(24), 0, GFLAGS),
GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_pre", CLK_SET_RATE_PARENT,
RK3328_CLKGATE_CON(24), 1, GFLAGS),
- GATE(0, "aclk_rkvdec_niu", "aclk_rkvdec_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "aclk_rkvdec_niu", "aclk_rkvdec_pre", 0,
RK3328_CLKGATE_CON(24), 2, GFLAGS),
- GATE(0, "hclk_rkvdec_niu", "hclk_rkvdec_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "hclk_rkvdec_niu", "hclk_rkvdec_pre", 0,
RK3328_CLKGATE_CON(24), 3, GFLAGS),
COMPOSITE(SCLK_VDEC_CABAC, "sclk_vdec_cabac", mux_4plls_p, 0,
@@ -535,9 +535,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKGATE_CON(23), 0, GFLAGS),
GATE(HCLK_VPU, "hclk_vpu", "hclk_vpu_pre", CLK_SET_RATE_PARENT,
RK3328_CLKGATE_CON(23), 1, GFLAGS),
- GATE(0, "aclk_vpu_niu", "aclk_vpu_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "aclk_vpu_niu", "aclk_vpu_pre", 0,
RK3328_CLKGATE_CON(23), 2, GFLAGS),
- GATE(0, "hclk_vpu_niu", "hclk_vpu_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "hclk_vpu_niu", "hclk_vpu_pre", 0,
RK3328_CLKGATE_CON(23), 3, GFLAGS),
COMPOSITE(ACLK_RKVENC, "aclk_rkvenc", mux_4plls_p, 0,
@@ -545,9 +545,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKGATE_CON(6), 3, GFLAGS),
FACTOR_GATE(HCLK_RKVENC, "hclk_rkvenc", "aclk_rkvenc", 0, 1, 4,
RK3328_CLKGATE_CON(11), 4, GFLAGS),
- GATE(0, "aclk_rkvenc_niu", "aclk_rkvenc", CLK_IGNORE_UNUSED,
+ GATE(0, "aclk_rkvenc_niu", "aclk_rkvenc", 0,
RK3328_CLKGATE_CON(25), 0, GFLAGS),
- GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", CLK_IGNORE_UNUSED,
+ GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", 0,
RK3328_CLKGATE_CON(25), 1, GFLAGS),
GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0,
RK3328_CLKGATE_CON(25), 0, GFLAGS),
@@ -588,7 +588,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
COMPOSITE(ACLK_VOP_PRE, "aclk_vop_pre", mux_4plls_p, 0,
RK3328_CLKSEL_CON(39), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3328_CLKGATE_CON(5), 5, GFLAGS),
- GATE(0, "clk_hdmi_sfc", "xin24m", 0,
+ GATE(SCLK_HDMI_SFC, "sclk_hdmi_sfc", "xin24m", 0,
RK3328_CLKGATE_CON(5), 4, GFLAGS),
COMPOSITE_NODIV(0, "clk_cif_src", mux_2plls_p, 0,
@@ -602,7 +602,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKGATE_CON(5), 6, GFLAGS),
DIV(DCLK_HDMIPHY, "dclk_hdmiphy", "dclk_lcdc_src", 0,
RK3328_CLKSEL_CON(40), 3, 3, DFLAGS),
- MUX(DCLK_LCDC, "dclk_lcdc", mux_dclk_lcdc_p, 0,
+ MUX(DCLK_LCDC, "dclk_lcdc", mux_dclk_lcdc_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
RK3328_CLKSEL_CON(40), 1, 1, MFLAGS),
/*
@@ -709,14 +709,14 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
/* PD_VOP */
GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3328_CLKGATE_CON(21), 10, GFLAGS),
- GATE(0, "aclk_rga_niu", "aclk_rga_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(22), 3, GFLAGS),
+ GATE(0, "aclk_rga_niu", "aclk_rga_pre", 0, RK3328_CLKGATE_CON(22), 3, GFLAGS),
GATE(ACLK_VOP, "aclk_vop", "aclk_vop_pre", 0, RK3328_CLKGATE_CON(21), 2, GFLAGS),
- GATE(0, "aclk_vop_niu", "aclk_vop_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(21), 4, GFLAGS),
+ GATE(0, "aclk_vop_niu", "aclk_vop_pre", 0, RK3328_CLKGATE_CON(21), 4, GFLAGS),
GATE(ACLK_IEP, "aclk_iep", "aclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 6, GFLAGS),
GATE(ACLK_CIF, "aclk_cif", "aclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 8, GFLAGS),
GATE(ACLK_HDCP, "aclk_hdcp", "aclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 15, GFLAGS),
- GATE(0, "aclk_vio_niu", "aclk_vio_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(22), 2, GFLAGS),
+ GATE(0, "aclk_vio_niu", "aclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 2, GFLAGS),
GATE(HCLK_VOP, "hclk_vop", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 3, GFLAGS),
GATE(0, "hclk_vop_niu", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 5, GFLAGS),
@@ -724,10 +724,10 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
GATE(HCLK_CIF, "hclk_cif", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 9, GFLAGS),
GATE(HCLK_RGA, "hclk_rga", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 11, GFLAGS),
GATE(0, "hclk_ahb1tom", "hclk_vio_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(21), 12, GFLAGS),
- GATE(0, "pclk_vio_h2p", "hclk_vio_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(21), 13, GFLAGS),
- GATE(0, "hclk_vio_h2p", "hclk_vio_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(21), 14, GFLAGS),
+ GATE(0, "pclk_vio_h2p", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 13, GFLAGS),
+ GATE(0, "hclk_vio_h2p", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 14, GFLAGS),
GATE(HCLK_HDCP, "hclk_hdcp", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 0, GFLAGS),
- GATE(HCLK_VIO, "hclk_vio", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 1, GFLAGS),
+ GATE(0, "hclk_vio_niu", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 1, GFLAGS),
GATE(PCLK_HDMI, "pclk_hdmi", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 4, GFLAGS),
GATE(PCLK_HDCP, "pclk_hdcp", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 5, GFLAGS),
@@ -743,19 +743,19 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
GATE(HCLK_HOST0_ARB, "hclk_host0_arb", "hclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 7, GFLAGS),
GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 8, GFLAGS),
GATE(HCLK_OTG_PMU, "hclk_otg_pmu", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 9, GFLAGS),
- GATE(0, "hclk_peri_niu", "hclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 12, GFLAGS),
- GATE(0, "pclk_peri_niu", "hclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 13, GFLAGS),
+ GATE(0, "hclk_peri_niu", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 12, GFLAGS),
+ GATE(0, "pclk_peri_niu", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 13, GFLAGS),
/* PD_GMAC */
GATE(ACLK_MAC2PHY, "aclk_mac2phy", "aclk_gmac", 0, RK3328_CLKGATE_CON(26), 0, GFLAGS),
GATE(ACLK_MAC2IO, "aclk_mac2io", "aclk_gmac", 0, RK3328_CLKGATE_CON(26), 2, GFLAGS),
- GATE(0, "aclk_gmac_niu", "aclk_gmac", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(26), 4, GFLAGS),
+ GATE(0, "aclk_gmac_niu", "aclk_gmac", 0, RK3328_CLKGATE_CON(26), 4, GFLAGS),
GATE(PCLK_MAC2PHY, "pclk_mac2phy", "pclk_gmac", 0, RK3328_CLKGATE_CON(26), 1, GFLAGS),
GATE(PCLK_MAC2IO, "pclk_mac2io", "pclk_gmac", 0, RK3328_CLKGATE_CON(26), 3, GFLAGS),
- GATE(0, "pclk_gmac_niu", "pclk_gmac", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(26), 5, GFLAGS),
+ GATE(0, "pclk_gmac_niu", "pclk_gmac", 0, RK3328_CLKGATE_CON(26), 5, GFLAGS),
/* PD_BUS */
- GATE(0, "aclk_bus_niu", "aclk_bus_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 12, GFLAGS),
+ GATE(0, "aclk_bus_niu", "aclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 12, GFLAGS),
GATE(ACLK_DCF, "aclk_dcf", "aclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 11, GFLAGS),
GATE(ACLK_TSP, "aclk_tsp", "aclk_bus_pre", 0, RK3328_CLKGATE_CON(17), 12, GFLAGS),
GATE(0, "aclk_intmem", "aclk_bus_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 0, GFLAGS),
@@ -769,10 +769,10 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
GATE(HCLK_TSP, "hclk_tsp", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(17), 11, GFLAGS),
GATE(HCLK_CRYPTO_MST, "hclk_crypto_mst", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 7, GFLAGS),
GATE(HCLK_CRYPTO_SLV, "hclk_crypto_slv", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 8, GFLAGS),
- GATE(0, "hclk_bus_niu", "hclk_bus_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 13, GFLAGS),
+ GATE(0, "hclk_bus_niu", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 13, GFLAGS),
GATE(HCLK_PDM, "hclk_pdm", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(28), 0, GFLAGS),
- GATE(0, "pclk_bus_niu", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 14, GFLAGS),
+ GATE(0, "pclk_bus_niu", "pclk_bus", 0, RK3328_CLKGATE_CON(15), 14, GFLAGS),
GATE(0, "pclk_efuse", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 9, GFLAGS),
GATE(0, "pclk_otp", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(28), 4, GFLAGS),
GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus", 0, RK3328_CLKGATE_CON(15), 10, GFLAGS),
@@ -807,37 +807,42 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
GATE(0, "pclk_acodecphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 5, GFLAGS),
GATE(PCLK_HDMIPHY, "pclk_hdmiphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 7, GFLAGS),
GATE(0, "pclk_vdacphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 8, GFLAGS),
- GATE(0, "pclk_phy_niu", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 15, GFLAGS),
+ GATE(0, "pclk_phy_niu", "pclk_phy_pre", 0, RK3328_CLKGATE_CON(15), 15, GFLAGS),
/* PD_MMC */
- MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc",
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc",
RK3328_SDMMC_CON0, 1),
- MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc",
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc",
RK3328_SDMMC_CON1, 1),
- MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio",
+ MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio",
RK3328_SDIO_CON0, 1),
- MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio",
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio",
RK3328_SDIO_CON1, 1),
- MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc",
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc",
RK3328_EMMC_CON0, 1),
- MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc",
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc",
RK3328_EMMC_CON1, 1),
- MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "sclk_sdmmc_ext",
+ MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "clk_sdmmc_ext",
RK3328_SDMMC_EXT_CON0, 1),
- MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "sclk_sdmmc_ext",
+ MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "clk_sdmmc_ext",
RK3328_SDMMC_EXT_CON1, 1),
};
static const char *const rk3328_critical_clocks[] __initconst = {
"aclk_bus",
+ "aclk_bus_niu",
"pclk_bus",
+ "pclk_bus_niu",
"hclk_bus",
+ "hclk_bus_niu",
"aclk_peri",
"hclk_peri",
+ "hclk_peri_niu",
"pclk_peri",
+ "pclk_peri_niu",
"pclk_dbg",
"aclk_core_niu",
"aclk_gic400",
@@ -861,6 +866,20 @@ static const char *const rk3328_critical_clocks[] __initconst = {
"aclk_rga_niu",
"pclk_vio_h2p",
"hclk_vio_h2p",
+ "aclk_vio_niu",
+ "hclk_vio_niu",
+ "aclk_vop_niu",
+ "hclk_vop_niu",
+ "aclk_gpu_niu",
+ "aclk_rkvdec_niu",
+ "hclk_rkvdec_niu",
+ "aclk_vpu_niu",
+ "hclk_vpu_niu",
+ "aclk_rkvenc_niu",
+ "hclk_rkvenc_niu",
+ "aclk_gmac_niu",
+ "pclk_gmac_niu",
+ "pclk_phy_niu",
};
static void __init rk3328_clk_init(struct device_node *np)
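The rk3328 hunks above drop CLK_IGNORE_UNUSED from the interconnect (NIU) gates and instead list them in rk3328_critical_clocks, so they are explicitly kept enabled rather than merely skipped by the late clk_disable_unused() pass. A minimal sketch of what critical-clock handling amounts to, using only the generic clk consumer API (the Rockchip driver has its own helper, rockchip_clk_protect_critical(); the function and name below are illustrative, not the driver's code):

#include <linux/clk.h>
#include <linux/clk-provider.h>

static void protect_critical_clocks(const char *const clocks[], int nclocks)
{
	int i;

	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		/* take a permanent prepare/enable reference on each listed clock */
		if (clk)
			clk_prepare_enable(clk);
	}
}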
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 6847120b61cd..bca10d618f0a 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -57,6 +57,7 @@ static struct rockchip_pll_rate_table rk3399_pll_rates[] = {
RK3036_PLL_RATE(1656000000, 1, 69, 1, 1, 1, 0),
RK3036_PLL_RATE(1632000000, 1, 68, 1, 1, 1, 0),
RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1600000000, 3, 200, 1, 1, 1, 0),
RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0),
RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0),
RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0),
@@ -670,7 +671,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKGATE_CON(9), 7, GFLAGS,
&rk3399_uart3_fracmux),
- COMPOSITE(0, "pclk_ddr", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
+ COMPOSITE(PCLK_DDR, "pclk_ddr", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(6), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK3399_CLKGATE_CON(3), 4, GFLAGS),
@@ -886,7 +887,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKGATE_CON(31), 8, GFLAGS),
/* sdio & sdmmc */
- COMPOSITE(0, "hclk_sd", mux_pll_src_cpll_gpll_p, 0,
+ COMPOSITE(HCLK_SD, "hclk_sd", mux_pll_src_cpll_gpll_p, 0,
RK3399_CLKSEL_CON(13), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK3399_CLKGATE_CON(12), 13, GFLAGS),
GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_sd", 0,
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 35dbd63c2f49..3cd8ad59e0b7 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -57,6 +57,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
struct clk_divider *div = NULL;
const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
*gate_ops = NULL;
+ int ret;
if (num_parents > 1) {
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
@@ -74,8 +75,10 @@ static struct clk *rockchip_clk_register_branch(const char *name,
if (gate_offset >= 0) {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate)
+ if (!gate) {
+ ret = -ENOMEM;
goto err_gate;
+ }
gate->flags = gate_flags;
gate->reg = base + gate_offset;
@@ -86,8 +89,10 @@ static struct clk *rockchip_clk_register_branch(const char *name,
if (div_width > 0) {
div = kzalloc(sizeof(*div), GFP_KERNEL);
- if (!div)
+ if (!div) {
+ ret = -ENOMEM;
goto err_div;
+ }
div->flags = div_flags;
div->reg = base + muxdiv_offset;
@@ -106,12 +111,19 @@ static struct clk *rockchip_clk_register_branch(const char *name,
gate ? &gate->hw : NULL, gate_ops,
flags);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err_composite;
+ }
+
return clk;
+err_composite:
+ kfree(div);
err_div:
kfree(gate);
err_gate:
kfree(mux);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
struct rockchip_clk_frac {
@@ -291,8 +303,10 @@ static struct clk *rockchip_clk_register_frac_branch(
init.num_parents = child->num_parents;
mux_clk = clk_register(NULL, &frac_mux->hw);
- if (IS_ERR(mux_clk))
+ if (IS_ERR(mux_clk)) {
+ kfree(frac);
return clk;
+ }
rockchip_clk_add_lookup(ctx, mux_clk, child->id);
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index ef8900bc077f..513826393158 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -8,9 +8,11 @@ obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o
obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o
obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4412-isp.o
obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
+obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5-subcmu.o
obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
+obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5-subcmu.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 5bfc92ee3129..b4b057c7301c 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -143,10 +143,8 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg_base)) {
- dev_err(dev, "failed to map audss registers\n");
+ if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
- }
epll = ERR_PTR(-ENODEV);
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 1b81e283f605..27c9d23657b3 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -670,73 +670,73 @@ static const struct samsung_gate_clock gate_clks[] __initconst = {
/* APLL & MPLL & BPLL & UPLL */
static const struct samsung_pll_rate_table exynos3250_pll_rates[] __initconst = {
- PLL_35XX_RATE(1200000000, 400, 4, 1),
- PLL_35XX_RATE(1100000000, 275, 3, 1),
- PLL_35XX_RATE(1066000000, 533, 6, 1),
- PLL_35XX_RATE(1000000000, 250, 3, 1),
- PLL_35XX_RATE( 960000000, 320, 4, 1),
- PLL_35XX_RATE( 900000000, 300, 4, 1),
- PLL_35XX_RATE( 850000000, 425, 6, 1),
- PLL_35XX_RATE( 800000000, 200, 3, 1),
- PLL_35XX_RATE( 700000000, 175, 3, 1),
- PLL_35XX_RATE( 667000000, 667, 12, 1),
- PLL_35XX_RATE( 600000000, 400, 4, 2),
- PLL_35XX_RATE( 533000000, 533, 6, 2),
- PLL_35XX_RATE( 520000000, 260, 3, 2),
- PLL_35XX_RATE( 500000000, 250, 3, 2),
- PLL_35XX_RATE( 400000000, 200, 3, 2),
- PLL_35XX_RATE( 200000000, 200, 3, 3),
- PLL_35XX_RATE( 100000000, 200, 3, 4),
+ PLL_35XX_RATE(24 * MHZ, 1200000000, 400, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 1100000000, 275, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 1066000000, 533, 6, 1),
+ PLL_35XX_RATE(24 * MHZ, 1000000000, 250, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 960000000, 320, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 900000000, 300, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 850000000, 425, 6, 1),
+ PLL_35XX_RATE(24 * MHZ, 800000000, 200, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 700000000, 175, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 667000000, 667, 12, 1),
+ PLL_35XX_RATE(24 * MHZ, 600000000, 400, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 533000000, 533, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 520000000, 260, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 500000000, 250, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 400000000, 200, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 200000000, 200, 3, 3),
+ PLL_35XX_RATE(24 * MHZ, 100000000, 200, 3, 4),
{ /* sentinel */ }
};
/* EPLL */
static const struct samsung_pll_rate_table exynos3250_epll_rates[] __initconst = {
- PLL_36XX_RATE(800000000, 200, 3, 1, 0),
- PLL_36XX_RATE(288000000, 96, 2, 2, 0),
- PLL_36XX_RATE(192000000, 128, 2, 3, 0),
- PLL_36XX_RATE(144000000, 96, 2, 3, 0),
- PLL_36XX_RATE( 96000000, 128, 2, 4, 0),
- PLL_36XX_RATE( 84000000, 112, 2, 4, 0),
- PLL_36XX_RATE( 80000004, 106, 2, 4, 43691),
- PLL_36XX_RATE( 73728000, 98, 2, 4, 19923),
- PLL_36XX_RATE( 67737598, 270, 3, 5, 62285),
- PLL_36XX_RATE( 65535999, 174, 2, 5, 49982),
- PLL_36XX_RATE( 50000000, 200, 3, 5, 0),
- PLL_36XX_RATE( 49152002, 131, 2, 5, 4719),
- PLL_36XX_RATE( 48000000, 128, 2, 5, 0),
- PLL_36XX_RATE( 45158401, 180, 3, 5, 41524),
+ PLL_36XX_RATE(24 * MHZ, 800000000, 200, 3, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 288000000, 96, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 192000000, 128, 2, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 144000000, 96, 2, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 96000000, 128, 2, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 84000000, 112, 2, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 80000003, 106, 2, 4, 43691),
+ PLL_36XX_RATE(24 * MHZ, 73728000, 98, 2, 4, 19923),
+ PLL_36XX_RATE(24 * MHZ, 67737598, 270, 3, 5, 62285),
+ PLL_36XX_RATE(24 * MHZ, 65535999, 174, 2, 5, 49982),
+ PLL_36XX_RATE(24 * MHZ, 50000000, 200, 3, 5, 0),
+ PLL_36XX_RATE(24 * MHZ, 49152002, 131, 2, 5, 4719),
+ PLL_36XX_RATE(24 * MHZ, 48000000, 128, 2, 5, 0),
+ PLL_36XX_RATE(24 * MHZ, 45158401, 180, 3, 5, 41524),
{ /* sentinel */ }
};
/* VPLL */
static const struct samsung_pll_rate_table exynos3250_vpll_rates[] __initconst = {
- PLL_36XX_RATE(600000000, 100, 2, 1, 0),
- PLL_36XX_RATE(533000000, 266, 3, 2, 32768),
- PLL_36XX_RATE(519230987, 173, 2, 2, 5046),
- PLL_36XX_RATE(500000000, 250, 3, 2, 0),
- PLL_36XX_RATE(445500000, 148, 2, 2, 32768),
- PLL_36XX_RATE(445055007, 148, 2, 2, 23047),
- PLL_36XX_RATE(400000000, 200, 3, 2, 0),
- PLL_36XX_RATE(371250000, 123, 2, 2, 49152),
- PLL_36XX_RATE(370878997, 185, 3, 2, 28803),
- PLL_36XX_RATE(340000000, 170, 3, 2, 0),
- PLL_36XX_RATE(335000015, 111, 2, 2, 43691),
- PLL_36XX_RATE(333000000, 111, 2, 2, 0),
- PLL_36XX_RATE(330000000, 110, 2, 2, 0),
- PLL_36XX_RATE(320000015, 106, 2, 2, 43691),
- PLL_36XX_RATE(300000000, 100, 2, 2, 0),
- PLL_36XX_RATE(275000000, 275, 3, 3, 0),
- PLL_36XX_RATE(222750000, 148, 2, 3, 32768),
- PLL_36XX_RATE(222528007, 148, 2, 3, 23069),
- PLL_36XX_RATE(160000000, 160, 3, 3, 0),
- PLL_36XX_RATE(148500000, 99, 2, 3, 0),
- PLL_36XX_RATE(148352005, 98, 2, 3, 59070),
- PLL_36XX_RATE(108000000, 144, 2, 4, 0),
- PLL_36XX_RATE( 74250000, 99, 2, 4, 0),
- PLL_36XX_RATE( 74176002, 98, 3, 4, 59070),
- PLL_36XX_RATE( 54054000, 216, 3, 5, 14156),
- PLL_36XX_RATE( 54000000, 144, 2, 5, 0),
+ PLL_36XX_RATE(24 * MHZ, 600000000, 100, 2, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 533000000, 266, 3, 2, 32768),
+ PLL_36XX_RATE(24 * MHZ, 519230987, 173, 2, 2, 5046),
+ PLL_36XX_RATE(24 * MHZ, 500000000, 250, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 445500000, 148, 2, 2, 32768),
+ PLL_36XX_RATE(24 * MHZ, 445055007, 148, 2, 2, 23047),
+ PLL_36XX_RATE(24 * MHZ, 400000000, 200, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 371250000, 123, 2, 2, 49152),
+ PLL_36XX_RATE(24 * MHZ, 370878997, 185, 3, 2, 28803),
+ PLL_36XX_RATE(24 * MHZ, 340000000, 170, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 335000015, 111, 2, 2, 43691),
+ PLL_36XX_RATE(24 * MHZ, 333000000, 111, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 330000000, 110, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 320000015, 106, 2, 2, 43691),
+ PLL_36XX_RATE(24 * MHZ, 300000000, 100, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 275000000, 275, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 222750000, 148, 2, 3, 32768),
+ PLL_36XX_RATE(24 * MHZ, 222528007, 148, 2, 3, 23069),
+ PLL_36XX_RATE(24 * MHZ, 160000000, 160, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 148500000, 99, 2, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 148352005, 98, 2, 3, 59070),
+ PLL_36XX_RATE(24 * MHZ, 108000000, 144, 2, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 74250000, 99, 2, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 74176002, 98, 2, 4, 59070),
+ PLL_36XX_RATE(24 * MHZ, 54054000, 216, 3, 5, 14156),
+ PLL_36XX_RATE(24 * MHZ, 54000000, 144, 2, 5, 0),
{ /* sentinel */ }
};
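The extra first argument threaded through these PLL rate tables is the assumed reference clock (24 MHz here), which allows each entry to be checked against the PLL equations; that is presumably why a few rates change by a hertz or two in the process (80000004 becoming 80000003 above, for instance). A hedged sketch of that recomputation for the two PLL families used in this file, with illustrative helper names:

#include <linux/math64.h>

/* PLL35xx: fout = fin * M / (P * 2^S) */
static unsigned long pll35xx_rate(unsigned long fin, u32 m, u32 p, u32 s)
{
	return div_u64((u64)fin * m, p << s);
}

/* PLL36xx: fout = fin * (M + K / 65536) / (P * 2^S), K is signed */
static unsigned long pll36xx_rate(unsigned long fin, u32 m, u32 p, u32 s, s16 k)
{
	return div_u64((u64)fin * ((m << 16) + k), (p << s) << 16);
}

/* e.g. pll36xx_rate(24 * MHZ, 106, 2, 4, 43691) == 80000003 */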
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 134f25f2a913..0421960eb963 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1266,77 +1266,78 @@ static const struct of_device_id ext_clk_match[] __initconst = {
/* PLLs PMS values */
static const struct samsung_pll_rate_table exynos4210_apll_rates[] __initconst = {
- PLL_45XX_RATE(1200000000, 150, 3, 1, 28),
- PLL_45XX_RATE(1000000000, 250, 6, 1, 28),
- PLL_45XX_RATE( 800000000, 200, 6, 1, 28),
- PLL_45XX_RATE( 666857142, 389, 14, 1, 13),
- PLL_45XX_RATE( 600000000, 100, 4, 1, 13),
- PLL_45XX_RATE( 533000000, 533, 24, 1, 5),
- PLL_45XX_RATE( 500000000, 250, 6, 2, 28),
- PLL_45XX_RATE( 400000000, 200, 6, 2, 28),
- PLL_45XX_RATE( 200000000, 200, 6, 3, 28),
+ PLL_4508_RATE(24 * MHZ, 1200000000, 150, 3, 1, 28),
+ PLL_4508_RATE(24 * MHZ, 1000000000, 250, 6, 1, 28),
+ PLL_4508_RATE(24 * MHZ, 800000000, 200, 6, 1, 28),
+ PLL_4508_RATE(24 * MHZ, 666857142, 389, 14, 1, 13),
+ PLL_4508_RATE(24 * MHZ, 600000000, 100, 4, 1, 13),
+ PLL_4508_RATE(24 * MHZ, 533000000, 533, 24, 1, 5),
+ PLL_4508_RATE(24 * MHZ, 500000000, 250, 6, 2, 28),
+ PLL_4508_RATE(24 * MHZ, 400000000, 200, 6, 2, 28),
+ PLL_4508_RATE(24 * MHZ, 200000000, 200, 6, 3, 28),
{ /* sentinel */ }
};
static const struct samsung_pll_rate_table exynos4210_epll_rates[] __initconst = {
- PLL_4600_RATE(192000000, 48, 3, 1, 0, 0),
- PLL_4600_RATE(180633605, 45, 3, 1, 10381, 0),
- PLL_4600_RATE(180000000, 45, 3, 1, 0, 0),
- PLL_4600_RATE( 73727996, 73, 3, 3, 47710, 1),
- PLL_4600_RATE( 67737602, 90, 4, 3, 20762, 1),
- PLL_4600_RATE( 49151992, 49, 3, 3, 9961, 0),
- PLL_4600_RATE( 45158401, 45, 3, 3, 10381, 0),
+ PLL_4600_RATE(24 * MHZ, 192000000, 48, 3, 1, 0, 0),
+ PLL_4600_RATE(24 * MHZ, 180633605, 45, 3, 1, 10381, 0),
+ PLL_4600_RATE(24 * MHZ, 180000000, 45, 3, 1, 0, 0),
+ PLL_4600_RATE(24 * MHZ, 73727996, 73, 3, 3, 47710, 1),
+ PLL_4600_RATE(24 * MHZ, 67737602, 90, 4, 3, 20762, 1),
+ PLL_4600_RATE(24 * MHZ, 49151992, 49, 3, 3, 9961, 0),
+ PLL_4600_RATE(24 * MHZ, 45158401, 45, 3, 3, 10381, 0),
{ /* sentinel */ }
};
static const struct samsung_pll_rate_table exynos4210_vpll_rates[] __initconst = {
- PLL_4650_RATE(360000000, 44, 3, 0, 1024, 0, 14, 0),
- PLL_4650_RATE(324000000, 53, 2, 1, 1024, 1, 1, 1),
- PLL_4650_RATE(259617187, 63, 3, 1, 1950, 0, 20, 1),
- PLL_4650_RATE(110000000, 53, 3, 2, 2048, 0, 17, 0),
- PLL_4650_RATE( 55360351, 53, 3, 3, 2417, 0, 17, 0),
+ PLL_4650_RATE(24 * MHZ, 360000000, 44, 3, 0, 1024, 0, 14, 0),
+ PLL_4650_RATE(24 * MHZ, 324000000, 53, 2, 1, 1024, 1, 1, 1),
+ PLL_4650_RATE(24 * MHZ, 259617187, 63, 3, 1, 1950, 0, 20, 1),
+ PLL_4650_RATE(24 * MHZ, 110000000, 53, 3, 2, 2048, 0, 17, 0),
+ PLL_4650_RATE(24 * MHZ, 55360351, 53, 3, 3, 2417, 0, 17, 0),
{ /* sentinel */ }
};
static const struct samsung_pll_rate_table exynos4x12_apll_rates[] __initconst = {
- PLL_35XX_RATE(1704000000, 213, 3, 0),
- PLL_35XX_RATE(1600000000, 200, 3, 0),
- PLL_35XX_RATE(1500000000, 250, 4, 0),
- PLL_35XX_RATE(1400000000, 175, 3, 0),
- PLL_35XX_RATE(1300000000, 325, 6, 0),
- PLL_35XX_RATE(1200000000, 200, 4, 0),
- PLL_35XX_RATE(1100000000, 275, 6, 0),
- PLL_35XX_RATE(1000000000, 125, 3, 0),
- PLL_35XX_RATE( 900000000, 150, 4, 0),
- PLL_35XX_RATE( 800000000, 100, 3, 0),
- PLL_35XX_RATE( 700000000, 175, 3, 1),
- PLL_35XX_RATE( 600000000, 200, 4, 1),
- PLL_35XX_RATE( 500000000, 125, 3, 1),
- PLL_35XX_RATE( 400000000, 100, 3, 1),
- PLL_35XX_RATE( 300000000, 200, 4, 2),
- PLL_35XX_RATE( 200000000, 100, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 1704000000, 213, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1600000000, 200, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1500000000, 250, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1400000000, 175, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1300000000, 325, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1200000000, 200, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1100000000, 275, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1000000000, 125, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 900000000, 150, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 800000000, 100, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 700000000, 175, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 600000000, 200, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 500000000, 125, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 400000000, 100, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 300000000, 200, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 200000000, 100, 3, 2),
{ /* sentinel */ }
};
static const struct samsung_pll_rate_table exynos4x12_epll_rates[] __initconst = {
- PLL_36XX_RATE(192000000, 48, 3, 1, 0),
- PLL_36XX_RATE(180633605, 45, 3, 1, 10381),
- PLL_36XX_RATE(180000000, 45, 3, 1, 0),
- PLL_36XX_RATE( 73727996, 73, 3, 3, 47710),
- PLL_36XX_RATE( 67737602, 90, 4, 3, 20762),
- PLL_36XX_RATE( 49151992, 49, 3, 3, 9961),
- PLL_36XX_RATE( 45158401, 45, 3, 3, 10381),
+ PLL_36XX_RATE(24 * MHZ, 196608001, 197, 3, 3, -25690),
+ PLL_36XX_RATE(24 * MHZ, 192000000, 48, 3, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 180633605, 45, 3, 1, 10381),
+ PLL_36XX_RATE(24 * MHZ, 180000000, 45, 3, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 73727996, 73, 3, 3, 47710),
+ PLL_36XX_RATE(24 * MHZ, 67737602, 90, 4, 3, 20762),
+ PLL_36XX_RATE(24 * MHZ, 49151992, 49, 3, 3, 9961),
+ PLL_36XX_RATE(24 * MHZ, 45158401, 45, 3, 3, 10381),
{ /* sentinel */ }
};
static const struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initconst = {
- PLL_36XX_RATE(533000000, 133, 3, 1, 16384),
- PLL_36XX_RATE(440000000, 110, 3, 1, 0),
- PLL_36XX_RATE(350000000, 175, 3, 2, 0),
- PLL_36XX_RATE(266000000, 133, 3, 2, 0),
- PLL_36XX_RATE(160000000, 160, 3, 3, 0),
- PLL_36XX_RATE(106031250, 53, 3, 2, 1024),
- PLL_36XX_RATE( 53015625, 53, 3, 3, 1024),
+ PLL_36XX_RATE(24 * MHZ, 533000000, 133, 3, 1, 16384),
+ PLL_36XX_RATE(24 * MHZ, 440000000, 110, 3, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 350000000, 175, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 266000000, 133, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 160000000, 160, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 106031250, 53, 3, 2, 1024),
+ PLL_36XX_RATE(24 * MHZ, 53015625, 53, 3, 3, 1024),
{ /* sentinel */ }
};
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
new file mode 100644
index 000000000000..93306283d764
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 Samsung Electronics Co., Ltd.
+// Author: Marek Szyprowski <m.szyprowski@samsung.com>
+// Common Clock Framework support for Exynos5 power-domain dependent clocks
+
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+
+#include "clk.h"
+#include "clk-exynos5-subcmu.h"
+
+static struct samsung_clk_provider *ctx;
+static const struct exynos5_subcmu_info *cmu;
+static int nr_cmus;
+
+static void exynos5_subcmu_clk_save(void __iomem *base,
+ struct exynos5_subcmu_reg_dump *rd,
+ unsigned int num_regs)
+{
+ for (; num_regs > 0; --num_regs, ++rd) {
+ rd->save = readl(base + rd->offset);
+ writel((rd->save & ~rd->mask) | rd->value, base + rd->offset);
+ rd->save &= rd->mask;
+ }
+};
+
+static void exynos5_subcmu_clk_restore(void __iomem *base,
+ struct exynos5_subcmu_reg_dump *rd,
+ unsigned int num_regs)
+{
+ for (; num_regs > 0; --num_regs, ++rd)
+ writel((readl(base + rd->offset) & ~rd->mask) | rd->save,
+ base + rd->offset);
+}
+
+static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx,
+ const struct samsung_gate_clock *list, int nr_clk)
+{
+ while (nr_clk--)
+ samsung_clk_add_lookup(ctx, ERR_PTR(-EPROBE_DEFER), list++->id);
+}
+
+/*
+ * Pass the needed clock provider context and register sub-CMU clocks
+ *
+ * NOTE: This function has to be called from the main, OF_CLK_DECLARE-
+ * initialized clock provider driver. This happens very early during boot
+ * process. Then this driver, during core_initcall registers two platform
+ * drivers: one which binds to the same device-tree node as OF_CLK_DECLARE
+ * driver and second, for handling its per-domain child-devices. Those
+ * platform drivers are bound to their devices a bit later in arch_initcall,
+ * when OF-core populates all device-tree nodes.
+ */
+void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
+ const struct exynos5_subcmu_info *_cmu)
+{
+ ctx = _ctx;
+ cmu = _cmu;
+ nr_cmus = _nr_cmus;
+
+ for (; _nr_cmus--; _cmu++) {
+ exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks,
+ _cmu->nr_gate_clks);
+ exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs,
+ _cmu->nr_suspend_regs);
+ }
+}
+
+static int __maybe_unused exynos5_subcmu_suspend(struct device *dev)
+{
+ struct exynos5_subcmu_info *info = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ exynos5_subcmu_clk_save(ctx->reg_base, info->suspend_regs,
+ info->nr_suspend_regs);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ return 0;
+}
+
+static int __maybe_unused exynos5_subcmu_resume(struct device *dev)
+{
+ struct exynos5_subcmu_info *info = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ exynos5_subcmu_clk_restore(ctx->reg_base, info->suspend_regs,
+ info->nr_suspend_regs);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ return 0;
+}
+
+static int __init exynos5_subcmu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos5_subcmu_info *info = dev_get_drvdata(dev);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get(dev);
+
+ ctx->dev = dev;
+ samsung_clk_register_div(ctx, info->div_clks, info->nr_div_clks);
+ samsung_clk_register_gate(ctx, info->gate_clks, info->nr_gate_clks);
+ ctx->dev = NULL;
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops exynos5_subcmu_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos5_subcmu_suspend,
+ exynos5_subcmu_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver exynos5_subcmu_driver __refdata = {
+ .driver = {
+ .name = "exynos5-subcmu",
+ .suppress_bind_attrs = true,
+ .pm = &exynos5_subcmu_pm_ops,
+ },
+ .probe = exynos5_subcmu_probe,
+};
+
+static int __init exynos5_clk_register_subcmu(struct device *parent,
+ const struct exynos5_subcmu_info *info,
+ struct device_node *pd_node)
+{
+ struct of_phandle_args genpdspec = { .np = pd_node };
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc(info->pd_name, -1);
+ pdev->dev.parent = parent;
+ pdev->driver_override = "exynos5-subcmu";
+ platform_set_drvdata(pdev, (void *)info);
+ of_genpd_add_device(&genpdspec, &pdev->dev);
+ platform_device_add(pdev);
+
+ return 0;
+}
+
+static int __init exynos5_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ const char *name;
+ int i;
+
+ for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
+ if (of_property_read_string(np, "label", &name) < 0)
+ continue;
+ for (i = 0; i < nr_cmus; i++)
+ if (strcmp(cmu[i].pd_name, name) == 0)
+ exynos5_clk_register_subcmu(&pdev->dev,
+ &cmu[i], np);
+ }
+ return 0;
+}
+
+static const struct of_device_id exynos5_clk_of_match[] = {
+ { .compatible = "samsung,exynos5250-clock", },
+ { .compatible = "samsung,exynos5420-clock", },
+ { .compatible = "samsung,exynos5800-clock", },
+ { },
+};
+
+static struct platform_driver exynos5_clk_driver __refdata = {
+ .driver = {
+ .name = "exynos5-clock",
+ .of_match_table = exynos5_clk_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos5_clk_probe,
+};
+
+static int __init exynos5_clk_drv_init(void)
+{
+ platform_driver_register(&exynos5_clk_driver);
+ platform_driver_register(&exynos5_subcmu_driver);
+ return 0;
+}
+core_initcall(exynos5_clk_drv_init);
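Per the save/restore helpers above, each exynos5_subcmu_reg_dump entry works as an { .offset, .value, .mask } triple: on suspend the bits selected by .mask are saved and then forced to .value, and on resume the saved bits are written back. The exynos5250 and exynos5420 hunks further down carry the real tables; the entry below is only a hypothetical illustration of that convention, reusing register names from the exynos5250 table:

static struct exynos5_subcmu_reg_dump example_suspend_regs[] = {
	/* keep every gate in this register forced open across suspend */
	{ .offset = GATE_IP_DISP1, .value = 0xffffffff, .mask = 0xffffffff },
	/* park one mux select bit at 0 while the domain clocks are unmanaged */
	{ .offset = SRC_TOP3, .value = 0, .mask = BIT(4) },
};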
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h
new file mode 100644
index 000000000000..755ee8aaa3de
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __CLK_EXYNOS5_SUBCMU_H
+#define __CLK_EXYNOS5_SUBCMU_H
+
+struct exynos5_subcmu_reg_dump {
+ u32 offset;
+ u32 value;
+ u32 mask;
+ u32 save;
+};
+
+struct exynos5_subcmu_info {
+ const struct samsung_div_clock *div_clks;
+ unsigned int nr_div_clks;
+ const struct samsung_gate_clock *gate_clks;
+ unsigned int nr_gate_clks;
+ struct exynos5_subcmu_reg_dump *suspend_regs;
+ unsigned int nr_suspend_regs;
+ const char *pd_name;
+};
+
+void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
+ const struct exynos5_subcmu_info *cmu);
+
+#endif
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 9b073c98a891..347fd80c351b 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -18,6 +18,7 @@
#include "clk.h"
#include "clk-cpu.h"
+#include "clk-exynos5-subcmu.h"
#define APLL_LOCK 0x0
#define APLL_CON0 0x100
@@ -560,6 +561,8 @@ static const struct samsung_gate_clock exynos5250_gate_clks[] __initconst = {
0),
GATE(CLK_GSCL3, "gscl3", "mout_aclk266_gscl_sub", GATE_IP_GSCL, 3, 0,
0),
+ GATE(CLK_CAMIF_TOP, "camif_top", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 4, 0, 0),
GATE(CLK_GSCL_WA, "gscl_wa", "div_gscl_wa", GATE_IP_GSCL, 5, 0, 0),
GATE(CLK_GSCL_WB, "gscl_wb", "div_gscl_wb", GATE_IP_GSCL, 6, 0, 0),
GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "mout_aclk266_gscl_sub",
@@ -570,18 +573,11 @@ static const struct samsung_gate_clock exynos5250_gate_clks[] __initconst = {
GATE_IP_GSCL, 9, 0, 0),
GATE(CLK_SMMU_GSCL3, "smmu_gscl3", "mout_aclk266_gscl_sub",
GATE_IP_GSCL, 10, 0, 0),
+ GATE(CLK_SMMU_FIMC_LITE0, "smmu_fimc_lite0", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 11, 0, 0),
+ GATE(CLK_SMMU_FIMC_LITE1, "smmu_fimc_lite1", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 12, 0, 0),
- GATE(CLK_FIMD1, "fimd1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 0, 0,
- 0),
- GATE(CLK_MIE1, "mie1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 1, 0,
- 0),
- GATE(CLK_DSIM0, "dsim0", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 3, 0,
- 0),
- GATE(CLK_DP, "dp", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 4, 0, 0),
- GATE(CLK_MIXER, "mixer", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 5, 0,
- 0),
- GATE(CLK_HDMI, "hdmi", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 6, 0,
- 0),
GATE(CLK_MFC, "mfc", "mout_aclk333_sub", GATE_IP_MFC, 0, 0, 0),
GATE(CLK_SMMU_MFCR, "smmu_mfcr", "mout_aclk333_sub", GATE_IP_MFC, 1, 0,
@@ -671,10 +667,6 @@ static const struct samsung_gate_clock exynos5250_gate_clks[] __initconst = {
GATE(CLK_WDT, "wdt", "div_aclk66", GATE_IP_PERIS, 19, 0, 0),
GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0),
GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0),
- GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub",
- GATE_IP_DISP1, 9, 0, 0),
- GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub",
- GATE_IP_DISP1, 8, 0, 0),
GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0),
GATE(CLK_SMMU_FIMC_ISP, "smmu_fimc_isp", "mout_aclk_266_isp_sub",
GATE_IP_ISP0, 8, 0, 0),
@@ -698,48 +690,80 @@ static const struct samsung_gate_clock exynos5250_gate_clks[] __initconst = {
GATE_IP_ISP1, 7, 0, 0),
};
+static const struct samsung_gate_clock exynos5250_disp_gate_clks[] __initconst = {
+ GATE(CLK_FIMD1, "fimd1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 0, 0,
+ 0),
+ GATE(CLK_MIE1, "mie1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 1, 0,
+ 0),
+ GATE(CLK_DSIM0, "dsim0", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 3, 0,
+ 0),
+ GATE(CLK_DP, "dp", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 4, 0, 0),
+ GATE(CLK_MIXER, "mixer", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 5, 0,
+ 0),
+ GATE(CLK_HDMI, "hdmi", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 6, 0,
+ 0),
+ GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub",
+ GATE_IP_DISP1, 9, 0, 0),
+ GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub",
+ GATE_IP_DISP1, 8, 0, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5250_disp_suspend_regs[] = {
+ { GATE_IP_DISP1, 0xffffffff, 0xffffffff }, /* DISP1 gates */
+ { SRC_TOP3, 0, BIT(4) }, /* MUX mout_aclk200_disp1_sub */
+ { SRC_TOP3, 0, BIT(6) }, /* MUX mout_aclk300_disp1_sub */
+};
+
+static const struct exynos5_subcmu_info exynos5250_disp_subcmu = {
+ .gate_clks = exynos5250_disp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5250_disp_gate_clks),
+ .suspend_regs = exynos5250_disp_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5250_disp_suspend_regs),
+ .pd_name = "DISP1",
+};
+
static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
/* sorted in descending order */
/* PLL_36XX_RATE(rate, m, p, s, k) */
- PLL_36XX_RATE(266000000, 266, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 266000000, 266, 3, 3, 0),
/* Not in UM, but need for eDP on snow */
- PLL_36XX_RATE(70500000, 94, 2, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 70500000, 94, 2, 4, 0),
{ },
};
static const struct samsung_pll_rate_table epll_24mhz_tbl[] __initconst = {
/* sorted in descending order */
/* PLL_36XX_RATE(rate, m, p, s, k) */
- PLL_36XX_RATE(192000000, 64, 2, 2, 0),
- PLL_36XX_RATE(180633600, 90, 3, 2, 20762),
- PLL_36XX_RATE(180000000, 90, 3, 2, 0),
- PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
- PLL_36XX_RATE(67737600, 90, 2, 4, 20762),
- PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
- PLL_36XX_RATE(45158400, 90, 3, 4, 20762),
- PLL_36XX_RATE(32768000, 131, 3, 5, 4719),
+ PLL_36XX_RATE(24 * MHZ, 192000000, 64, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 180633605, 90, 3, 2, 20762),
+ PLL_36XX_RATE(24 * MHZ, 180000000, 90, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 73728000, 98, 2, 4, 19923),
+ PLL_36XX_RATE(24 * MHZ, 67737602, 90, 2, 4, 20762),
+ PLL_36XX_RATE(24 * MHZ, 49152000, 98, 3, 4, 19923),
+ PLL_36XX_RATE(24 * MHZ, 45158401, 90, 3, 4, 20762),
+ PLL_36XX_RATE(24 * MHZ, 32768001, 131, 3, 5, 4719),
{ },
};
static const struct samsung_pll_rate_table apll_24mhz_tbl[] __initconst = {
/* sorted in descending order */
- /* PLL_35XX_RATE(rate, m, p, s) */
- PLL_35XX_RATE(1700000000, 425, 6, 0),
- PLL_35XX_RATE(1600000000, 200, 3, 0),
- PLL_35XX_RATE(1500000000, 250, 4, 0),
- PLL_35XX_RATE(1400000000, 175, 3, 0),
- PLL_35XX_RATE(1300000000, 325, 6, 0),
- PLL_35XX_RATE(1200000000, 200, 4, 0),
- PLL_35XX_RATE(1100000000, 275, 6, 0),
- PLL_35XX_RATE(1000000000, 125, 3, 0),
- PLL_35XX_RATE(900000000, 150, 4, 0),
- PLL_35XX_RATE(800000000, 100, 3, 0),
- PLL_35XX_RATE(700000000, 175, 3, 1),
- PLL_35XX_RATE(600000000, 200, 4, 1),
- PLL_35XX_RATE(500000000, 125, 3, 1),
- PLL_35XX_RATE(400000000, 100, 3, 1),
- PLL_35XX_RATE(300000000, 200, 4, 2),
- PLL_35XX_RATE(200000000, 100, 3, 2),
+ /* PLL_35XX_RATE(fin, rate, m, p, s) */
+ PLL_35XX_RATE(24 * MHZ, 1700000000, 425, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1600000000, 200, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1500000000, 250, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1400000000, 175, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1300000000, 325, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1200000000, 200, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1100000000, 275, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1000000000, 125, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 900000000, 150, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 800000000, 100, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 700000000, 175, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 600000000, 200, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 500000000, 125, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 400000000, 100, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 300000000, 200, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 200000000, 100, 3, 2),
};
static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = {
@@ -859,10 +883,11 @@ static void __init exynos5250_clk_init(struct device_node *np)
__raw_writel(tmp, reg_base + PWR_CTRL2);
exynos5250_clk_sleep_init();
+ exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu);
samsung_clk_of_add_provider(np, ctx);
pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
_get_rate("div_arm2"));
}
-CLK_OF_DECLARE(exynos5250_clk, "samsung,exynos5250-clock", exynos5250_clk_init);
+CLK_OF_DECLARE_DRIVER(exynos5250_clk, "samsung,exynos5250-clock", exynos5250_clk_init);
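The switch from CLK_OF_DECLARE to CLK_OF_DECLARE_DRIVER here (and for exynos5420/5800 below) is what lets the exynos5-clock platform driver added above bind to the same device-tree node: the plain macro marks the node as handled so no platform device is created for it, while the _DRIVER variant leaves the node available for a later platform driver. A rough, illustrative expansion of the idea (an assumption about the macro, not its exact kernel definition):

static void __init exynos5250_clk_init_driver(struct device_node *np)
{
	/*
	 * Assumption: of_clk_init() flags matched nodes OF_POPULATED; clear it
	 * again so the OF core still creates a platform device for this node.
	 */
	of_node_clear_flag(np, OF_POPULATED);
	exynos5250_clk_init(np);
}
CLK_OF_DECLARE(exynos5250_clk, "samsung,exynos5250-clock", exynos5250_clk_init_driver);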
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index fd1d9bfc151b..2cc2583abd87 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -23,57 +23,57 @@
* DISP_PLL, EGL_PLL, KFC_PLL, MEM_PLL, BUS_PLL, MEDIA_PLL, G3D_PLL.
*/
static const struct samsung_pll_rate_table pll2550_24mhz_tbl[] __initconst = {
- PLL_35XX_RATE(1700000000, 425, 6, 0),
- PLL_35XX_RATE(1600000000, 200, 3, 0),
- PLL_35XX_RATE(1500000000, 250, 4, 0),
- PLL_35XX_RATE(1400000000, 175, 3, 0),
- PLL_35XX_RATE(1300000000, 325, 6, 0),
- PLL_35XX_RATE(1200000000, 400, 4, 1),
- PLL_35XX_RATE(1100000000, 275, 3, 1),
- PLL_35XX_RATE(1000000000, 250, 3, 1),
- PLL_35XX_RATE(933000000, 311, 4, 1),
- PLL_35XX_RATE(900000000, 300, 4, 1),
- PLL_35XX_RATE(800000000, 200, 3, 1),
- PLL_35XX_RATE(733000000, 733, 12, 1),
- PLL_35XX_RATE(700000000, 175, 3, 1),
- PLL_35XX_RATE(667000000, 667, 12, 1),
- PLL_35XX_RATE(633000000, 211, 4, 1),
- PLL_35XX_RATE(620000000, 310, 3, 2),
- PLL_35XX_RATE(600000000, 400, 4, 2),
- PLL_35XX_RATE(543000000, 362, 4, 2),
- PLL_35XX_RATE(533000000, 533, 6, 2),
- PLL_35XX_RATE(500000000, 250, 3, 2),
- PLL_35XX_RATE(450000000, 300, 4, 2),
- PLL_35XX_RATE(400000000, 200, 3, 2),
- PLL_35XX_RATE(350000000, 175, 3, 2),
- PLL_35XX_RATE(300000000, 400, 4, 3),
- PLL_35XX_RATE(266000000, 266, 3, 3),
- PLL_35XX_RATE(200000000, 200, 3, 3),
- PLL_35XX_RATE(160000000, 160, 3, 3),
+ PLL_35XX_RATE(24 * MHZ, 1700000000, 425, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1600000000, 200, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1500000000, 250, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1400000000, 175, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1300000000, 325, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1200000000, 400, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 1100000000, 275, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 1000000000, 250, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 933000000, 311, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 900000000, 300, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 800000000, 200, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 733000000, 733, 12, 1),
+ PLL_35XX_RATE(24 * MHZ, 700000000, 175, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 667000000, 667, 12, 1),
+ PLL_35XX_RATE(24 * MHZ, 633000000, 211, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 620000000, 310, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 600000000, 400, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 543000000, 362, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 533000000, 533, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 500000000, 250, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 450000000, 300, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 400000000, 200, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 350000000, 175, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 300000000, 400, 4, 3),
+ PLL_35XX_RATE(24 * MHZ, 266000000, 266, 3, 3),
+ PLL_35XX_RATE(24 * MHZ, 200000000, 200, 3, 3),
+ PLL_35XX_RATE(24 * MHZ, 160000000, 160, 3, 3),
};
/*
* Applicable for 2650 Type PLL for AUD_PLL.
*/
static const struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initconst = {
- PLL_36XX_RATE(1600000000, 200, 3, 0, 0),
- PLL_36XX_RATE(1200000000, 100, 2, 0, 0),
- PLL_36XX_RATE(1000000000, 250, 3, 1, 0),
- PLL_36XX_RATE(800000000, 200, 3, 1, 0),
- PLL_36XX_RATE(600000000, 100, 2, 1, 0),
- PLL_36XX_RATE(532000000, 266, 3, 2, 0),
- PLL_36XX_RATE(480000000, 160, 2, 2, 0),
- PLL_36XX_RATE(432000000, 144, 2, 2, 0),
- PLL_36XX_RATE(400000000, 200, 3, 2, 0),
- PLL_36XX_RATE(394073130, 459, 7, 2, 49282),
- PLL_36XX_RATE(333000000, 111, 2, 2, 0),
- PLL_36XX_RATE(300000000, 100, 2, 2, 0),
- PLL_36XX_RATE(266000000, 266, 3, 3, 0),
- PLL_36XX_RATE(200000000, 200, 3, 3, 0),
- PLL_36XX_RATE(166000000, 166, 3, 3, 0),
- PLL_36XX_RATE(133000000, 266, 3, 4, 0),
- PLL_36XX_RATE(100000000, 200, 3, 4, 0),
- PLL_36XX_RATE(66000000, 176, 2, 5, 0),
+ PLL_36XX_RATE(24 * MHZ, 1600000000, 200, 3, 0, 0),
+ PLL_36XX_RATE(24 * MHZ, 1200000000, 100, 2, 0, 0),
+ PLL_36XX_RATE(24 * MHZ, 1000000000, 250, 3, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 800000000, 200, 3, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 600000000, 100, 2, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 532000000, 266, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 480000000, 160, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 432000000, 144, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 400000000, 200, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 394073128, 459, 7, 2, 49282),
+ PLL_36XX_RATE(24 * MHZ, 333000000, 111, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 300000000, 100, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 266000000, 266, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 200000000, 200, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 166000000, 166, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 133000000, 266, 3, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 100000000, 200, 3, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 66000000, 176, 2, 5, 0),
};
/* CMU_AUD */
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
index fc471a49e8f4..0a0b09591e6f 100644
--- a/drivers/clk/samsung/clk-exynos5410.c
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -226,16 +226,16 @@ static const struct samsung_gate_clock exynos5410_gate_clks[] __initconst = {
};
static const struct samsung_pll_rate_table exynos5410_pll2550x_24mhz_tbl[] __initconst = {
- PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
- PLL_36XX_RATE(333000000U, 111, 2, 2, 0),
- PLL_36XX_RATE(300000000U, 100, 2, 2, 0),
- PLL_36XX_RATE(266000000U, 266, 3, 3, 0),
- PLL_36XX_RATE(200000000U, 200, 3, 3, 0),
- PLL_36XX_RATE(192000000U, 192, 3, 3, 0),
- PLL_36XX_RATE(166000000U, 166, 3, 3, 0),
- PLL_36XX_RATE(133000000U, 266, 3, 4, 0),
- PLL_36XX_RATE(100000000U, 200, 3, 4, 0),
- PLL_36XX_RATE(66000000U, 176, 2, 5, 0),
+ PLL_36XX_RATE(24 * MHZ, 400000000U, 200, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 333000000U, 111, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 300000000U, 100, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 266000000U, 266, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 200000000U, 200, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 192000000U, 192, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 166000000U, 166, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 133000000U, 266, 3, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 100000000U, 200, 3, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 66000000U, 176, 2, 5, 0),
};
static struct samsung_pll_clock exynos5410_plls[nr_plls] __initdata = {
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 45d34f601e9e..95e1bf69449b 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -19,6 +19,7 @@
#include "clk.h"
#include "clk-cpu.h"
+#include "clk-exynos5-subcmu.h"
#define APLL_LOCK 0x0
#define APLL_CON0 0x100
@@ -620,7 +621,8 @@ static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
MUX(CLK_MOUT_MX_MSPLL_CCORE, "mout_mx_mspll_ccore",
mout_group5_5800_p, SRC_TOP7, 16, 2),
- MUX(0, "mout_mau_epll_clk", mout_mau_epll_clk_p, SRC_TOP7, 20, 2),
+ MUX_F(0, "mout_mau_epll_clk", mout_mau_epll_clk_p, SRC_TOP7, 20, 2,
+ CLK_SET_RATE_PARENT, 0),
MUX(0, "mout_fimd1", mout_group3_p, SRC_DISP10, 4, 1),
};
@@ -863,7 +865,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
DIV(0, "dout_mipi1", "mout_mipi1", DIV_DISP10, 16, 8),
DIV(0, "dout_dp1", "mout_dp1", DIV_DISP10, 24, 4),
DIV(CLK_DOUT_PIXEL, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
- DIV(0, "dout_disp1_blk", "aclk200_disp1", DIV2_RATIO0, 16, 2),
DIV(CLK_DOUT_ACLK400_DISP1, "dout_aclk400_disp1",
"mout_aclk400_disp1", DIV_TOP2, 4, 3),
@@ -912,8 +913,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
DIV(0, "dout_spi1", "mout_spi1", DIV_PERIC1, 24, 4),
DIV(0, "dout_spi2", "mout_spi2", DIV_PERIC1, 28, 4),
- /* Mfc Block */
- DIV(0, "dout_mfc_blk", "mout_user_aclk333", DIV4_RATIO, 0, 2),
/* PCM */
DIV(0, "dout_pcm1", "dout_audio1", DIV_PERIC2, 16, 8),
@@ -932,8 +931,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
DIV(0, "dout_spi2_pre", "dout_spi2", DIV_PERIC4, 24, 8),
/* GSCL Block */
- DIV(0, "dout_gscl_blk_300", "mout_user_aclk300_gscl",
- DIV2_RATIO0, 4, 2),
DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),
/* MSCL Block */
@@ -1190,8 +1187,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_SCLK_GSCL_WB, "sclk_gscl_wb", "mout_user_aclk333_432_gscl",
GATE_TOP_SCLK_GSCL, 7, 0, 0),
- GATE(CLK_GSCL0, "gscl0", "aclk300_gscl", GATE_IP_GSCL0, 0, 0, 0),
- GATE(CLK_GSCL1, "gscl1", "aclk300_gscl", GATE_IP_GSCL0, 1, 0, 0),
GATE(CLK_FIMC_3AA, "fimc_3aa", "aclk333_432_gscl",
GATE_IP_GSCL0, 4, 0, 0),
GATE(CLK_FIMC_LITE0, "fimc_lite0", "aclk333_432_gscl",
@@ -1205,10 +1200,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE_IP_GSCL1, 3, 0, 0),
GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "dout_gscl_blk_333",
GATE_IP_GSCL1, 4, 0, 0),
- GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "dout_gscl_blk_300",
- GATE_IP_GSCL1, 6, 0, 0),
- GATE(CLK_SMMU_GSCL1, "smmu_gscl1", "dout_gscl_blk_300",
- GATE_IP_GSCL1, 7, 0, 0),
GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, 0, 0),
GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, 0, 0),
GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3,", "dout_gscl_blk_333",
@@ -1227,18 +1218,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
GATE_IP_MSCL, 10, 0, 0),
- GATE(CLK_FIMD1, "fimd1", "aclk300_disp1", GATE_IP_DISP1, 0, 0, 0),
- GATE(CLK_DSIM1, "dsim1", "aclk200_disp1", GATE_IP_DISP1, 3, 0, 0),
- GATE(CLK_DP1, "dp1", "aclk200_disp1", GATE_IP_DISP1, 4, 0, 0),
- GATE(CLK_MIXER, "mixer", "aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
- GATE(CLK_HDMI, "hdmi", "aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
- GATE(CLK_SMMU_FIMD1M0, "smmu_fimd1m0", "dout_disp1_blk",
- GATE_IP_DISP1, 7, 0, 0),
- GATE(CLK_SMMU_FIMD1M1, "smmu_fimd1m1", "dout_disp1_blk",
- GATE_IP_DISP1, 8, 0, 0),
- GATE(CLK_SMMU_MIXER, "smmu_mixer", "aclk200_disp1",
- GATE_IP_DISP1, 9, 0, 0),
-
/* ISP */
GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
@@ -1255,48 +1234,138 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_SCLK_ISP_SENSOR2, "sclk_isp_sensor2", "dout_isp_sensor2",
GATE_TOP_SCLK_ISP, 12, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
+};
+
+static const struct samsung_div_clock exynos5x_disp_div_clks[] __initconst = {
+ DIV(0, "dout_disp1_blk", "aclk200_disp1", DIV2_RATIO0, 16, 2),
+};
+
+static const struct samsung_gate_clock exynos5x_disp_gate_clks[] __initconst = {
+ GATE(CLK_FIMD1, "fimd1", "aclk300_disp1", GATE_IP_DISP1, 0, 0, 0),
+ GATE(CLK_DSIM1, "dsim1", "aclk200_disp1", GATE_IP_DISP1, 3, 0, 0),
+ GATE(CLK_DP1, "dp1", "aclk200_disp1", GATE_IP_DISP1, 4, 0, 0),
+ GATE(CLK_MIXER, "mixer", "aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
+ GATE(CLK_HDMI, "hdmi", "aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
+ GATE(CLK_SMMU_FIMD1M0, "smmu_fimd1m0", "dout_disp1_blk",
+ GATE_IP_DISP1, 7, 0, 0),
+ GATE(CLK_SMMU_FIMD1M1, "smmu_fimd1m1", "dout_disp1_blk",
+ GATE_IP_DISP1, 8, 0, 0),
+ GATE(CLK_SMMU_MIXER, "smmu_mixer", "aclk200_disp1",
+ GATE_IP_DISP1, 9, 0, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_disp_suspend_regs[] = {
+ { GATE_IP_DISP1, 0xffffffff, 0xffffffff }, /* DISP1 gates */
+ { SRC_TOP5, 0, BIT(0) }, /* MUX mout_user_aclk400_disp1 */
+ { SRC_TOP5, 0, BIT(24) }, /* MUX mout_user_aclk300_disp1 */
+ { SRC_TOP3, 0, BIT(8) }, /* MUX mout_user_aclk200_disp1 */
+ { DIV2_RATIO0, 0, 0x30000 }, /* DIV dout_disp1_blk */
+};
+
+static const struct samsung_div_clock exynos5x_gsc_div_clks[] __initconst = {
+ DIV(0, "dout_gscl_blk_300", "mout_user_aclk300_gscl",
+ DIV2_RATIO0, 4, 2),
+};
+
+static const struct samsung_gate_clock exynos5x_gsc_gate_clks[] __initconst = {
+ GATE(CLK_GSCL0, "gscl0", "aclk300_gscl", GATE_IP_GSCL0, 0, 0, 0),
+ GATE(CLK_GSCL1, "gscl1", "aclk300_gscl", GATE_IP_GSCL0, 1, 0, 0),
+ GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "dout_gscl_blk_300",
+ GATE_IP_GSCL1, 6, 0, 0),
+ GATE(CLK_SMMU_GSCL1, "smmu_gscl1", "dout_gscl_blk_300",
+ GATE_IP_GSCL1, 7, 0, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_gsc_suspend_regs[] = {
+ { GATE_IP_GSCL0, 0x3, 0x3 }, /* GSC gates */
+ { GATE_IP_GSCL1, 0xc0, 0xc0 }, /* GSC gates */
+ { SRC_TOP5, 0, BIT(28) }, /* MUX mout_user_aclk300_gscl */
+ { DIV2_RATIO0, 0, 0x30 }, /* DIV dout_gscl_blk_300 */
+};
+
+static const struct samsung_div_clock exynos5x_mfc_div_clks[] __initconst = {
+ DIV(0, "dout_mfc_blk", "mout_user_aclk333", DIV4_RATIO, 0, 2),
+};
+
+static const struct samsung_gate_clock exynos5x_mfc_gate_clks[] __initconst = {
GATE(CLK_MFC, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
GATE(CLK_SMMU_MFCL, "smmu_mfcl", "dout_mfc_blk", GATE_IP_MFC, 1, 0, 0),
GATE(CLK_SMMU_MFCR, "smmu_mfcr", "dout_mfc_blk", GATE_IP_MFC, 2, 0, 0),
+};
- GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
+static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
+ { GATE_IP_MFC, 0xffffffff, 0xffffffff }, /* MFC gates */
+ { SRC_TOP4, 0, BIT(28) }, /* MUX mout_user_aclk333 */
+ { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */
+};
+
+static const struct exynos5_subcmu_info exynos5x_subcmus[] = {
+ {
+ .div_clks = exynos5x_disp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
+ .gate_clks = exynos5x_disp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
+ .suspend_regs = exynos5x_disp_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
+ .pd_name = "DISP",
+ }, {
+ .div_clks = exynos5x_gsc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
+ .gate_clks = exynos5x_gsc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
+ .suspend_regs = exynos5x_gsc_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
+ .pd_name = "GSC",
+ }, {
+ .div_clks = exynos5x_mfc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
+ .gate_clks = exynos5x_mfc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
+ .suspend_regs = exynos5x_mfc_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
+ .pd_name = "MFC",
+ },
};
static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
- PLL_35XX_RATE(2000000000, 250, 3, 0),
- PLL_35XX_RATE(1900000000, 475, 6, 0),
- PLL_35XX_RATE(1800000000, 225, 3, 0),
- PLL_35XX_RATE(1700000000, 425, 6, 0),
- PLL_35XX_RATE(1600000000, 200, 3, 0),
- PLL_35XX_RATE(1500000000, 250, 4, 0),
- PLL_35XX_RATE(1400000000, 175, 3, 0),
- PLL_35XX_RATE(1300000000, 325, 6, 0),
- PLL_35XX_RATE(1200000000, 200, 2, 1),
- PLL_35XX_RATE(1100000000, 275, 3, 1),
- PLL_35XX_RATE(1000000000, 250, 3, 1),
- PLL_35XX_RATE(900000000, 150, 2, 1),
- PLL_35XX_RATE(800000000, 200, 3, 1),
- PLL_35XX_RATE(700000000, 175, 3, 1),
- PLL_35XX_RATE(600000000, 200, 2, 2),
- PLL_35XX_RATE(500000000, 250, 3, 2),
- PLL_35XX_RATE(400000000, 200, 3, 2),
- PLL_35XX_RATE(300000000, 200, 2, 3),
- PLL_35XX_RATE(200000000, 200, 3, 3),
+ PLL_35XX_RATE(24 * MHZ, 2000000000, 250, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1900000000, 475, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1800000000, 225, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1700000000, 425, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1600000000, 200, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1500000000, 250, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1400000000, 175, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1300000000, 325, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1200000000, 200, 2, 1),
+ PLL_35XX_RATE(24 * MHZ, 1100000000, 275, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 1000000000, 250, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 900000000, 150, 2, 1),
+ PLL_35XX_RATE(24 * MHZ, 800000000, 200, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 700000000, 175, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 600000000, 200, 2, 2),
+ PLL_35XX_RATE(24 * MHZ, 500000000, 250, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 400000000, 200, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 300000000, 200, 2, 3),
+ PLL_35XX_RATE(24 * MHZ, 200000000, 200, 3, 3),
};
static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = {
- PLL_36XX_RATE(600000000U, 100, 2, 1, 0),
- PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
- PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
- PLL_36XX_RATE(361267218U, 301, 5, 2, 3671),
- PLL_36XX_RATE(200000000U, 200, 3, 3, 0),
- PLL_36XX_RATE(196608001U, 197, 3, 3, -25690),
- PLL_36XX_RATE(180633609U, 301, 5, 3, 3671),
- PLL_36XX_RATE(131072006U, 131, 3, 3, 4719),
- PLL_36XX_RATE(100000000U, 200, 3, 4, 0),
- PLL_36XX_RATE( 65536003U, 131, 3, 4, 4719),
- PLL_36XX_RATE( 49152000U, 197, 3, 5, -25690),
- PLL_36XX_RATE( 32768001U, 131, 3, 5, 4719),
+ PLL_36XX_RATE(24 * MHZ, 600000000U, 100, 2, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 400000000U, 200, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 393216003U, 197, 3, 2, -25690),
+ PLL_36XX_RATE(24 * MHZ, 361267218U, 301, 5, 2, 3671),
+ PLL_36XX_RATE(24 * MHZ, 200000000U, 200, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 196608001U, 197, 3, 3, -25690),
+ PLL_36XX_RATE(24 * MHZ, 180633609U, 301, 5, 3, 3671),
+ PLL_36XX_RATE(24 * MHZ, 131072006U, 131, 3, 3, 4719),
+ PLL_36XX_RATE(24 * MHZ, 100000000U, 200, 3, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 73728000U, 98, 2, 4, 19923),
+ PLL_36XX_RATE(24 * MHZ, 67737602U, 90, 2, 4, 20762),
+ PLL_36XX_RATE(24 * MHZ, 65536003U, 131, 3, 4, 4719),
+ PLL_36XX_RATE(24 * MHZ, 49152000U, 197, 3, 5, -25690),
+ PLL_36XX_RATE(24 * MHZ, 45158401U, 90, 3, 4, 20762),
+ PLL_36XX_RATE(24 * MHZ, 32768001U, 131, 3, 5, 4719),
};
static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
@@ -1472,6 +1541,8 @@ static void __init exynos5x_clk_init(struct device_node *np,
exynos5420_kfcclk_d, ARRAY_SIZE(exynos5420_kfcclk_d), 0);
exynos5420_clk_sleep_init();
+ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
+ exynos5x_subcmus);
samsung_clk_of_add_provider(np, ctx);
}
@@ -1480,10 +1551,12 @@ static void __init exynos5420_clk_init(struct device_node *np)
{
exynos5x_clk_init(np, EXYNOS5420);
}
-CLK_OF_DECLARE(exynos5420_clk, "samsung,exynos5420-clock", exynos5420_clk_init);
+CLK_OF_DECLARE_DRIVER(exynos5420_clk, "samsung,exynos5420-clock",
+ exynos5420_clk_init);
static void __init exynos5800_clk_init(struct device_node *np)
{
exynos5x_clk_init(np, EXYNOS5800);
}
-CLK_OF_DECLARE(exynos5800_clk, "samsung,exynos5800-clock", exynos5800_clk_init);
+CLK_OF_DECLARE_DRIVER(exynos5800_clk, "samsung,exynos5800-clock",
+ exynos5800_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index db270908037a..5305ace514b2 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -703,68 +703,69 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
* & MPHY_PLL & G3D_PLL & DISP_PLL & ISP_PLL
*/
static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst = {
- PLL_35XX_RATE(2500000000U, 625, 6, 0),
- PLL_35XX_RATE(2400000000U, 500, 5, 0),
- PLL_35XX_RATE(2300000000U, 575, 6, 0),
- PLL_35XX_RATE(2200000000U, 550, 6, 0),
- PLL_35XX_RATE(2100000000U, 350, 4, 0),
- PLL_35XX_RATE(2000000000U, 500, 6, 0),
- PLL_35XX_RATE(1900000000U, 475, 6, 0),
- PLL_35XX_RATE(1800000000U, 375, 5, 0),
- PLL_35XX_RATE(1700000000U, 425, 6, 0),
- PLL_35XX_RATE(1600000000U, 400, 6, 0),
- PLL_35XX_RATE(1500000000U, 250, 4, 0),
- PLL_35XX_RATE(1400000000U, 350, 6, 0),
- PLL_35XX_RATE(1332000000U, 222, 4, 0),
- PLL_35XX_RATE(1300000000U, 325, 6, 0),
- PLL_35XX_RATE(1200000000U, 500, 5, 1),
- PLL_35XX_RATE(1100000000U, 550, 6, 1),
- PLL_35XX_RATE(1086000000U, 362, 4, 1),
- PLL_35XX_RATE(1066000000U, 533, 6, 1),
- PLL_35XX_RATE(1000000000U, 500, 6, 1),
- PLL_35XX_RATE(933000000U, 311, 4, 1),
- PLL_35XX_RATE(921000000U, 307, 4, 1),
- PLL_35XX_RATE(900000000U, 375, 5, 1),
- PLL_35XX_RATE(825000000U, 275, 4, 1),
- PLL_35XX_RATE(800000000U, 400, 6, 1),
- PLL_35XX_RATE(733000000U, 733, 12, 1),
- PLL_35XX_RATE(700000000U, 175, 3, 1),
- PLL_35XX_RATE(667000000U, 222, 4, 1),
- PLL_35XX_RATE(633000000U, 211, 4, 1),
- PLL_35XX_RATE(600000000U, 500, 5, 2),
- PLL_35XX_RATE(552000000U, 460, 5, 2),
- PLL_35XX_RATE(550000000U, 550, 6, 2),
- PLL_35XX_RATE(543000000U, 362, 4, 2),
- PLL_35XX_RATE(533000000U, 533, 6, 2),
- PLL_35XX_RATE(500000000U, 500, 6, 2),
- PLL_35XX_RATE(444000000U, 370, 5, 2),
- PLL_35XX_RATE(420000000U, 350, 5, 2),
- PLL_35XX_RATE(400000000U, 400, 6, 2),
- PLL_35XX_RATE(350000000U, 350, 6, 2),
- PLL_35XX_RATE(333000000U, 222, 4, 2),
- PLL_35XX_RATE(300000000U, 500, 5, 3),
- PLL_35XX_RATE(278000000U, 556, 6, 3),
- PLL_35XX_RATE(266000000U, 532, 6, 3),
- PLL_35XX_RATE(250000000U, 500, 6, 3),
- PLL_35XX_RATE(200000000U, 400, 6, 3),
- PLL_35XX_RATE(166000000U, 332, 6, 3),
- PLL_35XX_RATE(160000000U, 320, 6, 3),
- PLL_35XX_RATE(133000000U, 532, 6, 4),
- PLL_35XX_RATE(100000000U, 400, 6, 4),
+ PLL_35XX_RATE(24 * MHZ, 2500000000U, 625, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 2400000000U, 500, 5, 0),
+ PLL_35XX_RATE(24 * MHZ, 2300000000U, 575, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 2200000000U, 550, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 2100000000U, 350, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 2000000000U, 500, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1900000000U, 475, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1800000000U, 375, 5, 0),
+ PLL_35XX_RATE(24 * MHZ, 1700000000U, 425, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1600000000U, 400, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1500000000U, 250, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1400000000U, 350, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1332000000U, 222, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1300000000U, 325, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1200000000U, 500, 5, 1),
+ PLL_35XX_RATE(24 * MHZ, 1100000000U, 550, 6, 1),
+ PLL_35XX_RATE(24 * MHZ, 1086000000U, 362, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 1066000000U, 533, 6, 1),
+ PLL_35XX_RATE(24 * MHZ, 1000000000U, 500, 6, 1),
+ PLL_35XX_RATE(24 * MHZ, 933000000U, 311, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 921000000U, 307, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 900000000U, 375, 5, 1),
+ PLL_35XX_RATE(24 * MHZ, 825000000U, 275, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 800000000U, 400, 6, 1),
+ PLL_35XX_RATE(24 * MHZ, 733000000U, 733, 12, 1),
+ PLL_35XX_RATE(24 * MHZ, 700000000U, 175, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 666000000U, 222, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 633000000U, 211, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 600000000U, 500, 5, 2),
+ PLL_35XX_RATE(24 * MHZ, 552000000U, 460, 5, 2),
+ PLL_35XX_RATE(24 * MHZ, 550000000U, 550, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 543000000U, 362, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 533000000U, 533, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 500000000U, 500, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 444000000U, 370, 5, 2),
+ PLL_35XX_RATE(24 * MHZ, 420000000U, 350, 5, 2),
+ PLL_35XX_RATE(24 * MHZ, 400000000U, 400, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 350000000U, 350, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 333000000U, 222, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 300000000U, 500, 5, 3),
+ PLL_35XX_RATE(24 * MHZ, 278000000U, 556, 6, 3),
+ PLL_35XX_RATE(24 * MHZ, 266000000U, 532, 6, 3),
+ PLL_35XX_RATE(24 * MHZ, 250000000U, 500, 6, 3),
+ PLL_35XX_RATE(24 * MHZ, 200000000U, 400, 6, 3),
+ PLL_35XX_RATE(24 * MHZ, 166000000U, 332, 6, 3),
+ PLL_35XX_RATE(24 * MHZ, 160000000U, 320, 6, 3),
+ PLL_35XX_RATE(24 * MHZ, 133000000U, 532, 6, 4),
+ PLL_35XX_RATE(24 * MHZ, 100000000U, 400, 6, 4),
{ /* sentinel */ }
};
/* AUD_PLL */
static const struct samsung_pll_rate_table exynos5433_aud_pll_rates[] __initconst = {
- PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
- PLL_36XX_RATE(393216000U, 197, 3, 2, -25690),
- PLL_36XX_RATE(384000000U, 128, 2, 2, 0),
- PLL_36XX_RATE(368640000U, 246, 4, 2, -15729),
- PLL_36XX_RATE(361507200U, 181, 3, 2, -16148),
- PLL_36XX_RATE(338688000U, 113, 2, 2, -6816),
- PLL_36XX_RATE(294912000U, 98, 1, 3, 19923),
- PLL_36XX_RATE(288000000U, 96, 1, 3, 0),
- PLL_36XX_RATE(252000000U, 84, 1, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 400000000U, 200, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 393216003U, 197, 3, 2, -25690),
+ PLL_36XX_RATE(24 * MHZ, 384000000U, 128, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 368639991U, 246, 4, 2, -15729),
+ PLL_36XX_RATE(24 * MHZ, 361507202U, 181, 3, 2, -16148),
+ PLL_36XX_RATE(24 * MHZ, 338687988U, 113, 2, 2, -6816),
+ PLL_36XX_RATE(24 * MHZ, 294912002U, 98, 1, 3, 19923),
+ PLL_36XX_RATE(24 * MHZ, 288000000U, 96, 1, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 252000000U, 84, 1, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 196608001U, 197, 3, 3, -25690),
{ /* sentinel */ }
};
@@ -1672,7 +1673,7 @@ static const struct samsung_gate_clock peric_gate_clks[] __initconst = {
ENABLE_SCLK_PERIC, 11, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_IOCLK_I2S1_BCLK, "sclk_ioclk_i2s1_bclk",
"ioclk_i2s1_bclk_in", ENABLE_SCLK_PERIC, 10,
- CLK_SET_RATE_PARENT, 0),
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_SPDIF, "sclk_spdif", "sclk_spdif_peric",
ENABLE_SCLK_PERIC, 8, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric",
@@ -5513,10 +5514,8 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg_base)) {
- dev_err(dev, "failed to map registers\n");
+ if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
- }
for (i = 0; i < info->nr_clk_ids; ++i)
ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index 5931a4140c3d..492d51691080 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -140,7 +140,7 @@ static const struct samsung_div_clock topc_div_clks[] __initconst = {
};
static const struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initconst = {
- PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
+ PLL_36XX_RATE(24 * MHZ, 491519897, 20, 1, 0, 31457),
{},
};
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index 61eb8abbfd9c..ca57b3dfa814 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -41,35 +41,62 @@ enum samsung_pll_type {
pll_1460x,
};
-#define PLL_35XX_RATE(_rate, _m, _p, _s) \
+#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
+ ((u64)(_fin) * (BIT(_ks) * (_m) + (_k)) / BIT(_ks) / ((_p) << (_s)))
+#define PLL_VALID_RATE(_fin, _fout, _m, _p, _s, _k, _ks) ((_fout) + \
+ BUILD_BUG_ON_ZERO(PLL_RATE(_fin, _m, _p, _s, _k, _ks) != (_fout)))
+
+#define PLL_35XX_RATE(_fin, _rate, _m, _p, _s) \
+ { \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ _m, _p, _s, 0, 16), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ }
+
+#define PLL_S3C2410_MPLL_RATE(_fin, _rate, _m, _p, _s) \
+ { \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ _m + 8, _p + 2, _s, 0, 16), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ }
+
+#define PLL_S3C2440_MPLL_RATE(_fin, _rate, _m, _p, _s) \
{ \
- .rate = (_rate), \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ 2 * (_m + 8), _p + 2, _s, 0, 16), \
.mdiv = (_m), \
.pdiv = (_p), \
.sdiv = (_s), \
}
-#define PLL_36XX_RATE(_rate, _m, _p, _s, _k) \
+#define PLL_36XX_RATE(_fin, _rate, _m, _p, _s, _k) \
{ \
- .rate = (_rate), \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ _m, _p, _s, _k, 16), \
.mdiv = (_m), \
.pdiv = (_p), \
.sdiv = (_s), \
.kdiv = (_k), \
}
-#define PLL_45XX_RATE(_rate, _m, _p, _s, _afc) \
+#define PLL_4508_RATE(_fin, _rate, _m, _p, _s, _afc) \
{ \
- .rate = (_rate), \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ _m, _p, _s - 1, 0, 16), \
.mdiv = (_m), \
.pdiv = (_p), \
.sdiv = (_s), \
.afc = (_afc), \
}
-#define PLL_4600_RATE(_rate, _m, _p, _s, _k, _vsel) \
+#define PLL_4600_RATE(_fin, _rate, _m, _p, _s, _k, _vsel) \
{ \
- .rate = (_rate), \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ _m, _p, _s, _k, 16), \
.mdiv = (_m), \
.pdiv = (_p), \
.sdiv = (_s), \
@@ -77,9 +104,10 @@ enum samsung_pll_type {
.vsel = (_vsel), \
}
-#define PLL_4650_RATE(_rate, _m, _p, _s, _k, _mfr, _mrr, _vsel) \
+#define PLL_4650_RATE(_fin, _rate, _m, _p, _s, _k, _mfr, _mrr, _vsel) \
{ \
- .rate = (_rate), \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ _m, _p, _s, _k, 10), \
.mdiv = (_m), \
.pdiv = (_p), \
.sdiv = (_s), \
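The reworked rate macros now take the PLL input frequency and re-derive the output rate at compile time: PLL_RATE() computes fout = fin * (2^ks * m + k) / 2^ks / (p << s) in 64-bit arithmetic, and PLL_VALID_RATE() wraps that in BUILD_BUG_ON_ZERO(), so a table entry whose quoted rate does not match its M/P/S/K values breaks the build. That is why several table rates earlier in this patch were corrected to the exact truncated values (for example the exynos7 entry becomes 491519897 instead of 491520000). A standalone rendering of the same arithmetic, handy for checking entries by hand (userspace sketch, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same formula as PLL_RATE(): fout = fin * (2^ks * m + k) / 2^ks / (p << s) */
static uint64_t pll_rate(uint64_t fin, unsigned int m, unsigned int p,
			 unsigned int s, int k, unsigned int ks)
{
	return fin * ((UINT64_C(1) << ks) * m + k) / (UINT64_C(1) << ks)
		/ ((uint64_t)p << s);
}

int main(void)
{
	/* exynos5433 entry: 24 MHz, M=625 P=6 S=0 -> exactly 2.5 GHz */
	assert(pll_rate(24000000, 625, 6, 0, 0, 16) == 2500000000ULL);

	/* exynos7 pll1460x entry: the exact result is 491519897, not the
	 * previously quoted 491520000, hence the table fixup above */
	printf("%llu\n", (unsigned long long)
	       pll_rate(24000000, 20, 1, 0, 31457, 16));
	return 0;
}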
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index e0650c33863b..a9c887475054 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -95,7 +95,7 @@ static void __init s3c2410_clk_sleep_init(void) {}
PNAME(fclk_p) = { "mpll", "div_slow" };
-struct samsung_mux_clock s3c2410_common_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2410_common_muxes[] __initdata = {
MUX(FCLK, "fclk", fclk_p, CLKSLOW, 4, 1),
};
@@ -111,12 +111,12 @@ static struct clk_div_table divslow_d[] = {
{ /* sentinel */ },
};
-struct samsung_div_clock s3c2410_common_dividers[] __initdata = {
+static struct samsung_div_clock s3c2410_common_dividers[] __initdata = {
DIV_T(0, "div_slow", "xti", CLKSLOW, 0, 3, divslow_d),
DIV(PCLK, "pclk", "hclk", CLKDIVN, 0, 1),
};
-struct samsung_gate_clock s3c2410_common_gates[] __initdata = {
+static struct samsung_gate_clock s3c2410_common_gates[] __initdata = {
GATE(PCLK_SPI, "spi", "pclk", CLKCON, 18, 0, 0),
GATE(PCLK_I2S, "i2s", "pclk", CLKCON, 17, 0, 0),
GATE(PCLK_I2C, "i2c", "pclk", CLKCON, 16, 0, 0),
@@ -135,7 +135,7 @@ struct samsung_gate_clock s3c2410_common_gates[] __initdata = {
};
/* should be added _after_ the soc-specific clocks are created */
-struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
ALIAS(PCLK_I2C, "s3c2410-i2c.0", "i2c"),
ALIAS(PCLK_ADC, NULL, "adc"),
ALIAS(PCLK_RTC, NULL, "rtc"),
@@ -162,34 +162,34 @@ struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
/* sorted in descending order */
/* 2410A extras */
- PLL_35XX_RATE(270000000, 127, 1, 1),
- PLL_35XX_RATE(268000000, 126, 1, 1),
- PLL_35XX_RATE(266000000, 125, 1, 1),
- PLL_35XX_RATE(226000000, 105, 1, 1),
- PLL_35XX_RATE(210000000, 132, 2, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 270000000, 127, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 268000000, 126, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 266000000, 125, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 226000000, 105, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 210000000, 132, 2, 1),
/* 2410 common */
- PLL_35XX_RATE(203000000, 161, 3, 1),
- PLL_35XX_RATE(192000000, 88, 1, 1),
- PLL_35XX_RATE(186000000, 85, 1, 1),
- PLL_35XX_RATE(180000000, 82, 1, 1),
- PLL_35XX_RATE(170000000, 77, 1, 1),
- PLL_35XX_RATE(158000000, 71, 1, 1),
- PLL_35XX_RATE(152000000, 68, 1, 1),
- PLL_35XX_RATE(147000000, 90, 2, 1),
- PLL_35XX_RATE(135000000, 82, 2, 1),
- PLL_35XX_RATE(124000000, 116, 1, 2),
- PLL_35XX_RATE(118000000, 150, 2, 2),
- PLL_35XX_RATE(113000000, 105, 1, 2),
- PLL_35XX_RATE(101000000, 127, 2, 2),
- PLL_35XX_RATE(90000000, 112, 2, 2),
- PLL_35XX_RATE(85000000, 105, 2, 2),
- PLL_35XX_RATE(79000000, 71, 1, 2),
- PLL_35XX_RATE(68000000, 82, 2, 2),
- PLL_35XX_RATE(56000000, 142, 2, 3),
- PLL_35XX_RATE(48000000, 120, 2, 3),
- PLL_35XX_RATE(51000000, 161, 3, 3),
- PLL_35XX_RATE(45000000, 82, 1, 3),
- PLL_35XX_RATE(34000000, 82, 2, 3),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 202800000, 161, 3, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 192000000, 88, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 186000000, 85, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 180000000, 82, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 170000000, 77, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 158000000, 71, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 152000000, 68, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 147000000, 90, 2, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 135000000, 82, 2, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 124000000, 116, 1, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 118500000, 150, 2, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 113000000, 105, 1, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 101250000, 127, 2, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 90000000, 112, 2, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 84750000, 105, 2, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 79000000, 71, 1, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 67500000, 82, 2, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 56250000, 142, 2, 3),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 48000000, 120, 2, 3),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 50700000, 161, 3, 3),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 45000000, 82, 1, 3),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 33750000, 82, 2, 3),
{ /* sentinel */ },
};
@@ -200,11 +200,11 @@ static struct samsung_pll_clock s3c2410_plls[] __initdata = {
LOCKTIME, UPLLCON, NULL),
};
-struct samsung_div_clock s3c2410_dividers[] __initdata = {
+static struct samsung_div_clock s3c2410_dividers[] __initdata = {
DIV(HCLK, "hclk", "mpll", CLKDIVN, 1, 1),
};
-struct samsung_fixed_factor_clock s3c2410_ffactor[] __initdata = {
+static struct samsung_fixed_factor_clock s3c2410_ffactor[] __initdata = {
/*
* armclk is directly supplied by the fclk, without
* switching possibility like on the s3c244x below.
@@ -215,7 +215,7 @@ struct samsung_fixed_factor_clock s3c2410_ffactor[] __initdata = {
FFACTOR(UCLK, "uclk", "upll", 1, 1, 0),
};
-struct samsung_clock_alias s3c2410_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2410_aliases[] __initdata = {
ALIAS(PCLK_UART0, "s3c2410-uart.0", "uart"),
ALIAS(PCLK_UART1, "s3c2410-uart.1", "uart"),
ALIAS(PCLK_UART2, "s3c2410-uart.2", "uart"),
@@ -229,33 +229,33 @@ struct samsung_clock_alias s3c2410_aliases[] __initdata = {
static struct samsung_pll_rate_table pll_s3c244x_12mhz_tbl[] __initdata = {
/* sorted in descending order */
- PLL_35XX_RATE(400000000, 0x5c, 1, 1),
- PLL_35XX_RATE(390000000, 0x7a, 2, 1),
- PLL_35XX_RATE(380000000, 0x57, 1, 1),
- PLL_35XX_RATE(370000000, 0xb1, 4, 1),
- PLL_35XX_RATE(360000000, 0x70, 2, 1),
- PLL_35XX_RATE(350000000, 0xa7, 4, 1),
- PLL_35XX_RATE(340000000, 0x4d, 1, 1),
- PLL_35XX_RATE(330000000, 0x66, 2, 1),
- PLL_35XX_RATE(320000000, 0x98, 4, 1),
- PLL_35XX_RATE(310000000, 0x93, 4, 1),
- PLL_35XX_RATE(300000000, 0x75, 3, 1),
- PLL_35XX_RATE(240000000, 0x70, 1, 2),
- PLL_35XX_RATE(230000000, 0x6b, 1, 2),
- PLL_35XX_RATE(220000000, 0x66, 1, 2),
- PLL_35XX_RATE(210000000, 0x84, 2, 2),
- PLL_35XX_RATE(200000000, 0x5c, 1, 2),
- PLL_35XX_RATE(190000000, 0x57, 1, 2),
- PLL_35XX_RATE(180000000, 0x70, 2, 2),
- PLL_35XX_RATE(170000000, 0x4d, 1, 2),
- PLL_35XX_RATE(160000000, 0x98, 4, 2),
- PLL_35XX_RATE(150000000, 0x75, 3, 2),
- PLL_35XX_RATE(120000000, 0x70, 1, 3),
- PLL_35XX_RATE(110000000, 0x66, 1, 3),
- PLL_35XX_RATE(100000000, 0x5c, 1, 3),
- PLL_35XX_RATE(90000000, 0x70, 2, 3),
- PLL_35XX_RATE(80000000, 0x98, 4, 3),
- PLL_35XX_RATE(75000000, 0x75, 3, 3),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 400000000, 0x5c, 1, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 390000000, 0x7a, 2, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 380000000, 0x57, 1, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 370000000, 0xb1, 4, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 360000000, 0x70, 2, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 350000000, 0xa7, 4, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 340000000, 0x4d, 1, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 330000000, 0x66, 2, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 320000000, 0x98, 4, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 310000000, 0x93, 4, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 300000000, 0x75, 3, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 240000000, 0x70, 1, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 230000000, 0x6b, 1, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 220000000, 0x66, 1, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 210000000, 0x84, 2, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 200000000, 0x5c, 1, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 190000000, 0x57, 1, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 180000000, 0x70, 2, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 170000000, 0x4d, 1, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 160000000, 0x98, 4, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 150000000, 0x75, 3, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 120000000, 0x70, 1, 3),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 110000000, 0x66, 1, 3),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 100000000, 0x5c, 1, 3),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 90000000, 0x70, 2, 3),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 80000000, 0x98, 4, 3),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 75000000, 0x75, 3, 3),
{ /* sentinel */ },
};
@@ -269,12 +269,12 @@ static struct samsung_pll_clock s3c244x_common_plls[] __initdata = {
PNAME(hclk_p) = { "fclk", "div_hclk_2", "div_hclk_4", "div_hclk_3" };
PNAME(armclk_p) = { "fclk", "hclk" };
-struct samsung_mux_clock s3c244x_common_muxes[] __initdata = {
+static struct samsung_mux_clock s3c244x_common_muxes[] __initdata = {
MUX(HCLK, "hclk", hclk_p, CLKDIVN, 1, 2),
MUX(ARMCLK, "armclk", armclk_p, CAMDIVN, 12, 1),
};
-struct samsung_fixed_factor_clock s3c244x_common_ffactor[] __initdata = {
+static struct samsung_fixed_factor_clock s3c244x_common_ffactor[] __initdata = {
FFACTOR(0, "div_hclk_2", "fclk", 1, 2, 0),
FFACTOR(0, "ff_cam", "div_cam", 2, 1, CLK_SET_RATE_PARENT),
};
@@ -291,7 +291,7 @@ static struct clk_div_table div_hclk_3_d[] = {
{ /* sentinel */ },
};
-struct samsung_div_clock s3c244x_common_dividers[] __initdata = {
+static struct samsung_div_clock s3c244x_common_dividers[] __initdata = {
DIV(UCLK, "uclk", "upll", CLKDIVN, 3, 1),
DIV(0, "div_hclk", "fclk", CLKDIVN, 1, 1),
DIV_T(0, "div_hclk_4", "fclk", CAMDIVN, 9, 1, div_hclk_4_d),
@@ -299,11 +299,11 @@ struct samsung_div_clock s3c244x_common_dividers[] __initdata = {
DIV(0, "div_cam", "upll", CAMDIVN, 0, 3),
};
-struct samsung_gate_clock s3c244x_common_gates[] __initdata = {
+static struct samsung_gate_clock s3c244x_common_gates[] __initdata = {
GATE(HCLK_CAM, "cam", "hclk", CLKCON, 19, 0, 0),
};
-struct samsung_clock_alias s3c244x_common_aliases[] __initdata = {
+static struct samsung_clock_alias s3c244x_common_aliases[] __initdata = {
ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"),
ALIAS(PCLK_UART1, "s3c2440-uart.1", "uart"),
ALIAS(PCLK_UART2, "s3c2440-uart.2", "uart"),
@@ -318,23 +318,23 @@ struct samsung_clock_alias s3c244x_common_aliases[] __initdata = {
PNAME(s3c2440_camif_p) = { "upll", "ff_cam" };
-struct samsung_mux_clock s3c2440_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2440_muxes[] __initdata = {
MUX(CAMIF, "camif", s3c2440_camif_p, CAMDIVN, 4, 1),
};
-struct samsung_gate_clock s3c2440_gates[] __initdata = {
+static struct samsung_gate_clock s3c2440_gates[] __initdata = {
GATE(PCLK_AC97, "ac97", "pclk", CLKCON, 20, 0, 0),
};
/* S3C2442 specific clocks */
-struct samsung_fixed_factor_clock s3c2442_ffactor[] __initdata = {
+static struct samsung_fixed_factor_clock s3c2442_ffactor[] __initdata = {
FFACTOR(0, "upll_3", "upll", 1, 3, 0),
};
PNAME(s3c2442_camif_p) = { "upll", "ff_cam", "upll", "upll_3" };
-struct samsung_mux_clock s3c2442_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2442_muxes[] __initdata = {
MUX(CAMIF, "camif", s3c2442_camif_p, CAMDIVN, 4, 2),
};
@@ -343,7 +343,7 @@ struct samsung_mux_clock s3c2442_muxes[] __initdata = {
* Only necessary until the devicetree-move is complete
*/
#define XTI 1
-struct samsung_fixed_rate_clock s3c2410_common_frate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock s3c2410_common_frate_clks[] __initdata = {
FRATE(XTI, "xti", NULL, 0, 0),
};
@@ -468,18 +468,18 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
static void __init s3c2410_clk_init(struct device_node *np)
{
- s3c2410_common_clk_init(np, 0, S3C2410, 0);
+ s3c2410_common_clk_init(np, 0, S3C2410, NULL);
}
CLK_OF_DECLARE(s3c2410_clk, "samsung,s3c2410-clock", s3c2410_clk_init);
static void __init s3c2440_clk_init(struct device_node *np)
{
- s3c2410_common_clk_init(np, 0, S3C2440, 0);
+ s3c2410_common_clk_init(np, 0, S3C2440, NULL);
}
CLK_OF_DECLARE(s3c2440_clk, "samsung,s3c2440-clock", s3c2440_clk_init);
static void __init s3c2442_clk_init(struct device_node *np)
{
- s3c2410_common_clk_init(np, 0, S3C2442, 0);
+ s3c2410_common_clk_init(np, 0, S3C2442, NULL);
}
CLK_OF_DECLARE(s3c2442_clk, "samsung,s3c2442-clock", s3c2442_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index b8340a49921b..6bc94d3aff78 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -27,11 +27,6 @@
#define CLKSRC 0x1c
#define SWRST 0x30
-/* list of PLLs to be registered */
-enum s3c2412_plls {
- mpll, upll,
-};
-
static void __iomem *reg_base;
#ifdef CONFIG_PM_SLEEP
@@ -98,7 +93,7 @@ static struct clk_div_table divxti_d[] = {
{ /* sentinel */ },
};
-struct samsung_div_clock s3c2412_dividers[] __initdata = {
+static struct samsung_div_clock s3c2412_dividers[] __initdata = {
DIV_T(0, "div_xti", "xti", CLKSRC, 0, 3, divxti_d),
DIV(0, "div_cam", "mux_cam", CLKDIVN, 16, 4),
DIV(0, "div_i2s", "mux_i2s", CLKDIVN, 12, 4),
@@ -110,7 +105,7 @@ struct samsung_div_clock s3c2412_dividers[] __initdata = {
DIV(HCLK, "hclk", "armdiv", CLKDIVN, 0, 2),
};
-struct samsung_fixed_factor_clock s3c2412_ffactor[] __initdata = {
+static struct samsung_fixed_factor_clock s3c2412_ffactor[] __initdata = {
FFACTOR(0, "ff_hclk", "hclk", 2, 1, CLK_SET_RATE_PARENT),
};
@@ -130,7 +125,7 @@ PNAME(msysclk_p) = { "mdivclk", "mpll" };
PNAME(mdivclk_p) = { "xti", "div_xti" };
PNAME(armclk_p) = { "armdiv", "hclk" };
-struct samsung_mux_clock s3c2412_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2412_muxes[] __initdata = {
MUX(0, "erefclk", erefclk_p, CLKSRC, 14, 2),
MUX(0, "urefclk", urefclk_p, CLKSRC, 12, 2),
MUX(0, "mux_cam", camclk_p, CLKSRC, 11, 1),
@@ -144,13 +139,11 @@ struct samsung_mux_clock s3c2412_muxes[] __initdata = {
};
static struct samsung_pll_clock s3c2412_plls[] __initdata = {
- [mpll] = PLL(pll_s3c2440_mpll, MPLL, "mpll", "xti",
- LOCKTIME, MPLLCON, NULL),
- [upll] = PLL(pll_s3c2410_upll, UPLL, "upll", "urefclk",
- LOCKTIME, UPLLCON, NULL),
+ PLL(pll_s3c2440_mpll, MPLL, "mpll", "xti", LOCKTIME, MPLLCON, NULL),
+ PLL(pll_s3c2410_upll, UPLL, "upll", "urefclk", LOCKTIME, UPLLCON, NULL),
};
-struct samsung_gate_clock s3c2412_gates[] __initdata = {
+static struct samsung_gate_clock s3c2412_gates[] __initdata = {
GATE(PCLK_WDT, "wdt", "pclk", CLKCON, 28, 0, 0),
GATE(PCLK_SPI, "spi", "pclk", CLKCON, 27, 0, 0),
GATE(PCLK_I2S, "i2s", "pclk", CLKCON, 26, 0, 0),
@@ -181,7 +174,7 @@ struct samsung_gate_clock s3c2412_gates[] __initdata = {
GATE(HCLK_DMA0, "dma0", "hclk", CLKCON, 0, CLK_IGNORE_UNUSED, 0),
};
-struct samsung_clock_alias s3c2412_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2412_aliases[] __initdata = {
ALIAS(PCLK_UART0, "s3c2412-uart.0", "uart"),
ALIAS(PCLK_UART1, "s3c2412-uart.1", "uart"),
ALIAS(PCLK_UART2, "s3c2412-uart.2", "uart"),
@@ -231,7 +224,7 @@ static struct notifier_block s3c2412_restart_handler = {
* Only necessary until the devicetree-move is complete
*/
#define XTI 1
-struct samsung_fixed_rate_clock s3c2412_common_frate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock s3c2412_common_frate_clks[] __initdata = {
FRATE(XTI, "xti", NULL, 0, 0),
FRATE(0, "ext", NULL, 0, 0),
};
@@ -296,6 +289,6 @@ void __init s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f,
static void __init s3c2412_clk_init(struct device_node *np)
{
- s3c2412_common_clk_init(np, 0, 0, 0);
+ s3c2412_common_clk_init(np, 0, 0, NULL);
}
CLK_OF_DECLARE(s3c2412_clk, "samsung,s3c2412-clock", s3c2412_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index d94b85a42356..c46e6d5bc9bc 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -41,11 +41,6 @@ enum supported_socs {
S3C2450,
};
-/* list of PLLs to be registered */
-enum s3c2443_plls {
- mpll, epll,
-};
-
static void __iomem *reg_base;
#ifdef CONFIG_PM_SLEEP
@@ -113,7 +108,7 @@ PNAME(msysclk_p) = { "mpllref", "mpll" };
PNAME(armclk_p) = { "armdiv" , "hclk" };
PNAME(i2s0_p) = { "div_i2s0", "ext_i2s", "epllref", "epllref" };
-struct samsung_mux_clock s3c2443_common_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2443_common_muxes[] __initdata = {
MUX(0, "epllref", epllref_p, CLKSRC, 7, 2),
MUX(ESYSCLK, "esysclk", esysclk_p, CLKSRC, 6, 1),
MUX(0, "mpllref", mpllref_p, CLKSRC, 3, 1),
@@ -141,7 +136,7 @@ static struct clk_div_table mdivclk_d[] = {
{ /* sentinel */ },
};
-struct samsung_div_clock s3c2443_common_dividers[] __initdata = {
+static struct samsung_div_clock s3c2443_common_dividers[] __initdata = {
DIV_T(0, "mdivclk", "xti", CLKDIV0, 6, 3, mdivclk_d),
DIV(0, "prediv", "msysclk", CLKDIV0, 4, 2),
DIV_T(HCLK, "hclk", "prediv", CLKDIV0, 0, 2, hclk_d),
@@ -154,7 +149,7 @@ struct samsung_div_clock s3c2443_common_dividers[] __initdata = {
DIV(0, "div_usbhost", "esysclk", CLKDIV1, 4, 2),
};
-struct samsung_gate_clock s3c2443_common_gates[] __initdata = {
+static struct samsung_gate_clock s3c2443_common_gates[] __initdata = {
GATE(SCLK_HSMMC_EXT, "sclk_hsmmcext", "ext", SCLKCON, 13, 0, 0),
GATE(SCLK_HSMMC1, "sclk_hsmmc1", "div_hsmmc1", SCLKCON, 12, 0, 0),
GATE(SCLK_FIMD, "sclk_fimd", "div_fimd", SCLKCON, 10, 0, 0),
@@ -188,7 +183,7 @@ struct samsung_gate_clock s3c2443_common_gates[] __initdata = {
GATE(PCLK_UART0, "uart0", "pclk", PCLKCON, 0, 0, 0),
};
-struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
ALIAS(MSYSCLK, NULL, "msysclk"),
ALIAS(ARMCLK, NULL, "armclk"),
ALIAS(MPLL, NULL, "mpll"),
@@ -225,10 +220,8 @@ struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
/* S3C2416 specific clocks */
static struct samsung_pll_clock s3c2416_pll_clks[] __initdata = {
- [mpll] = PLL(pll_6552_s3c2416, MPLL, "mpll", "mpllref",
- LOCKCON0, MPLLCON, NULL),
- [epll] = PLL(pll_6553, EPLL, "epll", "epllref",
- LOCKCON1, EPLLCON, NULL),
+ PLL(pll_6552_s3c2416, MPLL, "mpll", "mpllref", LOCKCON0, MPLLCON, NULL),
+ PLL(pll_6553, EPLL, "epll", "epllref", LOCKCON1, EPLLCON, NULL),
};
PNAME(s3c2416_hsmmc0_p) = { "sclk_hsmmc0", "sclk_hsmmcext" };
@@ -245,19 +238,19 @@ static struct clk_div_table armdiv_s3c2416_d[] = {
{ /* sentinel */ },
};
-struct samsung_div_clock s3c2416_dividers[] __initdata = {
+static struct samsung_div_clock s3c2416_dividers[] __initdata = {
DIV_T(ARMDIV, "armdiv", "msysclk", CLKDIV0, 9, 3, armdiv_s3c2416_d),
DIV(0, "div_hsspi0_mpll", "msysclk", CLKDIV2, 0, 4),
DIV(0, "div_hsmmc0", "esysclk", CLKDIV2, 6, 2),
};
-struct samsung_mux_clock s3c2416_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2416_muxes[] __initdata = {
MUX(MUX_HSMMC0, "mux_hsmmc0", s3c2416_hsmmc0_p, CLKSRC, 16, 1),
MUX(MUX_HSMMC1, "mux_hsmmc1", s3c2416_hsmmc1_p, CLKSRC, 17, 1),
MUX(MUX_HSSPI0, "mux_hsspi0", s3c2416_hsspi0_p, CLKSRC, 18, 1),
};
-struct samsung_gate_clock s3c2416_gates[] __initdata = {
+static struct samsung_gate_clock s3c2416_gates[] __initdata = {
GATE(0, "hsspi0_mpll", "div_hsspi0_mpll", SCLKCON, 19, 0, 0),
GATE(0, "hsspi0_epll", "div_hsspi0_epll", SCLKCON, 14, 0, 0),
GATE(0, "sclk_hsmmc0", "div_hsmmc0", SCLKCON, 6, 0, 0),
@@ -267,7 +260,7 @@ struct samsung_gate_clock s3c2416_gates[] __initdata = {
GATE(PCLK_PCM, "pcm", "pclk", PCLKCON, 19, 0, 0),
};
-struct samsung_clock_alias s3c2416_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2416_aliases[] __initdata = {
ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "hsmmc"),
ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "mmc_busclk.0"),
ALIAS(MUX_HSMMC0, "s3c-sdhci.0", "mmc_busclk.2"),
@@ -279,10 +272,8 @@ struct samsung_clock_alias s3c2416_aliases[] __initdata = {
/* S3C2443 specific clocks */
static struct samsung_pll_clock s3c2443_pll_clks[] __initdata = {
- [mpll] = PLL(pll_3000, MPLL, "mpll", "mpllref",
- LOCKCON0, MPLLCON, NULL),
- [epll] = PLL(pll_2126, EPLL, "epll", "epllref",
- LOCKCON1, EPLLCON, NULL),
+ PLL(pll_3000, MPLL, "mpll", "mpllref", LOCKCON0, MPLLCON, NULL),
+ PLL(pll_2126, EPLL, "epll", "epllref", LOCKCON1, EPLLCON, NULL),
};
static struct clk_div_table armdiv_s3c2443_d[] = {
@@ -297,12 +288,12 @@ static struct clk_div_table armdiv_s3c2443_d[] = {
{ /* sentinel */ },
};
-struct samsung_div_clock s3c2443_dividers[] __initdata = {
+static struct samsung_div_clock s3c2443_dividers[] __initdata = {
DIV_T(ARMDIV, "armdiv", "msysclk", CLKDIV0, 9, 4, armdiv_s3c2443_d),
DIV(0, "div_cam", "esysclk", CLKDIV1, 26, 4),
};
-struct samsung_gate_clock s3c2443_gates[] __initdata = {
+static struct samsung_gate_clock s3c2443_gates[] __initdata = {
GATE(SCLK_HSSPI0, "sclk_hsspi0", "div_hsspi0_epll", SCLKCON, 14, 0, 0),
GATE(SCLK_CAM, "sclk_cam", "div_cam", SCLKCON, 11, 0, 0),
GATE(HCLK_CFC, "cfc", "hclk", HCLKCON, 17, CLK_IGNORE_UNUSED, 0),
@@ -311,7 +302,7 @@ struct samsung_gate_clock s3c2443_gates[] __initdata = {
GATE(PCLK_SDI, "sdi", "pclk", PCLKCON, 5, 0, 0),
};
-struct samsung_clock_alias s3c2443_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2443_aliases[] __initdata = {
ALIAS(SCLK_HSSPI0, "s3c2443-spi.0", "spi_busclk2"),
ALIAS(SCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.2"),
ALIAS(SCLK_CAM, NULL, "camif-upll"),
@@ -327,20 +318,20 @@ PNAME(s3c2450_cam_p) = { "div_cam", "hclk" };
PNAME(s3c2450_hsspi1_p) = { "hsspi1_epll", "hsspi1_mpll" };
PNAME(i2s1_p) = { "div_i2s1", "ext_i2s", "epllref", "epllref" };
-struct samsung_div_clock s3c2450_dividers[] __initdata = {
+static struct samsung_div_clock s3c2450_dividers[] __initdata = {
DIV(0, "div_cam", "esysclk", CLKDIV1, 26, 4),
DIV(0, "div_hsspi1_epll", "esysclk", CLKDIV2, 24, 2),
DIV(0, "div_hsspi1_mpll", "msysclk", CLKDIV2, 16, 4),
DIV(0, "div_i2s1", "esysclk", CLKDIV2, 12, 4),
};
-struct samsung_mux_clock s3c2450_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2450_muxes[] __initdata = {
MUX(0, "mux_cam", s3c2450_cam_p, CLKSRC, 20, 1),
MUX(MUX_HSSPI1, "mux_hsspi1", s3c2450_hsspi1_p, CLKSRC, 19, 1),
MUX(0, "mux_i2s1", i2s1_p, CLKSRC, 12, 2),
};
-struct samsung_gate_clock s3c2450_gates[] __initdata = {
+static struct samsung_gate_clock s3c2450_gates[] __initdata = {
GATE(SCLK_I2S1, "sclk_i2s1", "div_i2s1", SCLKCON, 5, 0, 0),
GATE(HCLK_CFC, "cfc", "hclk", HCLKCON, 17, 0, 0),
GATE(HCLK_CAM, "cam", "hclk", HCLKCON, 8, 0, 0),
@@ -351,7 +342,7 @@ struct samsung_gate_clock s3c2450_gates[] __initdata = {
GATE(PCLK_SPI1, "spi1", "pclk", PCLKCON, 14, 0, 0),
};
-struct samsung_clock_alias s3c2450_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2450_aliases[] __initdata = {
ALIAS(PCLK_SPI1, "s3c2443-spi.1", "spi"),
ALIAS(PCLK_SPI1, "s3c2443-spi.1", "spi_busclk0"),
ALIAS(MUX_HSSPI1, "s3c2443-spi.1", "spi_busclk2"),
@@ -374,7 +365,7 @@ static struct notifier_block s3c2443_restart_handler = {
* fixed rate clocks generated outside the soc
* Only necessary until the devicetree-move is complete
*/
-struct samsung_fixed_rate_clock s3c2443_common_frate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock s3c2443_common_frate_clks[] __initdata = {
FRATE(0, "xti", NULL, 0, 0),
FRATE(0, "ext", NULL, 0, 0),
FRATE(0, "ext_i2s", NULL, 0, 0),
@@ -470,18 +461,18 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
static void __init s3c2416_clk_init(struct device_node *np)
{
- s3c2443_common_clk_init(np, 0, S3C2416, 0);
+ s3c2443_common_clk_init(np, 0, S3C2416, NULL);
}
CLK_OF_DECLARE(s3c2416_clk, "samsung,s3c2416-clock", s3c2416_clk_init);
static void __init s3c2443_clk_init(struct device_node *np)
{
- s3c2443_common_clk_init(np, 0, S3C2443, 0);
+ s3c2443_common_clk_init(np, 0, S3C2443, NULL);
}
CLK_OF_DECLARE(s3c2443_clk, "samsung,s3c2443-clock", s3c2443_clk_init);
static void __init s3c2450_clk_init(struct device_node *np)
{
- s3c2443_common_clk_init(np, 0, S3C2450, 0);
+ s3c2443_common_clk_init(np, 0, S3C2450, NULL);
}
CLK_OF_DECLARE(s3c2450_clk, "samsung,s3c2450-clock", s3c2450_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index 7306867a0ab8..6db01cf5ab83 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -56,11 +56,6 @@
#define GATE_ON(_id, cname, pname, o, b) \
GATE(_id, cname, pname, o, b, CLK_IGNORE_UNUSED, 0)
-/* list of PLLs to be registered */
-enum s3c64xx_plls {
- apll, mpll, epll,
-};
-
static void __iomem *reg_base;
static bool is_s3c6400;
@@ -364,12 +359,12 @@ GATE_CLOCKS(s3c6410_gate_clks) __initdata = {
/* List of PLL clocks. */
static struct samsung_pll_clock s3c64xx_pll_clks[] __initdata = {
- [apll] = PLL(pll_6552, FOUT_APLL, "fout_apll", "fin_pll",
- APLL_LOCK, APLL_CON, NULL),
- [mpll] = PLL(pll_6552, FOUT_MPLL, "fout_mpll", "fin_pll",
- MPLL_LOCK, MPLL_CON, NULL),
- [epll] = PLL(pll_6553, FOUT_EPLL, "fout_epll", "fin_pll",
- EPLL_LOCK, EPLL_CON0, NULL),
+ PLL(pll_6552, FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON, NULL),
+ PLL(pll_6552, FOUT_MPLL, "fout_mpll", "fin_pll",
+ MPLL_LOCK, MPLL_CON, NULL),
+ PLL(pll_6553, FOUT_EPLL, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, NULL),
};
/* Aliases for common s3c64xx clocks. */
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
index 9146c20fe21f..ce5aa7802eb8 100644
--- a/drivers/clk/socfpga/Makefile
+++ b/drivers/clk/socfpga/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += clk.o
-obj-y += clk-gate.o
-obj-y += clk-pll.o
-obj-y += clk-periph.o
-obj-y += clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o
+obj-$(CONFIG_ARCH_SOCFPGA) += clk.o clk-gate.o clk-pll.o clk-periph.o
+obj-$(CONFIG_ARCH_SOCFPGA) += clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o
+obj-$(CONFIG_ARCH_STRATIX10) += clk-s10.o
+obj-$(CONFIG_ARCH_STRATIX10) += clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o
diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
new file mode 100644
index 000000000000..eee2d48ab656
--- /dev/null
+++ b/drivers/clk/socfpga/clk-gate-s10.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017, Intel Corporation
+ */
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include "stratix10-clk.h"
+#include "clk.h"
+
+#define SOCFPGA_CS_PDBG_CLK "cs_pdbg_clk"
+#define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw)
+
+static unsigned long socfpga_gate_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ u32 div = 1, val;
+
+ if (socfpgaclk->fixed_div) {
+ div = socfpgaclk->fixed_div;
+ } else if (socfpgaclk->div_reg) {
+ val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
+ val &= GENMASK(socfpgaclk->width - 1, 0);
+ div = (1 << val);
+ }
+ return parent_rate / div;
+}
+
+static unsigned long socfpga_dbg_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ u32 div = 1, val;
+
+ val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
+ val &= GENMASK(socfpgaclk->width - 1, 0);
+ div = (1 << val);
+ div = div ? 4 : 1;
+
+ return parent_rate / div;
+}
+
+static u8 socfpga_gate_get_parent(struct clk_hw *hwclk)
+{
+ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ u32 mask;
+ u8 parent = 0;
+
+ if (socfpgaclk->bypass_reg) {
+ mask = (0x1 << socfpgaclk->bypass_shift);
+ parent = ((readl(socfpgaclk->bypass_reg) & mask) >>
+ socfpgaclk->bypass_shift);
+ }
+ return parent;
+}
+
+static struct clk_ops gateclk_ops = {
+ .recalc_rate = socfpga_gate_clk_recalc_rate,
+ .get_parent = socfpga_gate_get_parent,
+};
+
+static const struct clk_ops dbgclk_ops = {
+ .recalc_rate = socfpga_dbg_clk_recalc_rate,
+ .get_parent = socfpga_gate_get_parent,
+};
+
+struct clk *s10_register_gate(const char *name, const char *parent_name,
+ const char * const *parent_names,
+ u8 num_parents, unsigned long flags,
+ void __iomem *regbase, unsigned long gate_reg,
+ unsigned long gate_idx, unsigned long div_reg,
+ unsigned long div_offset, u8 div_width,
+ unsigned long bypass_reg, u8 bypass_shift,
+ u8 fixed_div)
+{
+ struct clk *clk;
+ struct socfpga_gate_clk *socfpga_clk;
+ struct clk_init_data init;
+
+ socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
+ if (!socfpga_clk)
+ return NULL;
+
+ socfpga_clk->hw.reg = regbase + gate_reg;
+ socfpga_clk->hw.bit_idx = gate_idx;
+
+ gateclk_ops.enable = clk_gate_ops.enable;
+ gateclk_ops.disable = clk_gate_ops.disable;
+
+ socfpga_clk->fixed_div = fixed_div;
+
+ if (div_reg)
+ socfpga_clk->div_reg = regbase + div_reg;
+ else
+ socfpga_clk->div_reg = NULL;
+
+ socfpga_clk->width = div_width;
+ socfpga_clk->shift = div_offset;
+
+ if (bypass_reg)
+ socfpga_clk->bypass_reg = regbase + bypass_reg;
+ else
+ socfpga_clk->bypass_reg = NULL;
+ socfpga_clk->bypass_shift = bypass_shift;
+
+ if (streq(name, "cs_pdbg_clk"))
+ init.ops = &dbgclk_ops;
+ else
+ init.ops = &gateclk_ops;
+
+ init.name = name;
+ init.flags = flags;
+
+ init.num_parents = num_parents;
+ init.parent_names = parent_names ? parent_names : &parent_name;
+ socfpga_clk->hw.hw.init = &init;
+
+ clk = clk_register(NULL, &socfpga_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(socfpga_clk);
+ return NULL;
+ }
+
+ return clk;
+}
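In this new gate driver the optional divider is encoded as a power of two: recalc extracts a width-bit field at `shift` from `div_reg` and divides the parent rate by 2^field (a fixed divider, when set, takes precedence), while the separate cs_pdbg_clk ops end up applying a divide-by-4 of cs_at_clk. A tiny standalone rendering of the generic decode, with made-up register values for illustration:

#include <stdint.h>
#include <stdio.h>

/* Sketch of socfpga_gate_clk_recalc_rate()'s divider decode (illustrative). */
static unsigned long gate_rate(unsigned long parent_rate, uint32_t div_reg_val,
			       unsigned int shift, unsigned int width,
			       unsigned int fixed_div)
{
	uint32_t div = 1, val;

	if (fixed_div) {
		div = fixed_div;
	} else {
		val = (div_reg_val >> shift) & ((1u << width) - 1);
		div = 1u << val;		/* divider is 2^field */
	}
	return parent_rate / div;
}

int main(void)
{
	/* hypothetical: 400 MHz parent, 2-bit field at bit 8 holding 2 -> /4 */
	printf("%lu\n", gate_rate(400000000UL, 2u << 8, 8, 2, 0));
	return 0;
}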
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
new file mode 100644
index 000000000000..568f59b58ddf
--- /dev/null
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017, Intel Corporation
+ */
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+
+#include "stratix10-clk.h"
+#include "clk.h"
+
+#define CLK_MGR_FREE_SHIFT 16
+#define CLK_MGR_FREE_MASK 0x7
+#define SWCTRLBTCLKSEN_SHIFT 8
+
+#define to_periph_clk(p) container_of(p, struct socfpga_periph_clk, hw.hw)
+
+static unsigned long clk_peri_c_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_periph_clk *socfpgaclk = to_periph_clk(hwclk);
+ unsigned long div = 1;
+ u32 val;
+
+ val = readl(socfpgaclk->hw.reg);
+ val &= GENMASK(SWCTRLBTCLKSEN_SHIFT - 1, 0);
+ parent_rate /= val;
+
+ return parent_rate / div;
+}
+
+static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_periph_clk *socfpgaclk = to_periph_clk(hwclk);
+ unsigned long div = 1;
+
+ if (socfpgaclk->fixed_div) {
+ div = socfpgaclk->fixed_div;
+ } else {
+ if (!socfpgaclk->bypass_reg)
+ div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
+ }
+
+ return parent_rate / div;
+}
+
+static u8 clk_periclk_get_parent(struct clk_hw *hwclk)
+{
+ struct socfpga_periph_clk *socfpgaclk = to_periph_clk(hwclk);
+ u32 clk_src, mask;
+ u8 parent;
+
+ if (socfpgaclk->bypass_reg) {
+ mask = (0x1 << socfpgaclk->bypass_shift);
+ parent = ((readl(socfpgaclk->bypass_reg) & mask) >>
+ socfpgaclk->bypass_shift);
+ } else {
+ clk_src = readl(socfpgaclk->hw.reg);
+ parent = (clk_src >> CLK_MGR_FREE_SHIFT) &
+ CLK_MGR_FREE_MASK;
+ }
+ return parent;
+}
+
+static const struct clk_ops peri_c_clk_ops = {
+ .recalc_rate = clk_peri_c_clk_recalc_rate,
+ .get_parent = clk_periclk_get_parent,
+};
+
+static const struct clk_ops peri_cnt_clk_ops = {
+ .recalc_rate = clk_peri_cnt_clk_recalc_rate,
+ .get_parent = clk_periclk_get_parent,
+};
+
+struct clk *s10_register_periph(const char *name, const char *parent_name,
+ const char * const *parent_names,
+ u8 num_parents, unsigned long flags,
+ void __iomem *reg, unsigned long offset)
+{
+ struct clk *clk;
+ struct socfpga_periph_clk *periph_clk;
+ struct clk_init_data init;
+
+ periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
+ if (WARN_ON(!periph_clk))
+ return NULL;
+
+ periph_clk->hw.reg = reg + offset;
+
+ init.name = name;
+ init.ops = &peri_c_clk_ops;
+ init.flags = flags;
+
+ init.num_parents = num_parents;
+ init.parent_names = parent_names ? parent_names : &parent_name;
+
+ periph_clk->hw.hw.init = &init;
+
+ clk = clk_register(NULL, &periph_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(periph_clk);
+ return NULL;
+ }
+ return clk;
+}
+
+struct clk *s10_register_cnt_periph(const char *name, const char *parent_name,
+ const char * const *parent_names,
+ u8 num_parents, unsigned long flags,
+ void __iomem *regbase, unsigned long offset,
+ u8 fixed_divider, unsigned long bypass_reg,
+ unsigned long bypass_shift)
+{
+ struct clk *clk;
+ struct socfpga_periph_clk *periph_clk;
+ struct clk_init_data init;
+
+ periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
+ if (WARN_ON(!periph_clk))
+ return NULL;
+
+ if (offset)
+ periph_clk->hw.reg = regbase + offset;
+ else
+ periph_clk->hw.reg = NULL;
+
+ if (bypass_reg)
+ periph_clk->bypass_reg = regbase + bypass_reg;
+ else
+ periph_clk->bypass_reg = NULL;
+ periph_clk->bypass_shift = bypass_shift;
+ periph_clk->fixed_div = fixed_divider;
+
+ init.name = name;
+ init.ops = &peri_cnt_clk_ops;
+ init.flags = flags;
+
+ init.num_parents = num_parents;
+ init.parent_names = parent_names ? parent_names : &parent_name;
+
+ periph_clk->hw.hw.init = &init;
+
+ clk = clk_register(NULL, &periph_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(periph_clk);
+ return NULL;
+ }
+ return clk;
+}
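The two peripheral variants decode their dividers differently: the "c" clocks divide the parent by the raw value in the low byte of the register (the bits below SWCTRLBTCLKSEN_SHIFT), while the counter clocks divide by (reg & 0x7ff) + 1 unless a bypass register or fixed divider is in play. A quick illustration of the counter-clock case with hypothetical values:

#include <stdint.h>
#include <stdio.h>

/* Sketch of clk_peri_cnt_clk_recalc_rate() for the common case (no bypass
 * register, no fixed divider): divider is the 11-bit counter field plus one. */
static unsigned long cnt_rate(unsigned long parent_rate, uint32_t reg_val)
{
	unsigned long div = (reg_val & 0x7ff) + 1;

	return parent_rate / div;
}

int main(void)
{
	/* hypothetical: 1 GHz parent, counter field 3 -> divide by 4 */
	printf("%lu\n", cnt_rate(1000000000UL, 3));
	return 0;
}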
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
new file mode 100644
index 000000000000..2d5d8b43727e
--- /dev/null
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017, Intel Corporation
+ */
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+
+#include "stratix10-clk.h"
+#include "clk.h"
+
+/* Clock Manager offsets */
+#define CLK_MGR_PLL_CLK_SRC_SHIFT 16
+#define CLK_MGR_PLL_CLK_SRC_MASK 0x3
+
+/* PLL Clock enable bits */
+#define SOCFPGA_PLL_POWER 0
+#define SOCFPGA_PLL_RESET_MASK 0x2
+#define SOCFPGA_PLL_REFDIV_MASK 0x00003F00
+#define SOCFPGA_PLL_REFDIV_SHIFT 8
+#define SOCFPGA_PLL_MDIV_MASK 0xFF000000
+#define SOCFPGA_PLL_MDIV_SHIFT 24
+#define SWCTRLBTCLKSEL_MASK 0x200
+#define SWCTRLBTCLKSEL_SHIFT 9
+
+#define SOCFPGA_BOOT_CLK "boot_clk"
+
+#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ unsigned long mdiv;
+ unsigned long refdiv;
+ unsigned long reg;
+ unsigned long long vco_freq;
+
+ /* read VCO1 reg for numerator and denominator */
+ reg = readl(socfpgaclk->hw.reg);
+ refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
+ vco_freq = (unsigned long long)parent_rate / refdiv;
+
+ /* Read mdiv and fdiv from the fdbck register */
+ reg = readl(socfpgaclk->hw.reg + 0x4);
+ mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
+ vco_freq = (unsigned long long)parent_rate * (mdiv + 6);
+
+ return (unsigned long)vco_freq;
+}
+
+static unsigned long clk_boot_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ u32 div = 1;
+
+ div = ((readl(socfpgaclk->hw.reg) &
+ SWCTRLBTCLKSEL_MASK) >>
+ SWCTRLBTCLKSEL_SHIFT);
+ div += 1;
+ return parent_rate /= div;
+}
+
+
+static u8 clk_pll_get_parent(struct clk_hw *hwclk)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ u32 pll_src;
+
+ pll_src = readl(socfpgaclk->hw.reg);
+ return (pll_src >> CLK_MGR_PLL_CLK_SRC_SHIFT) &
+ CLK_MGR_PLL_CLK_SRC_MASK;
+}
+
+static u8 clk_boot_get_parent(struct clk_hw *hwclk)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ u32 pll_src;
+
+ pll_src = readl(socfpgaclk->hw.reg);
+ return (pll_src >> SWCTRLBTCLKSEL_SHIFT) &
+ SWCTRLBTCLKSEL_MASK;
+}
+
+static int clk_pll_prepare(struct clk_hw *hwclk)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ u32 reg;
+
+ /* Bring PLL out of reset */
+ reg = readl(socfpgaclk->hw.reg);
+ reg |= SOCFPGA_PLL_RESET_MASK;
+ writel(reg, socfpgaclk->hw.reg);
+
+ return 0;
+}
+
+static struct clk_ops clk_pll_ops = {
+ .recalc_rate = clk_pll_recalc_rate,
+ .get_parent = clk_pll_get_parent,
+ .prepare = clk_pll_prepare,
+};
+
+static struct clk_ops clk_boot_ops = {
+ .recalc_rate = clk_boot_clk_recalc_rate,
+ .get_parent = clk_boot_get_parent,
+ .prepare = clk_pll_prepare,
+};
+
+struct clk *s10_register_pll(const char *name, const char * const *parent_names,
+ u8 num_parents, unsigned long flags,
+ void __iomem *reg, unsigned long offset)
+{
+ struct clk *clk;
+ struct socfpga_pll *pll_clk;
+ struct clk_init_data init;
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (WARN_ON(!pll_clk))
+ return NULL;
+
+ pll_clk->hw.reg = reg + offset;
+
+ if (streq(name, SOCFPGA_BOOT_CLK))
+ init.ops = &clk_boot_ops;
+ else
+ init.ops = &clk_pll_ops;
+
+ init.name = name;
+ init.flags = flags;
+
+ init.num_parents = num_parents;
+ init.parent_names = parent_names;
+ pll_clk->hw.hw.init = &init;
+
+ pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER;
+ clk_pll_ops.enable = clk_gate_ops.enable;
+ clk_pll_ops.disable = clk_gate_ops.disable;
+
+ clk = clk_register(NULL, &pll_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(pll_clk);
+ return NULL;
+ }
+ return clk;
+}
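Note that, as committed, clk_pll_recalc_rate() effectively reports parent_rate * (mdiv + 6): the refdiv-based value computed first is overwritten by the second assignment, so only the feedback-divider field of the fdbck register reaches the result. A quick worked example with hypothetical register contents:

#include <stdint.h>
#include <stdio.h>

/* Effective Stratix 10 PLL rate calculation as in clk_pll_recalc_rate()
 * above (sketch); mdiv sits in bits 31:24 of the fdbck register. */
static unsigned long pll_vco_rate(unsigned long parent_rate, uint32_t fdbck_reg)
{
	unsigned long mdiv = (fdbck_reg & 0xFF000000u) >> 24;

	return parent_rate * (mdiv + 6);
}

int main(void)
{
	/* hypothetical: 25 MHz reference with the mdiv field programmed to 78 */
	printf("%lu\n", pll_vco_rate(25000000UL, 78u << 24));	/* 2100000000 */
	return 0;
}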
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
new file mode 100644
index 000000000000..3a11c382a663
--- /dev/null
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017, Intel Corporation
+ */
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/stratix10-clock.h>
+
+#include "stratix10-clk.h"
+
+static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk",
+ "f2s_free_clk",};
+static const char * const cntr_mux[] = { "main_pll", "periph_pll",
+ "osc1", "cb_intosc_hs_div2_clk",
+ "f2s_free_clk"};
+static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",};
+
+static const char * const noc_free_mux[] = {"main_noc_base_clk",
+ "peri_noc_base_clk",
+ "osc1", "cb_intosc_hs_div2_clk",
+ "f2s_free_clk"};
+
+static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
+static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
+static const char * const emac_ptp_free_mux[] = {"peri_emac_ptp_clk", "boot_clk"};
+static const char * const gpio_db_free_mux[] = {"peri_gpio_db_clk", "boot_clk"};
+static const char * const sdmmc_free_mux[] = {"peri_sdmmc_clk", "boot_clk"};
+static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"};
+static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
+static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
+
+static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
+static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
+static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
+
+/* clocks in AO (always on) controller */
+static const struct stratix10_pll_clock s10_pll_clks[] = {
+ { STRATIX10_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
+ 0x0},
+ { STRATIX10_MAIN_PLL_CLK, "main_pll", pll_mux, ARRAY_SIZE(pll_mux),
+ 0, 0x74},
+ { STRATIX10_PERIPH_PLL_CLK, "periph_pll", pll_mux, ARRAY_SIZE(pll_mux),
+ 0, 0xe4},
+};
+
+static const struct stratix10_perip_c_clock s10_main_perip_c_clks[] = {
+ { STRATIX10_MAIN_MPU_BASE_CLK, "main_mpu_base_clk", "main_pll", NULL, 1, 0, 0x84},
+ { STRATIX10_MAIN_NOC_BASE_CLK, "main_noc_base_clk", "main_pll", NULL, 1, 0, 0x88},
+ { STRATIX10_PERI_MPU_BASE_CLK, "peri_mpu_base_clk", "periph_pll", NULL, 1, 0,
+ 0xF4},
+ { STRATIX10_PERI_NOC_BASE_CLK, "peri_noc_base_clk", "periph_pll", NULL, 1, 0,
+ 0xF8},
+};
+
+static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
+ { STRATIX10_MPU_FREE_CLK, "mpu_free_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0x48, 0, 0, 0},
+ { STRATIX10_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
+ 0, 0x4C, 0, 0, 0},
+ { STRATIX10_MAIN_EMACA_CLK, "main_emaca_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x50, 0, 0, 0},
+ { STRATIX10_MAIN_EMACB_CLK, "main_emacb_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x54, 0, 0, 0},
+ { STRATIX10_MAIN_EMAC_PTP_CLK, "main_emac_ptp_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x58, 0, 0, 0},
+ { STRATIX10_MAIN_GPIO_DB_CLK, "main_gpio_db_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x5C, 0, 0, 0},
+ { STRATIX10_MAIN_SDMMC_CLK, "main_sdmmc_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x60, 0, 0, 0},
+ { STRATIX10_MAIN_S2F_USR0_CLK, "main_s2f_usr0_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0x64, 0, 0, 0},
+ { STRATIX10_MAIN_S2F_USR1_CLK, "main_s2f_usr1_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x68, 0, 0, 0},
+ { STRATIX10_MAIN_PSI_REF_CLK, "main_psi_ref_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x6C, 0, 0, 0},
+ { STRATIX10_PERI_EMACA_CLK, "peri_emaca_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0xBC, 0, 0, 0},
+ { STRATIX10_PERI_EMACB_CLK, "peri_emacb_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0xC0, 0, 0, 0},
+ { STRATIX10_PERI_EMAC_PTP_CLK, "peri_emac_ptp_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0xC4, 0, 0, 0},
+ { STRATIX10_PERI_GPIO_DB_CLK, "peri_gpio_db_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0xC8, 0, 0, 0},
+ { STRATIX10_PERI_SDMMC_CLK, "peri_sdmmc_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0xCC, 0, 0, 0},
+ { STRATIX10_PERI_S2F_USR0_CLK, "peri_s2f_usr0_clk", "peri_noc_base_clk", NULL, 1, 0,
+ 0xD0, 0, 0, 0},
+ { STRATIX10_PERI_S2F_USR1_CLK, "peri_s2f_usr1_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0xD4, 0, 0, 0},
+ { STRATIX10_PERI_PSI_REF_CLK, "peri_psi_ref_clk", "peri_noc_base_clk", NULL, 1, 0,
+ 0xD8, 0, 0, 0},
+ { STRATIX10_L4_SYS_FREE_CLK, "l4_sys_free_clk", "noc_free_clk", NULL, 1, 0,
+ 0, 4, 0, 0},
+ { STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
+ 0, 0, 0, 0x3C, 1},
+ { STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
+ 0, 0, 4, 0xB0, 0},
+ { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
+ 0, 0, 4, 0xB0, 1},
+ { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
+ ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
+ { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
+ ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3},
+ { STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
+ ARRAY_SIZE(sdmmc_free_mux), 0, 0, 0, 0xB0, 4},
+ { STRATIX10_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, s2f_usr1_free_mux,
+ ARRAY_SIZE(s2f_usr1_free_mux), 0, 0, 0, 0xB0, 5},
+ { STRATIX10_PSI_REF_FREE_CLK, "psi_ref_free_clk", NULL, psi_ref_free_mux,
+ ARRAY_SIZE(psi_ref_free_mux), 0, 0, 0, 0xB0, 6},
+};
+
+static const struct stratix10_gate_clock s10_gate_clks[] = {
+ { STRATIX10_MPU_CLK, "mpu_clk", NULL, mpu_mux, ARRAY_SIZE(mpu_mux), 0, 0x30,
+ 0, 0, 0, 0, 0x3C, 0, 0},
+ { STRATIX10_MPU_PERIPH_CLK, "mpu_periph_clk", "mpu_clk", NULL, 1, 0, 0x30,
+ 0, 0, 0, 0, 0, 0, 4},
+ { STRATIX10_MPU_L2RAM_CLK, "mpu_l2ram_clk", "mpu_clk", NULL, 1, 0, 0x30,
+ 0, 0, 0, 0, 0, 0, 2},
+ { STRATIX10_L4_MAIN_CLK, "l4_main_clk", "noc_clk", NULL, 1, 0, 0x30,
+ 1, 0x70, 0, 2, 0, 0, 0},
+ { STRATIX10_L4_MP_CLK, "l4_mp_clk", "noc_clk", NULL, 1, 0, 0x30,
+ 2, 0x70, 8, 2, 0, 0, 0},
+ { STRATIX10_L4_SP_CLK, "l4_sp_clk", "noc_clk", NULL, 1, CLK_IS_CRITICAL, 0x30,
+ 3, 0x70, 16, 2, 0, 0, 0},
+ { STRATIX10_CS_AT_CLK, "cs_at_clk", "noc_clk", NULL, 1, 0, 0x30,
+ 4, 0x70, 24, 2, 0, 0, 0},
+ { STRATIX10_CS_TRACE_CLK, "cs_trace_clk", "noc_clk", NULL, 1, 0, 0x30,
+ 4, 0x70, 26, 2, 0, 0, 0},
+ { STRATIX10_CS_PDBG_CLK, "cs_pdbg_clk", "cs_at_clk", NULL, 1, 0, 0x30,
+ 4, 0x70, 28, 1, 0, 0, 0},
+ { STRATIX10_CS_TIMER_CLK, "cs_timer_clk", "noc_clk", NULL, 1, 0, 0x30,
+ 5, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x30,
+ 6, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0xA4,
+ 0, 0, 0, 0, 0xDC, 26, 0},
+ { STRATIX10_EMAC1_CLK, "emac1_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0xA4,
+ 1, 0, 0, 0, 0xDC, 27, 0},
+ { STRATIX10_EMAC2_CLK, "emac2_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0xA4,
+ 2, 0, 0, 0, 0xDC, 28, 0},
+ { STRATIX10_EMAC_PTP_CLK, "emac_ptp_clk", "emac_ptp_free_clk", NULL, 1, 0, 0xA4,
+ 3, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_GPIO_DB_CLK, "gpio_db_clk", "gpio_db_free_clk", NULL, 1, 0, 0xA4,
+ 4, 0xE0, 0, 16, 0, 0, 0},
+ { STRATIX10_SDMMC_CLK, "sdmmc_clk", "sdmmc_free_clk", NULL, 1, 0, 0xA4,
+ 5, 0, 0, 0, 0, 0, 4},
+ { STRATIX10_S2F_USER1_CLK, "s2f_user1_clk", "s2f_user1_free_clk", NULL, 1, 0, 0xA4,
+ 6, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_PSI_REF_CLK, "psi_ref_clk", "psi_ref_free_clk", NULL, 1, 0, 0xA4,
+ 7, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_USB_CLK, "usb_clk", "l4_mp_clk", NULL, 1, 0, 0xA4,
+ 8, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0xA4,
+ 9, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_NAND_CLK, "nand_clk", "l4_main_clk", NULL, 1, 0, 0xA4,
+ 10, 0, 0, 0, 0, 0, 0},
+};
+
+static int s10_clk_register_c_perip(const struct stratix10_perip_c_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_periph(clks[i].name, clks[i].parent_name,
+ clks[i].parent_names, clks[i].num_parents,
+ clks[i].flags, base, clks[i].offset);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+ return 0;
+}
+
+static int s10_clk_register_cnt_perip(const struct stratix10_perip_cnt_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_cnt_periph(clks[i].name, clks[i].parent_name,
+ clks[i].parent_names,
+ clks[i].num_parents,
+ clks[i].flags, base,
+ clks[i].offset,
+ clks[i].fixed_divider,
+ clks[i].bypass_reg,
+ clks[i].bypass_shift);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
+static int s10_clk_register_gate(const struct stratix10_gate_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_gate(clks[i].name, clks[i].parent_name,
+ clks[i].parent_names,
+ clks[i].num_parents,
+ clks[i].flags, base,
+ clks[i].gate_reg,
+ clks[i].gate_idx, clks[i].div_reg,
+ clks[i].div_offset, clks[i].div_width,
+ clks[i].bypass_reg,
+ clks[i].bypass_shift,
+ clks[i].fixed_div);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
+static int s10_clk_register_pll(const struct stratix10_pll_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_pll(clks[i].name, clks[i].parent_names,
+ clks[i].num_parents,
+ clks[i].flags, base,
+ clks[i].offset);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
+static struct stratix10_clock_data *__socfpga_s10_clk_init(struct device_node *np,
+ int nr_clks)
+{
+ struct stratix10_clock_data *clk_data;
+ struct clk **clk_table;
+ void __iomem *base;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%s: failed to map clock registers\n", __func__);
+ goto err;
+ }
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ goto err;
+
+ clk_data->base = base;
+ clk_table = kcalloc(nr_clks, sizeof(*clk_table), GFP_KERNEL);
+ if (!clk_table)
+ goto err_data;
+
+ clk_data->clk_data.clks = clk_table;
+ clk_data->clk_data.clk_num = nr_clks;
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data->clk_data);
+ return clk_data;
+
+err_data:
+ kfree(clk_data);
+err:
+ return NULL;
+}
+
+static int s10_clkmgr_init(struct device_node *np)
+{
+ struct stratix10_clock_data *clk_data;
+
+ clk_data = __socfpga_s10_clk_init(np, STRATIX10_NUM_CLKS);
+ if (!clk_data)
+ return -ENOMEM;
+
+ s10_clk_register_pll(s10_pll_clks, ARRAY_SIZE(s10_pll_clks), clk_data);
+
+ s10_clk_register_c_perip(s10_main_perip_c_clks,
+ ARRAY_SIZE(s10_main_perip_c_clks), clk_data);
+
+ s10_clk_register_cnt_perip(s10_main_perip_cnt_clks,
+ ARRAY_SIZE(s10_main_perip_cnt_clks),
+ clk_data);
+
+ s10_clk_register_gate(s10_gate_clks, ARRAY_SIZE(s10_gate_clks),
+ clk_data);
+ return 0;
+}
+
+static int s10_clkmgr_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ s10_clkmgr_init(np);
+
+ return 0;
+}
+
+static const struct of_device_id stratix10_clkmgr_match_table[] = {
+ { .compatible = "intel,stratix10-clkmgr",
+ .data = s10_clkmgr_init },
+ { }
+};
+
+static struct platform_driver stratix10_clkmgr_driver = {
+ .probe = s10_clkmgr_probe,
+ .driver = {
+ .name = "stratix10-clkmgr",
+ .of_match_table = stratix10_clkmgr_match_table,
+ },
+};
+
+static int __init s10_clk_init(void)
+{
+ return platform_driver_register(&stratix10_clkmgr_driver);
+}
+core_initcall(s10_clk_init);
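The driver exposes the whole table through clk_onecell_data, so consumers address clocks by the indices in dt-bindings/clock/stratix10-clock.h and pick them up with the normal clock consumer API. A hypothetical consumer probe, just to show the hand-off (the device and the "l4_mp_clk" clock-names entry are assumed, not defined by this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* assumed clock-names entry in the consumer's devicetree node */
	clk = devm_clk_get(&pdev->dev, "l4_mp_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}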
diff --git a/drivers/clk/socfpga/clk.h b/drivers/clk/socfpga/clk.h
index 9cf1230115b1..26c3a265cf78 100644
--- a/drivers/clk/socfpga/clk.h
+++ b/drivers/clk/socfpga/clk.h
@@ -54,9 +54,11 @@ struct socfpga_gate_clk {
char *parent_name;
u32 fixed_div;
void __iomem *div_reg;
+ void __iomem *bypass_reg;
struct regmap *sys_mgr_base_addr;
u32 width; /* only valid if div_reg != 0 */
u32 shift; /* only valid if div_reg != 0 */
+ u32 bypass_shift; /* only valid if bypass_reg != 0 */
u32 clk_phase[2];
};
@@ -65,8 +67,10 @@ struct socfpga_periph_clk {
char *parent_name;
u32 fixed_div;
void __iomem *div_reg;
+ void __iomem *bypass_reg;
u32 width; /* only valid if div_reg != 0 */
u32 shift; /* only valid if div_reg != 0 */
+ u32 bypass_shift; /* only valid if bypass_reg != 0 */
};
#endif /* SOCFPGA_CLK_H */
diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
new file mode 100644
index 000000000000..e8e121907952
--- /dev/null
+++ b/drivers/clk/socfpga/stratix10-clk.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017, Intel Corporation
+ */
+
+#ifndef __STRATIX10_CLK_H
+#define __STRATIX10_CLK_H
+
+struct stratix10_clock_data {
+ struct clk_onecell_data clk_data;
+ void __iomem *base;
+};
+
+struct stratix10_pll_clock {
+ unsigned int id;
+ const char *name;
+ const char *const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+};
+
+struct stratix10_perip_c_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ const char *const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+};
+
+struct stratix10_perip_cnt_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ const char *const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+ u8 fixed_divider;
+ unsigned long bypass_reg;
+ unsigned long bypass_shift;
+};
+
+struct stratix10_gate_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ const char *const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long gate_reg;
+ u8 gate_idx;
+ unsigned long div_reg;
+ u8 div_offset;
+ u8 div_width;
+ unsigned long bypass_reg;
+ u8 bypass_shift;
+ u8 fixed_div;
+};
+
+struct clk *s10_register_pll(const char *, const char *const *, u8,
+ unsigned long, void __iomem *, unsigned long);
+
+struct clk *s10_register_periph(const char *, const char *,
+ const char * const *, u8, unsigned long,
+ void __iomem *, unsigned long);
+struct clk *s10_register_cnt_periph(const char *, const char *,
+ const char * const *, u8,
+ unsigned long, void __iomem *,
+ unsigned long, u8, unsigned long,
+ unsigned long);
+struct clk *s10_register_gate(const char *, const char *,
+ const char * const *, u8,
+ unsigned long, void __iomem *,
+ unsigned long, unsigned long,
+ unsigned long, unsigned long, u8,
+ unsigned long, u8, u8);
+#endif /* __STRATIX10_CLK_H */
diff --git a/drivers/clk/sprd/sc9860-clk.c b/drivers/clk/sprd/sc9860-clk.c
index ed5c027df0f4..9980ab55271b 100644
--- a/drivers/clk/sprd/sc9860-clk.c
+++ b/drivers/clk/sprd/sc9860-clk.c
@@ -959,6 +959,44 @@ static SPRD_SC_GATE_CLK(sdio2_2x_en, "sdio2-2x-en", "aon-apb", 0x13c,
0x1000, BIT(6), 0, 0);
static SPRD_SC_GATE_CLK(emmc_2x_en, "emmc-2x-en", "aon-apb", 0x13c,
0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK(arch_rtc_eb, "arch-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(kpb_rtc_eb, "kpb-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(aon_syst_rtc_eb, "aon-syst-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(ap_syst_rtc_eb, "ap-syst-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(3), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(aon_tmr_rtc_eb, "aon-tmr-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(ap_tmr0_rtc_eb, "ap-tmr0-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(5), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(eic_rtc_eb, "eic-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(6), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(eic_rtcdv5_eb, "eic-rtcdv5-eb", "aon-apb", 0x10,
+ 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(ap_wdg_rtc_eb, "ap-wdg-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(ap_tmr1_rtc_eb, "ap-tmr1-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(ap_tmr2_rtc_eb, "ap-tmr2-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(dcxo_tmr_rtc_eb, "dcxo-tmr-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(bb_cal_rtc_eb, "bb-cal-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(18), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(avs_big_rtc_eb, "avs-big-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(20), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(avs_lit_rtc_eb, "avs-lit-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(21), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(avs_gpu0_rtc_eb, "avs-gpu0-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(22), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(avs_gpu1_rtc_eb, "avs-gpu1-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(23), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(gpu_ts_eb, "gpu-ts-eb", "aon-apb", 0x10,
+ 0x1000, BIT(24), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(rtcdv10_eb, "rtcdv10-eb", "aon-apb", 0x10,
+ 0x1000, BIT(27), CLK_IGNORE_UNUSED, 0);
static struct sprd_clk_common *sc9860_aon_gate[] = {
/* address base is 0x402e0000 */
@@ -1030,6 +1068,25 @@ static struct sprd_clk_common *sc9860_aon_gate[] = {
&sdio1_2x_en.common,
&sdio2_2x_en.common,
&emmc_2x_en.common,
+ &arch_rtc_eb.common,
+ &kpb_rtc_eb.common,
+ &aon_syst_rtc_eb.common,
+ &ap_syst_rtc_eb.common,
+ &aon_tmr_rtc_eb.common,
+ &ap_tmr0_rtc_eb.common,
+ &eic_rtc_eb.common,
+ &eic_rtcdv5_eb.common,
+ &ap_wdg_rtc_eb.common,
+ &ap_tmr1_rtc_eb.common,
+ &ap_tmr2_rtc_eb.common,
+ &dcxo_tmr_rtc_eb.common,
+ &bb_cal_rtc_eb.common,
+ &avs_big_rtc_eb.common,
+ &avs_lit_rtc_eb.common,
+ &avs_gpu0_rtc_eb.common,
+ &avs_gpu1_rtc_eb.common,
+ &gpu_ts_eb.common,
+ &rtcdv10_eb.common,
};
static struct clk_hw_onecell_data sc9860_aon_gate_hws = {
@@ -1102,6 +1159,25 @@ static struct clk_hw_onecell_data sc9860_aon_gate_hws = {
[CLK_SDIO1_2X_EN] = &sdio1_2x_en.common.hw,
[CLK_SDIO2_2X_EN] = &sdio2_2x_en.common.hw,
[CLK_EMMC_2X_EN] = &emmc_2x_en.common.hw,
+ [CLK_ARCH_RTC_EB] = &arch_rtc_eb.common.hw,
+ [CLK_KPB_RTC_EB] = &kpb_rtc_eb.common.hw,
+ [CLK_AON_SYST_RTC_EB] = &aon_syst_rtc_eb.common.hw,
+ [CLK_AP_SYST_RTC_EB] = &ap_syst_rtc_eb.common.hw,
+ [CLK_AON_TMR_RTC_EB] = &aon_tmr_rtc_eb.common.hw,
+ [CLK_AP_TMR0_RTC_EB] = &ap_tmr0_rtc_eb.common.hw,
+ [CLK_EIC_RTC_EB] = &eic_rtc_eb.common.hw,
+ [CLK_EIC_RTCDV5_EB] = &eic_rtcdv5_eb.common.hw,
+ [CLK_AP_WDG_RTC_EB] = &ap_wdg_rtc_eb.common.hw,
+ [CLK_AP_TMR1_RTC_EB] = &ap_tmr1_rtc_eb.common.hw,
+ [CLK_AP_TMR2_RTC_EB] = &ap_tmr2_rtc_eb.common.hw,
+ [CLK_DCXO_TMR_RTC_EB] = &dcxo_tmr_rtc_eb.common.hw,
+ [CLK_BB_CAL_RTC_EB] = &bb_cal_rtc_eb.common.hw,
+ [CLK_AVS_BIG_RTC_EB] = &avs_big_rtc_eb.common.hw,
+ [CLK_AVS_LIT_RTC_EB] = &avs_lit_rtc_eb.common.hw,
+ [CLK_AVS_GPU0_RTC_EB] = &avs_gpu0_rtc_eb.common.hw,
+ [CLK_AVS_GPU1_RTC_EB] = &avs_gpu1_rtc_eb.common.hw,
+ [CLK_GPU_TS_EB] = &gpu_ts_eb.common.hw,
+ [CLK_RTCDV10_EB] = &rtcdv10_eb.common.hw,
},
.num = CLK_AON_GATE_NUM,
};
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 6427d0ebe2de..79dfd296c3d1 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -11,15 +11,13 @@ config SUN50I_A64_CCU
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+config SUN50I_H6_CCU
+ bool "Support for the Allwinner H6 CCU"
+ default ARM64 && ARCH_SUNXI
+ depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+
config SUN4I_A10_CCU
bool "Support for the Allwinner A10/A20 CCU"
- select SUNXI_CCU_DIV
- select SUNXI_CCU_MULT
- select SUNXI_CCU_NK
- select SUNXI_CCU_NKM
- select SUNXI_CCU_NM
- select SUNXI_CCU_MP
- select SUNXI_CCU_PHASE
default MACH_SUN4I
default MACH_SUN7I
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 4141c3fe08ae..128a40ee5c5e 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -22,6 +22,7 @@ lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o
# SoC support
obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
+obj-$(CONFIG_SUN50I_H6_CCU) += ccu-sun50i-h6.o
obj-$(CONFIG_SUN4I_A10_CCU) += ccu-sun4i-a10.o
obj-$(CONFIG_SUN5I_CCU) += ccu-sun5i.o
obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
new file mode 100644
index 000000000000..bdbfe78fe133
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+
+#include "ccu-sun50i-h6.h"
+
+/*
+ * The CPU PLL is actually an NP clock, with P being /1, /2 or /4. However,
+ * P should only be used for output frequencies lower than 288 MHz.
+ *
+ * For now we can just model it as a multiplier clock, and force P to /1.
+ *
+ * The M factor is present in the register's description, but not in the
+ * frequency formula, and it is documented as "M is only used for backdoor
+ * testing", so it is not modelled and is forced to 0.
+ */
+#define SUN50I_H6_PLL_CPUX_REG 0x000
+static struct ccu_mult pll_cpux_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .mult = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT("pll-cpux", "osc24M",
+ &ccu_mult_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
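With P pinned to /1 and M ignored, the modelled pll-cpux rate is simply the 24 MHz parent multiplied by N. A minimal sketch of that model, as a hypothetical helper rather than anything in this patch:

/* Hypothetical sketch of the pll-cpux model above: P forced to /1, M ignored. */
static unsigned long h6_pll_cpux_modelled_rate(unsigned long parent_rate,
					       unsigned long n)
{
	return parent_rate * n;	/* e.g. 24000000 * 50 = 1200000000 (1.2 GHz) */
}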
+
+/* Some PLLs are input * N / div1 / P. Model them as NKMP with no K */
+#define SUN50I_H6_PLL_DDR0_REG 0x010
+static struct ccu_nkmp pll_ddr0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x010,
+ .hw.init = CLK_HW_INIT("pll-ddr0", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_PERIPH0_REG 0x020
+static struct ccu_nkmp pll_periph0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .fixed_post_div = 4,
+ .common = {
+ .reg = 0x020,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph0", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
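Under the NKMP-with-no-K model above, and taking the fixed /4 post-divider flagged by CCU_FEATURE_FIXED_POSTDIV into account, a setting of N = 100 with M and P at /1 on the 24 MHz oscillator would put the VCO at 2.4 GHz and pll-periph0 at 600 MHz. The 600 MHz figure is only an illustrative assumption based on the conventional Allwinner periph0 rate; it is not stated in this patch.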
+
+#define SUN50I_H6_PLL_PERIPH1_REG 0x028
+static struct ccu_nkmp pll_periph1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .fixed_post_div = 4,
+ .common = {
+ .reg = 0x028,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph1", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_GPU_REG 0x030
+static struct ccu_nkmp pll_gpu_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x030,
+ .hw.init = CLK_HW_INIT("pll-gpu", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * For Video PLLs, the output divider is described as "used for testing"
+ * in the user manual, so it is not modelled and is forced to 0.
+ */
+#define SUN50I_H6_PLL_VIDEO0_REG 0x040
+static struct ccu_nm pll_video0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .common = {
+ .reg = 0x040,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video0", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_VIDEO1_REG 0x048
+static struct ccu_nm pll_video1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .common = {
+ .reg = 0x048,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video1", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_VE_REG 0x058
+static struct ccu_nkmp pll_ve_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x058,
+ .hw.init = CLK_HW_INIT("pll-ve", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_DE_REG 0x060
+static struct ccu_nkmp pll_de_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x060,
+ .hw.init = CLK_HW_INIT("pll-de", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_HSIC_REG 0x070
+static struct ccu_nkmp pll_hsic_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x070,
+ .hw.init = CLK_HW_INIT("pll-hsic", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * The Audio PLL is supposed to have 3 outputs: 2 fixed factors from
+ * the base (2x and 4x), and one variable divider (the one true pll audio).
+ *
+ * We don't have any need for the variable divider for now, so we just
+ * hardcode it to match the clock names.
+ */
+#define SUN50I_H6_PLL_AUDIO_REG 0x078
+static struct ccu_nm pll_audio_base_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x078,
+ .hw.init = CLK_HW_INIT("pll-audio-base", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const char * const cpux_parents[] = { "osc24M", "osc32k",
+ "iosc", "pll-cpux" };
+static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents,
+ 0x500, 24, 2, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x500, 0, 2, 0);
+static SUNXI_CCU_M(cpux_apb_clk, "cpux-apb", "cpux", 0x500, 8, 2, 0);
+
+static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k",
+ "iosc", "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
+ psi_ahb1_ahb2_parents,
+ 0x510,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
+ "psi-ahb1-ahb2",
+ "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-ddr0", "pll-periph0-4x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents, 0x540,
+ 0, 3, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL);
+
+static const char * const de_parents[] = { "pll-de", "pll-periph0-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, 0x600,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "psi-ahb1-ahb2",
+ 0x60c, BIT(0), 0);
+
+static const char * const deinterlace_parents[] = { "pll-periph0",
+ "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace",
+ deinterlace_parents,
+ 0x620,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "psi-ahb1-ahb2",
+ 0x62c, BIT(0), 0);
+
+static const char * const gpu_parents[] = { "pll-gpu" };
+static SUNXI_CCU_M_WITH_MUX_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
+ 0, 3, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "psi-ahb1-ahb2",
+ 0x67c, BIT(0), 0);
+
+/* Also applies to EMCE */
+static const char * const ce_parents[] = { "osc24M", "pll-periph0-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", ce_parents, 0x680,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 1, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "psi-ahb1-ahb2",
+ 0x68c, BIT(0), 0);
+
+static const char * const ve_parents[] = { "pll-ve" };
+static SUNXI_CCU_M_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
+ 0, 3, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "psi-ahb1-ahb2",
+ 0x69c, BIT(0), 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(emce_clk, "emce", ce_parents, 0x6b0,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 1, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_emce_clk, "bus-emce", "psi-ahb1-ahb2",
+ 0x6bc, BIT(0), 0);
+
+static const char * const vp9_parents[] = { "pll-ve", "pll-periph0-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(vp9_clk, "vp9", vp9_parents, 0x6c0,
+ 0, 3, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_vp9_clk, "bus-vp9", "psi-ahb1-ahb2",
+ 0x6cc, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "psi-ahb1-ahb2",
+ 0x70c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "psi-ahb1-ahb2",
+ 0x71c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "psi-ahb1-ahb2",
+ 0x72c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "psi-ahb1-ahb2",
+ 0x73c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", 0x740, BIT(31), 0);
+
+static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "psi-ahb1-ahb2",
+ 0x78c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_psi_clk, "bus-psi", "psi-ahb1-ahb2",
+ 0x79c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_pwm_clk, "bus-pwm", "apb1", 0x7ac, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_iommu_clk, "bus-iommu", "apb1", 0x7bc, BIT(0), 0);
+
+static const char * const dram_parents[] = { "pll-ddr0" };
+static struct ccu_div dram_clk = {
+ .div = _SUNXI_CCU_DIV(0, 2),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0x800,
+ .hw.init = CLK_HW_INIT_PARENTS("dram",
+ dram_parents,
+ &ccu_div_ops,
+ CLK_IS_CRITICAL),
+ },
+};
+
+static SUNXI_CCU_GATE(mbus_dma_clk, "mbus-dma", "mbus",
+ 0x804, BIT(0), 0);
+static SUNXI_CCU_GATE(mbus_ve_clk, "mbus-ve", "mbus",
+ 0x804, BIT(1), 0);
+static SUNXI_CCU_GATE(mbus_ce_clk, "mbus-ce", "mbus",
+ 0x804, BIT(2), 0);
+static SUNXI_CCU_GATE(mbus_ts_clk, "mbus-ts", "mbus",
+ 0x804, BIT(3), 0);
+static SUNXI_CCU_GATE(mbus_nand_clk, "mbus-nand", "mbus",
+ 0x804, BIT(5), 0);
+static SUNXI_CCU_GATE(mbus_csi_clk, "mbus-csi", "mbus",
+ 0x804, BIT(8), 0);
+static SUNXI_CCU_GATE(mbus_deinterlace_clk, "mbus-deinterlace", "mbus",
+ 0x804, BIT(11), 0);
+
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "psi-ahb1-ahb2",
+ 0x80c, BIT(0), CLK_IS_CRITICAL);
+
+static const char * const nand_spi_parents[] = { "osc24M", "pll-periph0",
+ "pll-periph1", "pll-periph0-2x",
+ "pll-periph1-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand0_clk, "nand0", nand_spi_parents, 0x810,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand1_clk, "nand1", nand_spi_parents, 0x814,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb3", 0x82c, BIT(0), 0);
+
+static const char * const mmc_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-periph1-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc_parents, 0x830,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc_parents, 0x834,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc_parents, 0x838,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb3", 0x84c, BIT(2), 0);
+
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2", 0x90c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2", 0x90c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2", 0x90c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2", 0x90c, BIT(3), 0);
+
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", 0x91c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2", 0x91c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2", 0x91c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_i2c3_clk, "bus-i2c3", "apb2", 0x91c, BIT(3), 0);
+
+static SUNXI_CCU_GATE(bus_scr0_clk, "bus-scr0", "apb2", 0x93c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_scr1_clk, "bus-scr1", "apb2", 0x93c, BIT(1), 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", nand_spi_parents, 0x940,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", nand_spi_parents, 0x944,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb3", 0x96c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb3", 0x96c, BIT(1), 0);
+
+static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb3", 0x97c, BIT(0), 0);
+
+static const char * const ts_parents[] = { "osc24M", "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", ts_parents, 0x9b0,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 1, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb3", 0x9bc, BIT(0), 0);
+
+static const char * const ir_tx_parents[] = { "osc32k", "osc24M" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ir_tx_clk, "ir-tx", ir_tx_parents, 0x9c0,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 1, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ir_tx_clk, "bus-ir-tx", "apb1", 0x9cc, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1", 0x9fc, BIT(0), 0);
+
+static const char * const audio_parents[] = { "pll-audio", "pll-audio-2x", "pll-audio-4x" };
+static struct ccu_div i2s3_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa0c,
+ .hw.init = CLK_HW_INIT_PARENTS("i2s3",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_div i2s0_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa10,
+ .hw.init = CLK_HW_INIT_PARENTS("i2s0",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_div i2s1_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa14,
+ .hw.init = CLK_HW_INIT_PARENTS("i2s1",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_div i2s2_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa18,
+ .hw.init = CLK_HW_INIT_PARENTS("i2s2",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1", 0xa1c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1", 0xa1c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2s2_clk, "bus-i2s2", "apb1", 0xa1c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_i2s3_clk, "bus-i2s3", "apb1", 0xa1c, BIT(3), 0);
+
+static struct ccu_div spdif_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa20,
+ .hw.init = CLK_HW_INIT_PARENTS("spdif",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1", 0xa2c, BIT(0), 0);
+
+static struct ccu_div dmic_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa40,
+ .hw.init = CLK_HW_INIT_PARENTS("dmic",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_dmic_clk, "bus-dmic", "apb1", 0xa4c, BIT(0), 0);
+
+static struct ccu_div audio_hub_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa60,
+ .hw.init = CLK_HW_INIT_PARENTS("audio-hub",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_audio_hub_clk, "bus-audio-hub", "apb1", 0xa6c, BIT(0), 0);
+
+/*
+ * There are OHCI 12M clock source selection bits for 2 USB 2.0 ports.
+ * We will force them to 0 (12M divided from 48M).
+ */
+#define SUN50I_H6_USB0_CLK_REG 0xa70
+#define SUN50I_H6_USB3_CLK_REG 0xa7c
+
+static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc12M", 0xa70, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", 0xa70, BIT(29), 0);
+
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M", 0xa74, BIT(29), 0);
+
+static SUNXI_CCU_GATE(usb_ohci3_clk, "usb-ohci3", "osc12M", 0xa7c, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy3_clk, "usb-phy3", "osc12M", 0xa7c, BIT(29), 0);
+static SUNXI_CCU_GATE(usb_hsic_12m_clk, "usb-hsic-12M", "osc12M", 0xa7c, BIT(27), 0);
+static SUNXI_CCU_GATE(usb_hsic_clk, "usb-hsic", "pll-hsic", 0xa7c, BIT(26), 0);
+
+static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb3", 0xa8c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_ohci3_clk, "bus-ohci3", "ahb3", 0xa8c, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb3", 0xa8c, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_xhci_clk, "bus-xhci", "ahb3", 0xa8c, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_ehci3_clk, "bus-ehci3", "ahb3", 0xa8c, BIT(7), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb3", 0xa8c, BIT(8), 0);
+
+static CLK_FIXED_FACTOR(pcie_ref_100m_clk, "pcie-ref-100M",
+ "pll-periph0-4x", 24, 1, 0);
+static SUNXI_CCU_GATE(pcie_ref_clk, "pcie-ref", "pcie-ref-100M",
+ 0xab0, BIT(31), 0);
+static SUNXI_CCU_GATE(pcie_ref_out_clk, "pcie-ref-out", "pcie-ref",
+ 0xab0, BIT(30), 0);
+
+static SUNXI_CCU_M_WITH_GATE(pcie_maxi_clk, "pcie-maxi",
+ "pll-periph0", 0xab4,
+ 0, 4, /* M */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_M_WITH_GATE(pcie_aux_clk, "pcie-aux", "osc24M", 0xab8,
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_pcie_clk, "bus-pcie", "psi-ahb1-ahb2",
+ 0xabc, BIT(0), 0);
+
+static const char * const hdmi_parents[] = { "pll-video0", "pll-video1",
+ "pll-video1-4x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents, 0xb00,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(hdmi_slow_clk, "hdmi-slow", "osc24M", 0xb04, BIT(31), 0);
+
+static const char * const hdmi_cec_parents[] = { "osc32k", "pll-periph0-2x" };
+static const struct ccu_mux_fixed_prediv hdmi_cec_predivs[] = {
+ { .index = 1, .div = 36621 },
+};
+static struct ccu_mux hdmi_cec_clk = {
+ .enable = BIT(31),
+
+ .mux = {
+ .shift = 24,
+ .width = 2,
+
+ .fixed_predivs = hdmi_cec_predivs,
+ .n_predivs = ARRAY_SIZE(hdmi_cec_predivs),
+ },
+
+ .common = {
+ .reg = 0xb10,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("hdmi-cec",
+ hdmi_cec_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
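The 36621 pre-divider on the second mux input is easiest to read as a rate match: assuming pll-periph0-2x runs at the conventional 1.2 GHz (an assumption, not something stated here), 1.2 GHz / 36621 is roughly 32.768 kHz, so hdmi-cec sees approximately the same rate from either parent, the other one being osc32k.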
+
+static SUNXI_CCU_GATE(bus_hdmi_clk, "bus-hdmi", "ahb3", 0xb1c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_tcon_top_clk, "bus-tcon-top", "ahb3",
+ 0xb5c, BIT(0), 0);
+
+static const char * const tcon_lcd0_parents[] = { "pll-video0",
+ "pll-video0-4x",
+ "pll-video1" };
+static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd0_clk, "tcon-lcd0",
+ tcon_lcd0_parents, 0xb60,
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_tcon_lcd0_clk, "bus-tcon-lcd0", "ahb3",
+ 0xb7c, BIT(0), 0);
+
+static const char * const tcon_tv0_parents[] = { "pll-video0",
+ "pll-video0-4x",
+ "pll-video1",
+ "pll-video1-4x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0",
+ tcon_tv0_parents, 0xb80,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_tcon_tv0_clk, "bus-tcon-tv0", "ahb3",
+ 0xb9c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(csi_cci_clk, "csi-cci", "osc24M", 0xc00, BIT(0), 0);
+
+static const char * const csi_top_parents[] = { "pll-video0", "pll-ve",
+ "pll-periph0" };
+static const u8 csi_top_table[] = { 0, 2, 3 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_top_clk, "csi-top",
+ csi_top_parents, csi_top_table, 0xc04,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const csi_mclk_parents[] = { "osc24M", "pll-video0",
+ "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk",
+ csi_mclk_parents, 0xc08,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb3", 0xc2c, BIT(0), 0);
+
+static const char * const hdcp_parents[] = { "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdcp_clk, "hdcp", hdcp_parents, 0xc40,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_hdcp_clk, "bus-hdcp", "ahb3", 0xc4c, BIT(0), 0);
+
+/* Fixed factor clocks */
+static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0);
+
+/*
+ * The divider of pll-audio is fixed to 8 now, as pll-audio-4x has a
+ * fixed post-divider of 2.
+ */
+static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
+ "pll-audio-base", 8, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
+ "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
+ "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
+
+static CLK_FIXED_FACTOR(pll_periph0_4x_clk, "pll-periph0-4x",
+ "pll-periph0", 1, 4, 0);
+static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x",
+ "pll-periph0", 1, 2, 0);
+
+static CLK_FIXED_FACTOR(pll_periph1_4x_clk, "pll-periph1-4x",
+ "pll-periph1", 1, 4, 0);
+static CLK_FIXED_FACTOR(pll_periph1_2x_clk, "pll-periph1-2x",
+ "pll-periph1", 1, 2, 0);
+
+static CLK_FIXED_FACTOR(pll_video0_4x_clk, "pll-video0-4x",
+ "pll-video0", 1, 4, CLK_SET_RATE_PARENT);
+
+static CLK_FIXED_FACTOR(pll_video1_4x_clk, "pll-video1-4x",
+ "pll-video1", 1, 4, CLK_SET_RATE_PARENT);
+
+static struct ccu_common *sun50i_h6_ccu_clks[] = {
+ &pll_cpux_clk.common,
+ &pll_ddr0_clk.common,
+ &pll_periph0_clk.common,
+ &pll_periph1_clk.common,
+ &pll_gpu_clk.common,
+ &pll_video0_clk.common,
+ &pll_video1_clk.common,
+ &pll_ve_clk.common,
+ &pll_de_clk.common,
+ &pll_hsic_clk.common,
+ &pll_audio_base_clk.common,
+ &cpux_clk.common,
+ &axi_clk.common,
+ &cpux_apb_clk.common,
+ &psi_ahb1_ahb2_clk.common,
+ &ahb3_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &mbus_clk.common,
+ &de_clk.common,
+ &bus_de_clk.common,
+ &deinterlace_clk.common,
+ &bus_deinterlace_clk.common,
+ &gpu_clk.common,
+ &bus_gpu_clk.common,
+ &ce_clk.common,
+ &bus_ce_clk.common,
+ &ve_clk.common,
+ &bus_ve_clk.common,
+ &emce_clk.common,
+ &bus_emce_clk.common,
+ &vp9_clk.common,
+ &bus_vp9_clk.common,
+ &bus_dma_clk.common,
+ &bus_msgbox_clk.common,
+ &bus_spinlock_clk.common,
+ &bus_hstimer_clk.common,
+ &avs_clk.common,
+ &bus_dbg_clk.common,
+ &bus_psi_clk.common,
+ &bus_pwm_clk.common,
+ &bus_iommu_clk.common,
+ &dram_clk.common,
+ &mbus_dma_clk.common,
+ &mbus_ve_clk.common,
+ &mbus_ce_clk.common,
+ &mbus_ts_clk.common,
+ &mbus_nand_clk.common,
+ &mbus_csi_clk.common,
+ &mbus_deinterlace_clk.common,
+ &bus_dram_clk.common,
+ &nand0_clk.common,
+ &nand1_clk.common,
+ &bus_nand_clk.common,
+ &mmc0_clk.common,
+ &mmc1_clk.common,
+ &mmc2_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_i2c3_clk.common,
+ &bus_scr0_clk.common,
+ &bus_scr1_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_emac_clk.common,
+ &ts_clk.common,
+ &bus_ts_clk.common,
+ &ir_tx_clk.common,
+ &bus_ir_tx_clk.common,
+ &bus_ths_clk.common,
+ &i2s3_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &bus_i2s3_clk.common,
+ &spdif_clk.common,
+ &bus_spdif_clk.common,
+ &dmic_clk.common,
+ &bus_dmic_clk.common,
+ &audio_hub_clk.common,
+ &bus_audio_hub_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_phy0_clk.common,
+ &usb_phy1_clk.common,
+ &usb_ohci3_clk.common,
+ &usb_phy3_clk.common,
+ &usb_hsic_12m_clk.common,
+ &usb_hsic_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci3_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_xhci_clk.common,
+ &bus_ehci3_clk.common,
+ &bus_otg_clk.common,
+ &pcie_ref_clk.common,
+ &pcie_ref_out_clk.common,
+ &pcie_maxi_clk.common,
+ &pcie_aux_clk.common,
+ &bus_pcie_clk.common,
+ &hdmi_clk.common,
+ &hdmi_slow_clk.common,
+ &hdmi_cec_clk.common,
+ &bus_hdmi_clk.common,
+ &bus_tcon_top_clk.common,
+ &tcon_lcd0_clk.common,
+ &bus_tcon_lcd0_clk.common,
+ &tcon_tv0_clk.common,
+ &bus_tcon_tv0_clk.common,
+ &csi_cci_clk.common,
+ &csi_top_clk.common,
+ &csi_mclk_clk.common,
+ &bus_csi_clk.common,
+ &hdcp_clk.common,
+ &bus_hdcp_clk.common,
+};
+
+static struct clk_hw_onecell_data sun50i_h6_hw_clks = {
+ .hws = {
+ [CLK_OSC12M] = &osc12M_clk.hw,
+ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw,
+ [CLK_PLL_PERIPH0_4X] = &pll_periph0_4x_clk.hw,
+ [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw,
+ [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw,
+ [CLK_PLL_PERIPH1_4X] = &pll_periph1_4x_clk.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
+ [CLK_PLL_VIDEO0_4X] = &pll_video0_4x_clk.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
+ [CLK_PLL_VIDEO1_4X] = &pll_video1_4x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DE] = &pll_de_clk.common.hw,
+ [CLK_PLL_HSIC] = &pll_hsic_clk.common.hw,
+ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_CPUX] = &cpux_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_CPUX_APB] = &cpux_apb_clk.common.hw,
+ [CLK_PSI_AHB1_AHB2] = &psi_ahb1_ahb2_clk.common.hw,
+ [CLK_AHB3] = &ahb3_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_DEINTERLACE] = &deinterlace_clk.common.hw,
+ [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_EMCE] = &emce_clk.common.hw,
+ [CLK_BUS_EMCE] = &bus_emce_clk.common.hw,
+ [CLK_VP9] = &vp9_clk.common.hw,
+ [CLK_BUS_VP9] = &bus_vp9_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_BUS_PSI] = &bus_psi_clk.common.hw,
+ [CLK_BUS_PWM] = &bus_pwm_clk.common.hw,
+ [CLK_BUS_IOMMU] = &bus_iommu_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_MBUS_DMA] = &mbus_dma_clk.common.hw,
+ [CLK_MBUS_VE] = &mbus_ve_clk.common.hw,
+ [CLK_MBUS_CE] = &mbus_ce_clk.common.hw,
+ [CLK_MBUS_TS] = &mbus_ts_clk.common.hw,
+ [CLK_MBUS_NAND] = &mbus_nand_clk.common.hw,
+ [CLK_MBUS_CSI] = &mbus_csi_clk.common.hw,
+ [CLK_MBUS_DEINTERLACE] = &mbus_deinterlace_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_NAND0] = &nand0_clk.common.hw,
+ [CLK_NAND1] = &nand1_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw,
+ [CLK_BUS_SCR0] = &bus_scr0_clk.common.hw,
+ [CLK_BUS_SCR1] = &bus_scr1_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_EMAC] = &bus_emac_clk.common.hw,
+ [CLK_TS] = &ts_clk.common.hw,
+ [CLK_BUS_TS] = &bus_ts_clk.common.hw,
+ [CLK_IR_TX] = &ir_tx_clk.common.hw,
+ [CLK_BUS_IR_TX] = &bus_ir_tx_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_I2S3] = &i2s3_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_I2S2] = &i2s2_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_BUS_I2S3] = &bus_i2s3_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_DMIC] = &dmic_clk.common.hw,
+ [CLK_BUS_DMIC] = &bus_dmic_clk.common.hw,
+ [CLK_AUDIO_HUB] = &audio_hub_clk.common.hw,
+ [CLK_BUS_AUDIO_HUB] = &bus_audio_hub_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_USB_OHCI3] = &usb_ohci3_clk.common.hw,
+ [CLK_USB_PHY3] = &usb_phy3_clk.common.hw,
+ [CLK_USB_HSIC_12M] = &usb_hsic_12m_clk.common.hw,
+ [CLK_USB_HSIC] = &usb_hsic_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI3] = &bus_ohci3_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_XHCI] = &bus_xhci_clk.common.hw,
+ [CLK_BUS_EHCI3] = &bus_ehci3_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_PCIE_REF_100M] = &pcie_ref_100m_clk.hw,
+ [CLK_PCIE_REF] = &pcie_ref_clk.common.hw,
+ [CLK_PCIE_REF_OUT] = &pcie_ref_out_clk.common.hw,
+ [CLK_PCIE_MAXI] = &pcie_maxi_clk.common.hw,
+ [CLK_PCIE_AUX] = &pcie_aux_clk.common.hw,
+ [CLK_BUS_PCIE] = &bus_pcie_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_HDMI_SLOW] = &hdmi_slow_clk.common.hw,
+ [CLK_HDMI_CEC] = &hdmi_cec_clk.common.hw,
+ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
+ [CLK_BUS_TCON_TOP] = &bus_tcon_top_clk.common.hw,
+ [CLK_TCON_LCD0] = &tcon_lcd0_clk.common.hw,
+ [CLK_BUS_TCON_LCD0] = &bus_tcon_lcd0_clk.common.hw,
+ [CLK_TCON_TV0] = &tcon_tv0_clk.common.hw,
+ [CLK_BUS_TCON_TV0] = &bus_tcon_tv0_clk.common.hw,
+ [CLK_CSI_CCI] = &csi_cci_clk.common.hw,
+ [CLK_CSI_TOP] = &csi_top_clk.common.hw,
+ [CLK_CSI_MCLK] = &csi_mclk_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_HDCP] = &hdcp_clk.common.hw,
+ [CLK_BUS_HDCP] = &bus_hdcp_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun50i_h6_ccu_resets[] = {
+ [RST_MBUS] = { 0x540, BIT(30) },
+
+ [RST_BUS_DE] = { 0x60c, BIT(16) },
+ [RST_BUS_DEINTERLACE] = { 0x62c, BIT(16) },
+ [RST_BUS_GPU] = { 0x67c, BIT(16) },
+ [RST_BUS_CE] = { 0x68c, BIT(16) },
+ [RST_BUS_VE] = { 0x69c, BIT(16) },
+ [RST_BUS_EMCE] = { 0x6bc, BIT(16) },
+ [RST_BUS_VP9] = { 0x6cc, BIT(16) },
+ [RST_BUS_DMA] = { 0x70c, BIT(16) },
+ [RST_BUS_MSGBOX] = { 0x71c, BIT(16) },
+ [RST_BUS_SPINLOCK] = { 0x72c, BIT(16) },
+ [RST_BUS_HSTIMER] = { 0x73c, BIT(16) },
+ [RST_BUS_DBG] = { 0x78c, BIT(16) },
+ [RST_BUS_PSI] = { 0x79c, BIT(16) },
+ [RST_BUS_PWM] = { 0x7ac, BIT(16) },
+ [RST_BUS_IOMMU] = { 0x7bc, BIT(16) },
+ [RST_BUS_DRAM] = { 0x80c, BIT(16) },
+ [RST_BUS_NAND] = { 0x82c, BIT(16) },
+ [RST_BUS_MMC0] = { 0x84c, BIT(16) },
+ [RST_BUS_MMC1] = { 0x84c, BIT(17) },
+ [RST_BUS_MMC2] = { 0x84c, BIT(18) },
+ [RST_BUS_UART0] = { 0x90c, BIT(16) },
+ [RST_BUS_UART1] = { 0x90c, BIT(17) },
+ [RST_BUS_UART2] = { 0x90c, BIT(18) },
+ [RST_BUS_UART3] = { 0x90c, BIT(19) },
+ [RST_BUS_I2C0] = { 0x91c, BIT(16) },
+ [RST_BUS_I2C1] = { 0x91c, BIT(17) },
+ [RST_BUS_I2C2] = { 0x91c, BIT(18) },
+ [RST_BUS_I2C3] = { 0x91c, BIT(19) },
+ [RST_BUS_SCR0] = { 0x93c, BIT(16) },
+ [RST_BUS_SCR1] = { 0x93c, BIT(17) },
+ [RST_BUS_SPI0] = { 0x96c, BIT(16) },
+ [RST_BUS_SPI1] = { 0x96c, BIT(17) },
+ [RST_BUS_EMAC] = { 0x97c, BIT(16) },
+ [RST_BUS_TS] = { 0x9bc, BIT(16) },
+ [RST_BUS_IR_TX] = { 0x9cc, BIT(16) },
+ [RST_BUS_THS] = { 0x9fc, BIT(16) },
+ [RST_BUS_I2S0] = { 0xa1c, BIT(16) },
+ [RST_BUS_I2S1] = { 0xa1c, BIT(17) },
+ [RST_BUS_I2S2] = { 0xa1c, BIT(18) },
+ [RST_BUS_I2S3] = { 0xa1c, BIT(19) },
+ [RST_BUS_SPDIF] = { 0xa2c, BIT(16) },
+ [RST_BUS_DMIC] = { 0xa4c, BIT(16) },
+ [RST_BUS_AUDIO_HUB] = { 0xa6c, BIT(16) },
+
+ [RST_USB_PHY0] = { 0xa70, BIT(30) },
+ [RST_USB_PHY1] = { 0xa74, BIT(30) },
+ [RST_USB_PHY3] = { 0xa7c, BIT(30) },
+ [RST_USB_HSIC] = { 0xa7c, BIT(28) },
+
+ [RST_BUS_OHCI0] = { 0xa8c, BIT(16) },
+ [RST_BUS_OHCI3] = { 0xa8c, BIT(19) },
+ [RST_BUS_EHCI0] = { 0xa8c, BIT(20) },
+ [RST_BUS_XHCI] = { 0xa8c, BIT(21) },
+ [RST_BUS_EHCI3] = { 0xa8c, BIT(23) },
+ [RST_BUS_OTG] = { 0xa8c, BIT(24) },
+ [RST_BUS_PCIE] = { 0xabc, BIT(16) },
+
+ [RST_PCIE_POWERUP] = { 0xabc, BIT(17) },
+
+ [RST_BUS_HDMI] = { 0xb1c, BIT(16) },
+ [RST_BUS_HDMI_SUB] = { 0xb1c, BIT(17) },
+ [RST_BUS_TCON_TOP] = { 0xb5c, BIT(16) },
+ [RST_BUS_TCON_LCD0] = { 0xb7c, BIT(16) },
+ [RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) },
+ [RST_BUS_CSI] = { 0xc2c, BIT(16) },
+ [RST_BUS_HDCP] = { 0xc4c, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun50i_h6_ccu_desc = {
+ .ccu_clks = sun50i_h6_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_h6_ccu_clks),
+
+ .hw_clks = &sun50i_h6_hw_clks,
+
+ .resets = sun50i_h6_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h6_ccu_resets),
+};
+
+static const u32 pll_regs[] = {
+ SUN50I_H6_PLL_CPUX_REG,
+ SUN50I_H6_PLL_DDR0_REG,
+ SUN50I_H6_PLL_PERIPH0_REG,
+ SUN50I_H6_PLL_PERIPH1_REG,
+ SUN50I_H6_PLL_GPU_REG,
+ SUN50I_H6_PLL_VIDEO0_REG,
+ SUN50I_H6_PLL_VIDEO1_REG,
+ SUN50I_H6_PLL_VE_REG,
+ SUN50I_H6_PLL_DE_REG,
+ SUN50I_H6_PLL_HSIC_REG,
+ SUN50I_H6_PLL_AUDIO_REG,
+};
+
+static const u32 pll_video_regs[] = {
+ SUN50I_H6_PLL_VIDEO0_REG,
+ SUN50I_H6_PLL_VIDEO1_REG,
+};
+
+static const u32 usb2_clk_regs[] = {
+ SUN50I_H6_USB0_CLK_REG,
+ SUN50I_H6_USB3_CLK_REG,
+};
+
+static int sun50i_h6_ccu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *reg;
+ u32 val;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ /* Enable the lock bits on all PLLs */
+ for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
+ val = readl(reg + pll_regs[i]);
+ val |= BIT(29);
+ writel(val, reg + pll_regs[i]);
+ }
+
+ /*
+ * Force the output divider of video PLLs to 0.
+ *
+	 * See the comment before the pll-video0 definition for the reason.
+ */
+ for (i = 0; i < ARRAY_SIZE(pll_video_regs); i++) {
+ val = readl(reg + pll_video_regs[i]);
+ val &= ~BIT(0);
+ writel(val, reg + pll_video_regs[i]);
+ }
+
+ /*
+	 * Force the OHCI 12M clock sources to 00 (12 MHz divided from 48 MHz).
+ *
+ * This clock mux is still mysterious, and the code just enforces
+ * it to have a valid clock parent.
+ */
+ for (i = 0; i < ARRAY_SIZE(usb2_clk_regs); i++) {
+ val = readl(reg + usb2_clk_regs[i]);
+ val &= ~GENMASK(25, 24);
+		writel(val, reg + usb2_clk_regs[i]);
+ }
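Clearing GENMASK(25, 24) selects source 00 for both OHCI 12M registers, which matches the clock tree above: usb-ohci0 and usb-ohci3 are modelled with the fixed osc12M clock (osc24M divided by 2) as their only parent.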
+
+ /*
+	 * Force the post-divider of pll-audio to 8 and its output divider to
+	 * 1, so that the clock names represent the real frequencies.
+ */
+ val = readl(reg + SUN50I_H6_PLL_AUDIO_REG);
+ val &= ~(GENMASK(21, 16) | BIT(0));
+ writel(val | (7 << 16), reg + SUN50I_H6_PLL_AUDIO_REG);
+
+ return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_h6_ccu_desc);
+}
+
+static const struct of_device_id sun50i_h6_ccu_ids[] = {
+ { .compatible = "allwinner,sun50i-h6-ccu" },
+ { }
+};
+
+static struct platform_driver sun50i_h6_ccu_driver = {
+ .probe = sun50i_h6_ccu_probe,
+ .driver = {
+ .name = "sun50i-h6-ccu",
+ .of_match_table = sun50i_h6_ccu_ids,
+ },
+};
+builtin_platform_driver(sun50i_h6_ccu_driver);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.h b/drivers/clk/sunxi-ng/ccu-sun50i-h6.h
new file mode 100644
index 000000000000..2ccfe4428260
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2016 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#ifndef _CCU_SUN50I_H6_H_
+#define _CCU_SUN50I_H6_H_
+
+#include <dt-bindings/clock/sun50i-h6-ccu.h>
+#include <dt-bindings/reset/sun50i-h6-ccu.h>
+
+#define CLK_OSC12M 0
+#define CLK_PLL_CPUX 1
+#define CLK_PLL_DDR0 2
+
+/* PLL_PERIPH0 exported for PRCM */
+
+#define CLK_PLL_PERIPH0_2X 4
+#define CLK_PLL_PERIPH0_4X 5
+#define CLK_PLL_PERIPH1 6
+#define CLK_PLL_PERIPH1_2X 7
+#define CLK_PLL_PERIPH1_4X 8
+#define CLK_PLL_GPU 9
+#define CLK_PLL_VIDEO0 10
+#define CLK_PLL_VIDEO0_4X 11
+#define CLK_PLL_VIDEO1 12
+#define CLK_PLL_VIDEO1_4X 13
+#define CLK_PLL_VE 14
+#define CLK_PLL_DE 15
+#define CLK_PLL_HSIC 16
+#define CLK_PLL_AUDIO_BASE 17
+#define CLK_PLL_AUDIO 18
+#define CLK_PLL_AUDIO_2X 19
+#define CLK_PLL_AUDIO_4X 20
+
+/* CPUX clock exported for DVFS */
+
+#define CLK_AXI 22
+#define CLK_CPUX_APB 23
+#define CLK_PSI_AHB1_AHB2 24
+#define CLK_AHB3 25
+
+/* APB1 clock exported for PIO */
+
+#define CLK_APB2 27
+#define CLK_MBUS 28
+
+/* All module clocks and bus gates are exported except DRAM */
+
+#define CLK_DRAM 52
+
+#define CLK_BUS_DRAM 60
+
+#define CLK_NUMBER (CLK_BUS_HDCP + 1)
+
+#endif /* _CCU_SUN50I_H6_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 29bc0566b776..77ed0b0ba681 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -69,17 +69,18 @@ static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
- "osc24M", 0x0010,
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video_clk, "pll-video",
+ "osc24M", 0x0010,
+ 192000000, /* Minimum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
"osc24M", 0x0018,
@@ -451,11 +452,13 @@ static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram",
static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 3, BIT(31), 0);
+ 0x104, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static const char * const tcon_parents[] = { "pll-video" };
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents,
- 0x118, 0, 4, 24, 3, BIT(31), 0);
+ 0x118, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static const char * const tve_parents[] = { "pll-de", "pll-periph1" };
static SUNXI_CCU_M_WITH_MUX_GATE(tve_clk, "tve", tve_parents,
@@ -486,7 +489,8 @@ static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
static const char * const hdmi_parents[] = { "pll-video" };
static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents,
- 0x150, 0, 4, 24, 2, BIT(31), 0);
+ 0x150, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M",
0x154, BIT(31), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
index 1b4baea37d81..73d7392c968c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
@@ -26,7 +26,9 @@
#define CLK_PLL_AUDIO_2X 3
#define CLK_PLL_AUDIO_4X 4
#define CLK_PLL_AUDIO_8X 5
-#define CLK_PLL_VIDEO 6
+
+/* PLL_VIDEO is exported */
+
#define CLK_PLL_VE 7
#define CLK_PLL_DDR 8
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index e58c95787f94..ebd9436d2c7c 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -20,6 +20,18 @@ struct _ccu_nkmp {
unsigned long p, min_p, max_p;
};
+static unsigned long ccu_nkmp_calc_rate(unsigned long parent,
+ unsigned long n, unsigned long k,
+ unsigned long m, unsigned long p)
+{
+ u64 rate = parent;
+
+ rate *= n * k;
+ do_div(rate, m * p);
+
+ return rate;
+}
+
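The u64 intermediate and do_div() presumably exist to keep the product from overflowing on 32-bit builds: with an 8-bit N field, parent * n alone can reach 24 MHz * 255 = 6.12 GHz, which no longer fits in a 32-bit unsigned long, whereas the open-coded parent * _n * _k / (_m * _p) expression removed below evaluated entirely in unsigned long.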
static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
struct _ccu_nkmp *nkmp)
{
@@ -33,7 +45,9 @@ static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
for (_p = nkmp->min_p; _p <= nkmp->max_p; _p <<= 1) {
unsigned long tmp_rate;
- tmp_rate = parent * _n * _k / (_m * _p);
+ tmp_rate = ccu_nkmp_calc_rate(parent,
+ _n, _k,
+ _m, _p);
if (tmp_rate > rate)
continue;
@@ -81,7 +95,7 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
- unsigned long n, m, k, p;
+ unsigned long n, m, k, p, rate;
u32 reg;
reg = readl(nkmp->common.base + nkmp->common.reg);
@@ -107,7 +121,11 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
p = reg >> nkmp->p.shift;
p &= (1 << nkmp->p.width) - 1;
- return (parent_rate * n * k >> p) / m;
+ rate = ccu_nkmp_calc_rate(parent_rate, n, k, m, 1 << p);
+ if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate /= nkmp->fixed_post_div;
+
+ return rate;
}
static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -116,6 +134,9 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
struct _ccu_nkmp _nkmp;
+ if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate *= nkmp->fixed_post_div;
+
_nkmp.min_n = nkmp->n.min ?: 1;
_nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
_nkmp.min_k = nkmp->k.min ?: 1;
@@ -127,17 +148,26 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
- return *parent_rate * _nkmp.n * _nkmp.k / (_nkmp.m * _nkmp.p);
+ rate = ccu_nkmp_calc_rate(*parent_rate, _nkmp.n, _nkmp.k,
+ _nkmp.m, _nkmp.p);
+ if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate = rate / nkmp->fixed_post_div;
+
+ return rate;
}
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
+ u32 n_mask, k_mask, m_mask, p_mask;
struct _ccu_nkmp _nkmp;
unsigned long flags;
u32 reg;
+ if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate = rate * nkmp->fixed_post_div;
+
_nkmp.min_n = nkmp->n.min ?: 1;
_nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
_nkmp.min_k = nkmp->k.min ?: 1;
@@ -149,18 +179,20 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
+ n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
+ k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
+ m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
+ p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
+
spin_lock_irqsave(nkmp->common.lock, flags);
reg = readl(nkmp->common.base + nkmp->common.reg);
- reg &= ~GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
- reg &= ~GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
- reg &= ~GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
- reg &= ~GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
-
- reg |= (_nkmp.n - nkmp->n.offset) << nkmp->n.shift;
- reg |= (_nkmp.k - nkmp->k.offset) << nkmp->k.shift;
- reg |= (_nkmp.m - nkmp->m.offset) << nkmp->m.shift;
- reg |= ilog2(_nkmp.p) << nkmp->p.shift;
+ reg &= ~(n_mask | k_mask | m_mask | p_mask);
+
+ reg |= ((_nkmp.n - nkmp->n.offset) << nkmp->n.shift) & n_mask;
+ reg |= ((_nkmp.k - nkmp->k.offset) << nkmp->k.shift) & k_mask;
+ reg |= ((_nkmp.m - nkmp->m.offset) << nkmp->m.shift) & m_mask;
+ reg |= (ilog2(_nkmp.p) << nkmp->p.shift) & p_mask;
writel(reg, nkmp->common.base + nkmp->common.reg);
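Precomputing the four field masks and ANDing each shifted factor with its own mask means that a factor value wider than its field can no longer spill into neighbouring bits of the register; the read-modify-write sequence under the spinlock is otherwise unchanged.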
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.h b/drivers/clk/sunxi-ng/ccu_nkmp.h
index a82facbc6144..6940503e7fc4 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.h
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.h
@@ -34,6 +34,8 @@ struct ccu_nkmp {
struct ccu_div_internal m;
struct ccu_div_internal p;
+ unsigned int fixed_post_div;
+
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index a16de092bf94..4e2073307f34 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -117,6 +117,13 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate *= nm->fixed_post_div;
+ if (rate < nm->min_rate) {
+ rate = nm->min_rate;
+ if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate /= nm->fixed_post_div;
+ return rate;
+ }
+
if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate /= nm->fixed_post_div;
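With this clamp, a request below min_rate is rounded up to it (scaled back by the fixed post-divider when one is present). For the pll-video change in the ccu-sun8i-h3.c hunk above, which passes a 192000000 minimum, a request for, say, 150 MHz would now round to 192 MHz.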
diff --git a/drivers/clk/sunxi-ng/ccu_nm.h b/drivers/clk/sunxi-ng/ccu_nm.h
index eba586b4c7d0..1d8b459c50b7 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.h
+++ b/drivers/clk/sunxi-ng/ccu_nm.h
@@ -37,6 +37,7 @@ struct ccu_nm {
struct ccu_sdm_internal sdm;
unsigned int fixed_post_div;
+ unsigned int min_rate;
struct ccu_common common;
};
@@ -88,6 +89,32 @@ struct ccu_nm {
}, \
}
+#define SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(_struct, _name, _parent, \
+ _reg, _min_rate, \
+ _nshift, _nwidth, \
+ _mshift, _mwidth, \
+ _frac_en, _frac_sel, \
+ _frac_rate_0, _frac_rate_1,\
+ _gate, _lock, _flags) \
+ struct ccu_nm _struct = { \
+ .enable = _gate, \
+ .lock = _lock, \
+ .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .frac = _SUNXI_CCU_FRAC(_frac_en, _frac_sel, \
+ _frac_rate_0, \
+ _frac_rate_1), \
+ .min_rate = _min_rate, \
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_FRACTIONAL, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_nm_ops, \
+ _flags), \
+ }, \
+ }
+
#define SUNXI_CCU_NM_WITH_GATE_LOCK(_struct, _name, _parent, _reg, \
_nshift, _nwidth, \
_mshift, _mwidth, \
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 11a5066e5c27..5234acd30e89 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -515,7 +515,7 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
init.name = "emc";
init.ops = &tegra_clk_emc_ops;
- init.flags = 0;
+ init.flags = CLK_IS_CRITICAL;
init.parent_names = emc_parent_clk_names;
init.num_parents = ARRAY_SIZE(emc_parent_clk_names);
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 7c369e21c91c..830d1c87fa7c 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -1151,6 +1151,8 @@ static const struct clk_ops tegra_clk_pllu_ops = {
.enable = clk_pllu_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
};
static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index c02711927d79..2acba2986bc6 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -830,7 +830,7 @@ static struct tegra_periph_init_data gate_clks[] = {
GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0),
GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0),
GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0),
- GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED),
+ GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IS_CRITICAL),
GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0),
GATE("ispa", "isp", 23, 0, tegra_clk_ispa, 0),
GATE("ispb", "isp", 3, 0, tegra_clk_ispb, 0),
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 10047107c1dc..89d6b47a27a8 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -125,7 +125,8 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
/* SCLK */
dt_clk = tegra_lookup_dt_id(tegra_clk_sclk, tegra_clks);
if (dt_clk) {
- clk = clk_register_divider(NULL, "sclk", "sclk_mux", 0,
+ clk = clk_register_divider(NULL, "sclk", "sclk_mux",
+ CLK_IS_CRITICAL,
clk_base + SCLK_DIVIDER, 0, 8,
0, &sysrate_lock);
*dt_clk = clk;
@@ -137,7 +138,8 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
clk = tegra_clk_register_super_mux("sclk",
gen_info->sclk_parents,
gen_info->num_sclk_parents,
- CLK_SET_RATE_PARENT,
+ CLK_SET_RATE_PARENT |
+ CLK_IS_CRITICAL,
clk_base + SCLK_BURST_POLICY,
0, 4, 0, 0, NULL);
*dt_clk = clk;
@@ -151,7 +153,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
&sysrate_lock);
clk = clk_register_gate(NULL, "hclk", "hclk_div",
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
clk_base + SYSTEM_CLK_RATE,
7, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
*dt_clk = clk;
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 63087d17c3e2..5d5a22d529f5 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -955,8 +955,7 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
/* PLLM */
clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
- &pll_m_params, NULL);
+ CLK_SET_RATE_GATE, &pll_m_params, NULL);
clks[TEGRA114_CLK_PLL_M] = clk;
/* PLLM_OUT1 */
@@ -1190,6 +1189,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA114_CLK_XUSB_HS_SRC, TEGRA114_CLK_XUSB_SS_DIV2, 61200000, 0 },
{ TEGRA114_CLK_XUSB_FALCON_SRC, TEGRA114_CLK_PLL_P, 204000000, 0 },
{ TEGRA114_CLK_XUSB_HOST_SRC, TEGRA114_CLK_PLL_P, 102000000, 0 },
+ { TEGRA114_CLK_VDE, TEGRA114_CLK_CLK_MAX, 600000000, 0 },
/* must be the last entry */
{ TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index e81ea5b11577..50088e976611 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1089,8 +1089,7 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
/* PLLM */
clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
- &pll_m_params, NULL);
+ CLK_SET_RATE_GATE, &pll_m_params, NULL);
clk_register_clkdev(clk, "pll_m", NULL);
clks[TEGRA124_CLK_PLL_M] = clk;
@@ -1099,7 +1098,7 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
8, 8, 1, NULL);
clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
- clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ clk_base + PLLM_OUT, 1, 0,
CLK_SET_RATE_PARENT, 0, NULL);
clk_register_clkdev(clk, "pll_m_out1", NULL);
clks[TEGRA124_CLK_PLL_M_OUT1] = clk;
@@ -1268,11 +1267,11 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_I2S2, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S3, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
- { TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0 },
+ { TEGRA124_CLK_VDE, TEGRA124_CLK_CLK_MAX, 600000000, 0 },
{ TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1 },
{ TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0 },
{ TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0 },
- { TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1 },
+ { TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 0 },
{ TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1 },
{ TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1 },
{ TEGRA124_CLK_PLL_C, TEGRA124_CLK_CLK_MAX, 768000000, 0 },
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index cbd5a2e5c569..0ee56dd04cec 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -576,6 +576,7 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
[tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
[tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
[tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
+ [tegra_clk_emc] = { .dt_id = TEGRA20_CLK_EMC, .present = true },
};
static unsigned long tegra20_clk_measure_input_freq(void)
@@ -651,8 +652,7 @@ static void tegra20_pll_init(void)
/* PLLM */
clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, NULL,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
- &pll_m_params, NULL);
+ CLK_SET_RATE_GATE, &pll_m_params, NULL);
clks[TEGRA20_CLK_PLL_M] = clk;
/* PLLM_OUT1 */
@@ -660,7 +660,7 @@ static void tegra20_pll_init(void)
clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
8, 8, 1, NULL);
clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
- clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ clk_base + PLLM_OUT, 1, 0,
CLK_SET_RATE_PARENT, 0, NULL);
clks[TEGRA20_CLK_PLL_M_OUT1] = clk;
@@ -723,7 +723,8 @@ static void tegra20_super_clk_init(void)
/* SCLK */
clk = tegra_clk_register_super_mux("sclk", sclk_parents,
- ARRAY_SIZE(sclk_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sclk_parents),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
clks[TEGRA20_CLK_SCLK] = clk;
@@ -814,9 +815,6 @@ static void __init tegra20_periph_clk_init(void)
CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
30, 2, 0, &emc_lock);
- clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
- 57, periph_clk_enb_refcnt);
- clks[TEGRA20_CLK_EMC] = clk;
clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
&emc_lock);
@@ -1019,13 +1017,12 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA20_CLK_PLL_P_OUT2, TEGRA20_CLK_CLK_MAX, 48000000, 1 },
{ TEGRA20_CLK_PLL_P_OUT3, TEGRA20_CLK_CLK_MAX, 72000000, 1 },
{ TEGRA20_CLK_PLL_P_OUT4, TEGRA20_CLK_CLK_MAX, 24000000, 1 },
- { TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 1 },
- { TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 216000000, 1 },
- { TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 0, 1 },
- { TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 0, 1 },
- { TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 1 },
+ { TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 0 },
+ { TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 240000000, 0 },
+ { TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 240000000, 0 },
+ { TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 240000000, 0 },
+ { TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 0 },
{ TEGRA20_CLK_CSITE, TEGRA20_CLK_CLK_MAX, 0, 1 },
- { TEGRA20_CLK_EMC, TEGRA20_CLK_CLK_MAX, 0, 1 },
{ TEGRA20_CLK_CCLK, TEGRA20_CLK_CLK_MAX, 0, 1 },
{ TEGRA20_CLK_UARTA, TEGRA20_CLK_PLL_P, 0, 0 },
{ TEGRA20_CLK_UARTB, TEGRA20_CLK_PLL_P, 0, 0 },
@@ -1051,6 +1048,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA20_CLK_DISP2, TEGRA20_CLK_PLL_P, 600000000, 0 },
{ TEGRA20_CLK_GR2D, TEGRA20_CLK_PLL_C, 300000000, 0 },
{ TEGRA20_CLK_GR3D, TEGRA20_CLK_PLL_C, 300000000, 0 },
+ { TEGRA20_CLK_VDE, TEGRA20_CLK_CLK_MAX, 300000000, 0 },
/* must be the last entry */
{ TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 9e6260869eb9..9fb5d51ccce4 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -22,10 +22,12 @@
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/mutex.h>
#include <linux/clk/tegra.h>
#include <dt-bindings/clock/tegra210-car.h>
#include <dt-bindings/reset/tegra210-car.h>
#include <linux/iopoll.h>
+#include <soc/tegra/pmc.h>
#include "clk.h"
#include "clk-id.h"
@@ -41,6 +43,7 @@
#define CLK_SOURCE_CSITE 0x1d4
#define CLK_SOURCE_EMC 0x19c
#define CLK_SOURCE_SOR1 0x410
+#define CLK_SOURCE_LA 0x1f8
#define PLLC_BASE 0x80
#define PLLC_OUT 0x84
@@ -231,6 +234,30 @@
#define CLK_RST_CONTROLLER_RST_DEV_Y_SET 0x2a8
#define CLK_RST_CONTROLLER_RST_DEV_Y_CLR 0x2ac
+#define LVL2_CLK_GATE_OVRA 0xf8
+#define LVL2_CLK_GATE_OVRC 0x3a0
+#define LVL2_CLK_GATE_OVRD 0x3a4
+#define LVL2_CLK_GATE_OVRE 0x554
+
+/* I2S registers to handle during APE MBIST WAR */
+#define TEGRA210_I2S_BASE 0x1000
+#define TEGRA210_I2S_SIZE 0x100
+#define TEGRA210_I2S_CTRLS 5
+#define TEGRA210_I2S_CG 0x88
+#define TEGRA210_I2S_CTRL 0xa0
+
+/* DISPA registers to handle during MBIST WAR */
+#define DC_CMD_DISPLAY_COMMAND 0xc8
+#define DC_COM_DSC_TOP_CTL 0xcf8
+
+/* VIC register to handle during MBIST WAR */
+#define NV_PVIC_THI_SLCG_OVERRIDE_LOW 0x8c
+
+/* APE, DISPA and VIC base addresses needed for MBIST WAR */
+#define TEGRA210_AHUB_BASE 0x702d0000
+#define TEGRA210_DISPA_BASE 0x54200000
+#define TEGRA210_VIC_BASE 0x54340000
+
/*
* SDM fractional divisor is 16-bit 2's complement signed number within
* (-2^12 ... 2^12-1) range. Represented in PLL data structure as unsigned
@@ -255,8 +282,22 @@ static struct cpu_clk_suspend_context {
} tegra210_cpu_clk_sctx;
#endif
+struct tegra210_domain_mbist_war {
+ void (*handle_lvl2_ovr)(struct tegra210_domain_mbist_war *mbist);
+ const u32 lvl2_offset;
+ const u32 lvl2_mask;
+ const unsigned int num_clks;
+ const unsigned int *clk_init_data;
+ struct clk_bulk_data *clks;
+};
+
+static struct clk **clks;
+
static void __iomem *clk_base;
static void __iomem *pmc_base;
+static void __iomem *ahub_base;
+static void __iomem *dispa_base;
+static void __iomem *vic_base;
static unsigned long osc_freq;
static unsigned long pll_ref_freq;
@@ -267,6 +308,7 @@ static DEFINE_SPINLOCK(pll_re_lock);
static DEFINE_SPINLOCK(pll_u_lock);
static DEFINE_SPINLOCK(sor1_lock);
static DEFINE_SPINLOCK(emc_lock);
+static DEFINE_MUTEX(lvl2_ovr_lock);
/* possible OSC frequencies in Hz */
static unsigned long tegra210_input_freq[] = {
@@ -310,6 +352,8 @@ static const char *mux_pllmcp_clkm[] = {
#define PLLA_MISC2_WRITE_MASK 0x06ffffff
/* PLLD */
+#define PLLD_BASE_CSI_CLKSOURCE (1 << 23)
+
#define PLLD_MISC0_EN_SDM (1 << 16)
#define PLLD_MISC0_LOCK_OVERRIDE (1 << 17)
#define PLLD_MISC0_LOCK_ENABLE (1 << 18)
@@ -513,6 +557,115 @@ void tegra210_set_sata_pll_seq_sw(bool state)
}
EXPORT_SYMBOL_GPL(tegra210_set_sata_pll_seq_sw);
+static void tegra210_generic_mbist_war(struct tegra210_domain_mbist_war *mbist)
+{
+ u32 val;
+
+ val = readl_relaxed(clk_base + mbist->lvl2_offset);
+ writel_relaxed(val | mbist->lvl2_mask, clk_base + mbist->lvl2_offset);
+ fence_udelay(1, clk_base);
+ writel_relaxed(val, clk_base + mbist->lvl2_offset);
+ fence_udelay(1, clk_base);
+}
+
+static void tegra210_venc_mbist_war(struct tegra210_domain_mbist_war *mbist)
+{
+ u32 csi_src, ovra, ovre;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&pll_d_lock, flags);
+
+ csi_src = readl_relaxed(clk_base + PLLD_BASE);
+ writel_relaxed(csi_src | PLLD_BASE_CSI_CLKSOURCE, clk_base + PLLD_BASE);
+ fence_udelay(1, clk_base);
+
+ ovra = readl_relaxed(clk_base + LVL2_CLK_GATE_OVRA);
+ writel_relaxed(ovra | BIT(15), clk_base + LVL2_CLK_GATE_OVRA);
+ ovre = readl_relaxed(clk_base + LVL2_CLK_GATE_OVRE);
+ writel_relaxed(ovre | BIT(3), clk_base + LVL2_CLK_GATE_OVRE);
+ fence_udelay(1, clk_base);
+
+ writel_relaxed(ovra, clk_base + LVL2_CLK_GATE_OVRA);
+ writel_relaxed(ovre, clk_base + LVL2_CLK_GATE_OVRE);
+ writel_relaxed(csi_src, clk_base + PLLD_BASE);
+ fence_udelay(1, clk_base);
+
+ spin_unlock_irqrestore(&pll_d_lock, flags);
+}
+
+static void tegra210_disp_mbist_war(struct tegra210_domain_mbist_war *mbist)
+{
+ u32 ovra, dsc_top_ctrl;
+
+ ovra = readl_relaxed(clk_base + LVL2_CLK_GATE_OVRA);
+ writel_relaxed(ovra | BIT(1), clk_base + LVL2_CLK_GATE_OVRA);
+ fence_udelay(1, clk_base);
+
+ dsc_top_ctrl = readl_relaxed(dispa_base + DC_COM_DSC_TOP_CTL);
+ writel_relaxed(dsc_top_ctrl | BIT(2), dispa_base + DC_COM_DSC_TOP_CTL);
+ readl_relaxed(dispa_base + DC_CMD_DISPLAY_COMMAND);
+ writel_relaxed(dsc_top_ctrl, dispa_base + DC_COM_DSC_TOP_CTL);
+ readl_relaxed(dispa_base + DC_CMD_DISPLAY_COMMAND);
+
+ writel_relaxed(ovra, clk_base + LVL2_CLK_GATE_OVRA);
+ fence_udelay(1, clk_base);
+}
+
+static void tegra210_vic_mbist_war(struct tegra210_domain_mbist_war *mbist)
+{
+ u32 ovre, val;
+
+ ovre = readl_relaxed(clk_base + LVL2_CLK_GATE_OVRE);
+ writel_relaxed(ovre | BIT(5), clk_base + LVL2_CLK_GATE_OVRE);
+ fence_udelay(1, clk_base);
+
+ val = readl_relaxed(vic_base + NV_PVIC_THI_SLCG_OVERRIDE_LOW);
+ writel_relaxed(val | BIT(0) | GENMASK(7, 2) | BIT(24),
+ vic_base + NV_PVIC_THI_SLCG_OVERRIDE_LOW);
+ fence_udelay(1, vic_base + NV_PVIC_THI_SLCG_OVERRIDE_LOW);
+
+ writel_relaxed(val, vic_base + NV_PVIC_THI_SLCG_OVERRIDE_LOW);
+ readl(vic_base + NV_PVIC_THI_SLCG_OVERRIDE_LOW);
+
+ writel_relaxed(ovre, clk_base + LVL2_CLK_GATE_OVRE);
+ fence_udelay(1, clk_base);
+}
+
+static void tegra210_ape_mbist_war(struct tegra210_domain_mbist_war *mbist)
+{
+ void __iomem *i2s_base;
+ unsigned int i;
+ u32 ovrc, ovre;
+
+ ovrc = readl_relaxed(clk_base + LVL2_CLK_GATE_OVRC);
+ ovre = readl_relaxed(clk_base + LVL2_CLK_GATE_OVRE);
+ writel_relaxed(ovrc | BIT(1), clk_base + LVL2_CLK_GATE_OVRC);
+ writel_relaxed(ovre | BIT(10) | BIT(11),
+ clk_base + LVL2_CLK_GATE_OVRE);
+ fence_udelay(1, clk_base);
+
+ i2s_base = ahub_base + TEGRA210_I2S_BASE;
+
+ for (i = 0; i < TEGRA210_I2S_CTRLS; i++) {
+ u32 i2s_ctrl;
+
+ i2s_ctrl = readl_relaxed(i2s_base + TEGRA210_I2S_CTRL);
+ writel_relaxed(i2s_ctrl | BIT(10),
+ i2s_base + TEGRA210_I2S_CTRL);
+ writel_relaxed(0, i2s_base + TEGRA210_I2S_CG);
+ readl(i2s_base + TEGRA210_I2S_CG);
+ writel_relaxed(1, i2s_base + TEGRA210_I2S_CG);
+ writel_relaxed(i2s_ctrl, i2s_base + TEGRA210_I2S_CTRL);
+ readl(i2s_base + TEGRA210_I2S_CTRL);
+
+ i2s_base += TEGRA210_I2S_SIZE;
+ }
+
+ writel_relaxed(ovrc, clk_base + LVL2_CLK_GATE_OVRC);
+ writel_relaxed(ovre, clk_base + LVL2_CLK_GATE_OVRE);
+ fence_udelay(1, clk_base);
+}
+
static inline void _pll_misc_chk_default(void __iomem *base,
struct tegra_clk_pll_params *params,
u8 misc_num, u32 default_val, u32 mask)
@@ -2411,13 +2564,150 @@ static struct tegra_audio_clk_info tegra210_audio_plls[] = {
{ "pll_a1", &pll_a1_params, tegra_clk_pll_a1, "pll_ref" },
};
-static struct clk **clks;
-
static const char * const aclk_parents[] = {
"pll_a1", "pll_c", "pll_p", "pll_a_out0", "pll_c2", "pll_c3",
"clk_m"
};
+static const unsigned int nvjpg_slcg_clkids[] = { TEGRA210_CLK_NVDEC };
+static const unsigned int nvdec_slcg_clkids[] = { TEGRA210_CLK_NVJPG };
+static const unsigned int sor_slcg_clkids[] = { TEGRA210_CLK_HDA2CODEC_2X,
+ TEGRA210_CLK_HDA2HDMI, TEGRA210_CLK_DISP1, TEGRA210_CLK_DISP2 };
+static const unsigned int disp_slcg_clkids[] = { TEGRA210_CLK_LA,
+ TEGRA210_CLK_HOST1X};
+static const unsigned int xusba_slcg_clkids[] = { TEGRA210_CLK_XUSB_HOST,
+ TEGRA210_CLK_XUSB_DEV };
+static const unsigned int xusbb_slcg_clkids[] = { TEGRA210_CLK_XUSB_HOST,
+ TEGRA210_CLK_XUSB_SS };
+static const unsigned int xusbc_slcg_clkids[] = { TEGRA210_CLK_XUSB_DEV,
+ TEGRA210_CLK_XUSB_SS };
+static const unsigned int venc_slcg_clkids[] = { TEGRA210_CLK_HOST1X,
+ TEGRA210_CLK_PLL_D };
+static const unsigned int ape_slcg_clkids[] = { TEGRA210_CLK_ACLK,
+ TEGRA210_CLK_I2S0, TEGRA210_CLK_I2S1, TEGRA210_CLK_I2S2,
+ TEGRA210_CLK_I2S3, TEGRA210_CLK_I2S4, TEGRA210_CLK_SPDIF_OUT,
+ TEGRA210_CLK_D_AUDIO };
+static const unsigned int vic_slcg_clkids[] = { TEGRA210_CLK_HOST1X };
+
+static struct tegra210_domain_mbist_war tegra210_pg_mbist_war[] = {
+ [TEGRA_POWERGATE_VENC] = {
+ .handle_lvl2_ovr = tegra210_venc_mbist_war,
+ .num_clks = ARRAY_SIZE(venc_slcg_clkids),
+ .clk_init_data = venc_slcg_clkids,
+ },
+ [TEGRA_POWERGATE_SATA] = {
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_mask = BIT(0) | BIT(17) | BIT(19),
+ },
+ [TEGRA_POWERGATE_MPE] = {
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRE,
+ .lvl2_mask = BIT(2),
+ },
+ [TEGRA_POWERGATE_SOR] = {
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .num_clks = ARRAY_SIZE(sor_slcg_clkids),
+ .clk_init_data = sor_slcg_clkids,
+ .lvl2_offset = LVL2_CLK_GATE_OVRA,
+ .lvl2_mask = BIT(1) | BIT(2),
+ },
+ [TEGRA_POWERGATE_DIS] = {
+ .handle_lvl2_ovr = tegra210_disp_mbist_war,
+ .num_clks = ARRAY_SIZE(disp_slcg_clkids),
+ .clk_init_data = disp_slcg_clkids,
+ },
+ [TEGRA_POWERGATE_DISB] = {
+ .num_clks = ARRAY_SIZE(disp_slcg_clkids),
+ .clk_init_data = disp_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRA,
+ .lvl2_mask = BIT(2),
+ },
+ [TEGRA_POWERGATE_XUSBA] = {
+ .num_clks = ARRAY_SIZE(xusba_slcg_clkids),
+ .clk_init_data = xusba_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_mask = BIT(30) | BIT(31),
+ },
+ [TEGRA_POWERGATE_XUSBB] = {
+ .num_clks = ARRAY_SIZE(xusbb_slcg_clkids),
+ .clk_init_data = xusbb_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_mask = BIT(30) | BIT(31),
+ },
+ [TEGRA_POWERGATE_XUSBC] = {
+ .num_clks = ARRAY_SIZE(xusbc_slcg_clkids),
+ .clk_init_data = xusbc_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_mask = BIT(30) | BIT(31),
+ },
+ [TEGRA_POWERGATE_VIC] = {
+ .num_clks = ARRAY_SIZE(vic_slcg_clkids),
+ .clk_init_data = vic_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_vic_mbist_war,
+ },
+ [TEGRA_POWERGATE_NVDEC] = {
+ .num_clks = ARRAY_SIZE(nvdec_slcg_clkids),
+ .clk_init_data = nvdec_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_mask = BIT(9) | BIT(31),
+ },
+ [TEGRA_POWERGATE_NVJPG] = {
+ .num_clks = ARRAY_SIZE(nvjpg_slcg_clkids),
+ .clk_init_data = nvjpg_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_mask = BIT(9) | BIT(31),
+ },
+ [TEGRA_POWERGATE_AUD] = {
+ .num_clks = ARRAY_SIZE(ape_slcg_clkids),
+ .clk_init_data = ape_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_ape_mbist_war,
+ },
+ [TEGRA_POWERGATE_VE2] = {
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRD,
+ .lvl2_mask = BIT(22),
+ },
+};
+
+int tegra210_clk_handle_mbist_war(unsigned int id)
+{
+ int err;
+ struct tegra210_domain_mbist_war *mbist_war;
+
+ if (id >= ARRAY_SIZE(tegra210_pg_mbist_war)) {
+ WARN(1, "unknown domain id in MBIST WAR handler\n");
+ return -EINVAL;
+ }
+
+ mbist_war = &tegra210_pg_mbist_war[id];
+ if (!mbist_war->handle_lvl2_ovr)
+ return 0;
+
+ if (mbist_war->num_clks && !mbist_war->clks)
+ return -ENODEV;
+
+ err = clk_bulk_prepare_enable(mbist_war->num_clks, mbist_war->clks);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&lvl2_ovr_lock);
+
+ mbist_war->handle_lvl2_ovr(mbist_war);
+
+ mutex_unlock(&lvl2_ovr_lock);
+
+ clk_bulk_disable_unprepare(mbist_war->num_clks, mbist_war->clks);
+
+ return 0;
+}
+
void tegra210_put_utmipll_in_iddq(void)
{
u32 reg;
@@ -2654,6 +2944,13 @@ static struct tegra_periph_init_data tegra210_periph[] = {
sor1_parents_idx, 0, &sor1_lock),
};
+static const char * const la_parents[] = {
+ "pll_p", "pll_c2", "pll_c", "pll_c3", "pll_re_out1", "pll_a1", "clk_m", "pll_c4_out0"
+};
+
+static struct tegra_clk_periph tegra210_la =
+ TEGRA_CLK_PERIPH(29, 7, 9, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, 76, 0, NULL, 0);
+
static __init void tegra210_periph_clk_init(void __iomem *clk_base,
void __iomem *pmc_base)
{
@@ -2700,6 +2997,12 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
periph_clk_enb_refcnt);
clks[TEGRA210_CLK_DSIB] = clk;
+ /* la */
+ clk = tegra_clk_register_periph("la", la_parents,
+ ARRAY_SIZE(la_parents), &tegra210_la, clk_base,
+ CLK_SOURCE_LA, 0);
+ clks[TEGRA210_CLK_LA] = clk;
+
/* emc mux */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
ARRAY_SIZE(mux_pllmcp_clkm), 0,
@@ -3025,7 +3328,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_I2S4, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA210_CLK_HOST1X, TEGRA210_CLK_PLL_P, 136000000, 1 },
{ TEGRA210_CLK_SCLK_MUX, TEGRA210_CLK_PLL_P, 0, 1 },
- { TEGRA210_CLK_SCLK, TEGRA210_CLK_CLK_MAX, 102000000, 1 },
+ { TEGRA210_CLK_SCLK, TEGRA210_CLK_CLK_MAX, 102000000, 0 },
{ TEGRA210_CLK_DFLL_SOC, TEGRA210_CLK_PLL_P, 51000000, 1 },
{ TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
{ TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
@@ -3040,7 +3343,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_XUSB_DEV_SRC, TEGRA210_CLK_PLL_P_OUT_XUSB, 102000000, 0 },
{ TEGRA210_CLK_SATA, TEGRA210_CLK_PLL_P, 104000000, 0 },
{ TEGRA210_CLK_SATA_OOB, TEGRA210_CLK_PLL_P, 204000000, 0 },
- { TEGRA210_CLK_EMC, TEGRA210_CLK_CLK_MAX, 0, 1 },
{ TEGRA210_CLK_MSELECT, TEGRA210_CLK_CLK_MAX, 0, 1 },
{ TEGRA210_CLK_CSITE, TEGRA210_CLK_CLK_MAX, 0, 1 },
/* TODO find a way to enable this on-demand */
@@ -3149,6 +3451,37 @@ static int tegra210_reset_deassert(unsigned long id)
return 0;
}
+static void tegra210_mbist_clk_init(void)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(tegra210_pg_mbist_war); i++) {
+ unsigned int num_clks = tegra210_pg_mbist_war[i].num_clks;
+ struct clk_bulk_data *clk_data;
+
+ if (!num_clks)
+ continue;
+
+ clk_data = kmalloc_array(num_clks, sizeof(*clk_data),
+ GFP_KERNEL);
+ if (WARN_ON(!clk_data))
+ return;
+
+ tegra210_pg_mbist_war[i].clks = clk_data;
+ for (j = 0; j < num_clks; j++) {
+ int clk_id = tegra210_pg_mbist_war[i].clk_init_data[j];
+ struct clk *clk = clks[clk_id];
+
+ if (WARN(IS_ERR(clk), "clk_id: %d\n", clk_id)) {
+ kfree(clk_data);
+ tegra210_pg_mbist_war[i].clks = NULL;
+ break;
+ }
+ clk_data[j].clk = clk;
+ }
+ }
+}
+
/**
* tegra210_clock_init - Tegra210-specific clock initialization
* @np: struct device_node * of the DT node for the SoC CAR IP block
@@ -3183,6 +3516,24 @@ static void __init tegra210_clock_init(struct device_node *np)
return;
}
+ ahub_base = ioremap(TEGRA210_AHUB_BASE, SZ_64K);
+ if (!ahub_base) {
+ pr_err("ioremap tegra210 APE failed\n");
+ return;
+ }
+
+ dispa_base = ioremap(TEGRA210_DISPA_BASE, SZ_256K);
+ if (!dispa_base) {
+ pr_err("ioremap tegra210 DISPA failed\n");
+ return;
+ }
+
+ vic_base = ioremap(TEGRA210_VIC_BASE, SZ_256K);
+ if (!vic_base) {
+ pr_err("ioremap tegra210 VIC failed\n");
+ return;
+ }
+
clks = tegra_clk_init(clk_base, TEGRA210_CLK_CLK_MAX,
TEGRA210_CAR_BANK_COUNT);
if (!clks)
@@ -3219,6 +3570,8 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra_add_of_provider(np);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
+ tegra210_mbist_clk_init();
+
tegra_cpu_car_ops = &tegra210_cpu_car_ops;
}
CLK_OF_DECLARE(tegra210, "nvidia,tegra210-car", tegra210_clock_init);
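tegra210_clk_handle_mbist_war() above is intended to be invoked from the power-domain (powergate) path once a partition has been brought back up. The caller below is a simplified, hypothetical sketch of that usage; the surrounding power-up sequence is not part of this patch.

#include <linux/kernel.h>

/* Prototype as introduced by the patch above. */
int tegra210_clk_handle_mbist_war(unsigned int id);

/*
 * Hypothetical power-up path: after the partition has been unpowergated,
 * its clocks ungated and its resets deasserted, run the MBIST
 * work-around before handing the domain back to its consumer driver.
 */
static int example_power_up_partition(unsigned int id)
{
	/* ... raise the rail, ungate clocks, deassert resets ... */

	return tegra210_clk_handle_mbist_war(id);
}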
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index bee84c554932..b316dfb6f6c7 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -819,6 +819,7 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
[tegra_clk_pll_a] = { .dt_id = TEGRA30_CLK_PLL_A, .present = true },
[tegra_clk_pll_a_out0] = { .dt_id = TEGRA30_CLK_PLL_A_OUT0, .present = true },
[tegra_clk_cec] = { .dt_id = TEGRA30_CLK_CEC, .present = true },
+ [tegra_clk_emc] = { .dt_id = TEGRA30_CLK_EMC, .present = true },
};
static const char *pll_e_parents[] = { "pll_ref", "pll_p" };
@@ -843,8 +844,7 @@ static void __init tegra30_pll_init(void)
/* PLLM */
clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
- &pll_m_params, NULL);
+ CLK_SET_RATE_GATE, &pll_m_params, NULL);
clks[TEGRA30_CLK_PLL_M] = clk;
/* PLLM_OUT1 */
@@ -852,7 +852,7 @@ static void __init tegra30_pll_init(void)
clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
8, 8, 1, NULL);
clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
- clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ clk_base + PLLM_OUT, 1, 0,
CLK_SET_RATE_PARENT, 0, NULL);
clks[TEGRA30_CLK_PLL_M_OUT1] = clk;
@@ -990,7 +990,7 @@ static void __init tegra30_super_clk_init(void)
/* SCLK */
clk = tegra_clk_register_super_mux("sclk", sclk_parents,
ARRAY_SIZE(sclk_parents),
- CLK_SET_RATE_PARENT,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
clk_base + SCLK_BURST_POLICY,
0, 4, 0, 0, NULL);
clks[TEGRA30_CLK_SCLK] = clk;
@@ -1060,9 +1060,6 @@ static void __init tegra30_periph_clk_init(void)
CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
30, 2, 0, &emc_lock);
- clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
- 57, periph_clk_enb_refcnt);
- clks[TEGRA30_CLK_EMC] = clk;
clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
&emc_lock);
@@ -1252,10 +1249,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_SDMMC1, TEGRA30_CLK_PLL_P, 48000000, 0 },
{ TEGRA30_CLK_SDMMC2, TEGRA30_CLK_PLL_P, 48000000, 0 },
{ TEGRA30_CLK_SDMMC3, TEGRA30_CLK_PLL_P, 48000000, 0 },
- { TEGRA30_CLK_PLL_M, TEGRA30_CLK_CLK_MAX, 0, 1 },
- { TEGRA30_CLK_PCLK, TEGRA30_CLK_CLK_MAX, 0, 1 },
{ TEGRA30_CLK_CSITE, TEGRA30_CLK_CLK_MAX, 0, 1 },
- { TEGRA30_CLK_EMC, TEGRA30_CLK_CLK_MAX, 0, 1 },
{ TEGRA30_CLK_MSELECT, TEGRA30_CLK_CLK_MAX, 0, 1 },
{ TEGRA30_CLK_SBC1, TEGRA30_CLK_PLL_P, 100000000, 0 },
{ TEGRA30_CLK_SBC2, TEGRA30_CLK_PLL_P, 100000000, 0 },
@@ -1272,6 +1266,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0 },
{ TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
{ TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
+ { TEGRA30_CLK_VDE, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
/* must be the last entry */
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 3b2763df51c2..ba7e20e6a82b 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -812,4 +812,11 @@ int tegra_pll_wait_for_lock(struct tegra_clk_pll *pll);
u16 tegra_pll_get_fixed_mdiv(struct clk_hw *hw, unsigned long input_rate);
int tegra_pll_p_div_to_hw(struct tegra_clk_pll *pll, u8 p_div);
+/* Combined read fence with delay */
+#define fence_udelay(delay, reg) \
+ do { \
+ readl(reg); \
+ udelay(delay); \
+ } while (0)
+
#endif /* TEGRA_CLK_H */
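fence_udelay() issues a plain readl() of the supplied address so that earlier writel_relaxed() stores are actually posted to the device before the udelay() starts. A stripped-down sketch of the pattern the MBIST work-arounds use, assuming the local drivers/clk/tegra/clk.h providing the macro is included; the register offset and bit are arbitrary choices for illustration:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>

static void example_pulse_override(void __iomem *clk_base)
{
	/* 0xf8 / BIT(15) are purely illustrative register/bit choices. */
	u32 val = readl_relaxed(clk_base + 0xf8);

	writel_relaxed(val | BIT(15), clk_base + 0xf8);
	fence_udelay(1, clk_base);	/* flush posted write, then wait 1 us */

	writel_relaxed(val, clk_base + 0xf8);
	fence_udelay(1, clk_base);
}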
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index f4d6802a8544..7d22e1af2247 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -55,6 +55,29 @@ static void clk_memmap_writel(u32 val, const struct clk_omap_reg *reg)
writel_relaxed(val, io->mem + reg->offset);
}
+static void _clk_rmw(u32 val, u32 mask, void __iomem *ptr)
+{
+ u32 v;
+
+ v = readl_relaxed(ptr);
+ v &= ~mask;
+ v |= val;
+ writel_relaxed(v, ptr);
+}
+
+static void clk_memmap_rmw(u32 val, u32 mask, const struct clk_omap_reg *reg)
+{
+ struct clk_iomap *io = clk_memmaps[reg->index];
+
+ if (reg->ptr) {
+ _clk_rmw(val, mask, reg->ptr);
+ } else if (io->regmap) {
+ regmap_update_bits(io->regmap, reg->offset, mask, val);
+ } else {
+ _clk_rmw(val, mask, io->mem + reg->offset);
+ }
+}
+
static u32 clk_memmap_readl(const struct clk_omap_reg *reg)
{
u32 val;
@@ -89,6 +112,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
ti_clk_ll_ops = ops;
ops->clk_readl = clk_memmap_readl;
ops->clk_writel = clk_memmap_writel;
+ ops->clk_rmw = clk_memmap_rmw;
return 0;
}
@@ -251,6 +275,20 @@ int ti_clk_get_reg_addr(struct device_node *node, int index,
return 0;
}
+void ti_clk_latch(struct clk_omap_reg *reg, s8 shift)
+{
+ u32 latch;
+
+ if (shift < 0)
+ return;
+
+ latch = 1 << shift;
+
+ ti_clk_ll_ops->clk_rmw(latch, latch, reg);
+ ti_clk_ll_ops->clk_rmw(0, latch, reg);
+ ti_clk_ll_ops->clk_readl(reg); /* OCP barrier */
+}
+
/**
* omap2_clk_provider_init - init master clock provider
* @parent: master node
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index d9b43bfc2532..b58278077226 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -22,6 +22,7 @@ struct clk_omap_divider {
u8 shift;
u8 width;
u8 flags;
+ s8 latch;
const struct clk_div_table *table;
};
@@ -33,6 +34,7 @@ struct clk_omap_mux {
u32 *table;
u32 mask;
u8 shift;
+ s8 latch;
u8 flags;
};
@@ -74,6 +76,11 @@ enum {
#define CLKF_CORE (1 << 9)
#define CLKF_J_TYPE (1 << 10)
+/* CLKCTRL flags */
+#define CLKF_SW_SUP BIT(5)
+#define CLKF_HW_SUP BIT(6)
+#define CLKF_NO_IDLEST BIT(7)
+
#define CLK(dev, con, ck) \
{ \
.lk = { \
@@ -183,10 +190,6 @@ extern const struct omap_clkctrl_data am438x_clkctrl_data[];
extern const struct omap_clkctrl_data dm814_clkctrl_data[];
extern const struct omap_clkctrl_data dm816_clkctrl_data[];
-#define CLKF_SW_SUP BIT(0)
-#define CLKF_HW_SUP BIT(1)
-#define CLKF_NO_IDLEST BIT(2)
-
typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
@@ -194,6 +197,8 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con);
void ti_clk_add_aliases(void);
+void ti_clk_latch(struct clk_omap_reg *reg, s8 shift);
+
struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup);
int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 77f93f6d2806..aaa277dd6d99 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -263,6 +263,8 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
val |= value << divider->shift;
ti_clk_ll_ops->clk_writel(val, &divider->reg);
+ ti_clk_latch(&divider->reg, divider->latch);
+
return 0;
}
@@ -276,7 +278,8 @@ static struct clk *_register_divider(struct device *dev, const char *name,
const char *parent_name,
unsigned long flags,
struct clk_omap_reg *reg,
- u8 shift, u8 width, u8 clk_divider_flags,
+ u8 shift, u8 width, s8 latch,
+ u8 clk_divider_flags,
const struct clk_div_table *table)
{
struct clk_omap_divider *div;
@@ -305,6 +308,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
memcpy(&div->reg, reg, sizeof(*reg));
div->shift = shift;
div->width = width;
+ div->latch = latch;
div->flags = clk_divider_flags;
div->hw.init = &init;
div->table = table;
@@ -420,6 +424,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
div->table = _get_div_table_from_setup(setup, &div->width);
div->shift = setup->bit_shift;
+ div->latch = -EINVAL;
return &div->hw;
}
@@ -452,7 +457,7 @@ struct clk *ti_clk_register_divider(struct ti_clk *setup)
clk = _register_divider(NULL, setup->name, div->parent,
flags, &reg, div->bit_shift,
- width, div_flags, table);
+ width, -EINVAL, div_flags, table);
if (IS_ERR(clk))
kfree(table);
@@ -556,7 +561,7 @@ static int _get_divider_width(struct device_node *node,
static int __init ti_clk_divider_populate(struct device_node *node,
struct clk_omap_reg *reg, const struct clk_div_table **table,
- u32 *flags, u8 *div_flags, u8 *width, u8 *shift)
+ u32 *flags, u8 *div_flags, u8 *width, u8 *shift, s8 *latch)
{
u32 val;
int ret;
@@ -570,6 +575,13 @@ static int __init ti_clk_divider_populate(struct device_node *node,
else
*shift = 0;
+ if (latch) {
+ if (!of_property_read_u32(node, "ti,latch-bit", &val))
+ *latch = val;
+ else
+ *latch = -EINVAL;
+ }
+
*flags = 0;
*div_flags = 0;
@@ -606,17 +618,18 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
u8 clk_divider_flags = 0;
u8 width = 0;
u8 shift = 0;
+ s8 latch = -EINVAL;
const struct clk_div_table *table = NULL;
u32 flags = 0;
parent_name = of_clk_get_parent_name(node, 0);
if (ti_clk_divider_populate(node, &reg, &table, &flags,
- &clk_divider_flags, &width, &shift))
+ &clk_divider_flags, &width, &shift, &latch))
goto cleanup;
clk = _register_divider(NULL, node->name, parent_name, flags, &reg,
- shift, width, clk_divider_flags, table);
+ shift, width, latch, clk_divider_flags, table);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -639,7 +652,8 @@ static void __init of_ti_composite_divider_clk_setup(struct device_node *node)
return;
if (ti_clk_divider_populate(node, &div->reg, &div->table, &val,
- &div->flags, &div->width, &div->shift) < 0)
+ &div->flags, &div->width, &div->shift,
+ NULL) < 0)
goto cleanup;
if (!ti_clk_add_component(node, &div->hw, CLK_COMPONENT_TYPE_DIVIDER))
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index d4705803f3d3..69a4308a5a98 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -86,6 +86,7 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
}
val |= index << mux->shift;
ti_clk_ll_ops->clk_writel(val, &mux->reg);
+ ti_clk_latch(&mux->reg, mux->latch);
return 0;
}
@@ -100,7 +101,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
const char * const *parent_names,
u8 num_parents, unsigned long flags,
struct clk_omap_reg *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table)
+ s8 latch, u8 clk_mux_flags, u32 *table)
{
struct clk_omap_mux *mux;
struct clk *clk;
@@ -121,6 +122,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
memcpy(&mux->reg, reg, sizeof(*reg));
mux->shift = shift;
mux->mask = mask;
+ mux->latch = latch;
mux->flags = clk_mux_flags;
mux->table = table;
mux->hw.init = &init;
@@ -160,7 +162,7 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup)
flags |= CLK_SET_RATE_PARENT;
return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
- flags, &reg, mux->bit_shift, mask,
+ flags, &reg, mux->bit_shift, mask, -EINVAL,
mux_flags, NULL);
}
@@ -179,6 +181,7 @@ static void of_mux_clk_setup(struct device_node *node)
u8 clk_mux_flags = 0;
u32 mask = 0;
u32 shift = 0;
+ s32 latch = -EINVAL;
u32 flags = CLK_SET_RATE_NO_REPARENT;
num_parents = of_clk_get_parent_count(node);
@@ -197,6 +200,8 @@ static void of_mux_clk_setup(struct device_node *node)
of_property_read_u32(node, "ti,bit-shift", &shift);
+ of_property_read_u32(node, "ti,latch-bit", &latch);
+
if (of_property_read_bool(node, "ti,index-starts-at-one"))
clk_mux_flags |= CLK_MUX_INDEX_ONE;
@@ -211,7 +216,8 @@ static void of_mux_clk_setup(struct device_node *node)
mask = (1 << fls(mask)) - 1;
clk = _register_mux(NULL, node->name, parent_names, num_parents,
- flags, &reg, shift, mask, clk_mux_flags, NULL);
+ flags, &reg, shift, mask, latch, clk_mux_flags,
+ NULL);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -234,6 +240,7 @@ struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup)
return ERR_PTR(-ENOMEM);
mux->shift = setup->bit_shift;
+ mux->latch = -EINVAL;
mux->reg.index = setup->module;
mux->reg.offset = setup->reg;
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index d244e724e198..ebc78ab2df05 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -57,6 +57,14 @@
#define UNIPHIER_PRO4_SYS_CLK_USB3(idx, ch) \
UNIPHIER_CLK_GATE("usb3" #ch, (idx), NULL, 0x2104, 16 + (ch))
+#define UNIPHIER_PRO4_SYS_CLK_AIO(idx) \
+ UNIPHIER_CLK_FACTOR("aio-io200m", -1, "spll", 1, 8), \
+ UNIPHIER_CLK_GATE("aio", (idx), "aio-io200m", 0x2104, 13)
+
+#define UNIPHIER_PRO5_SYS_CLK_AIO(idx) \
+ UNIPHIER_CLK_FACTOR("aio-io200m", -1, "spll", 1, 12), \
+ UNIPHIER_CLK_GATE("aio", (idx), "aio-io200m", 0x2104, 13)
+
#define UNIPHIER_LD11_SYS_CLK_AIO(idx) \
UNIPHIER_CLK_FACTOR("aio-io200m", -1, "spll", 1, 10), \
UNIPHIER_CLK_GATE("aio", (idx), "aio-io200m", 0x2108, 0)
@@ -94,16 +102,22 @@ const struct uniphier_clk_data uniphier_pro4_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("upll", -1, "ref", 288, 25), /* 288 MHz */
UNIPHIER_CLK_FACTOR("a2pll", -1, "upll", 256, 125), /* 589.824 MHz */
UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */
+ UNIPHIER_CLK_FACTOR("gpll", -1, "ref", 10, 1), /* 250 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 8),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 32),
UNIPHIER_LD4_SYS_CLK_NAND(2),
UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
UNIPHIER_PRO4_SYS_CLK_ETHER(6),
+ UNIPHIER_CLK_GATE("ether-gb", 7, "gpll", 0x2104, 5),
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC, MIO, RLE */
+ UNIPHIER_CLK_GATE("ether-phy", 10, "ref", 0x2260, 0),
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* Ether, SATA, USB3 */
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
+ UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x2104, 18),
+ UNIPHIER_CLK_GATE("sata1", 29, NULL, 0x2104, 19),
+ UNIPHIER_PRO4_SYS_CLK_AIO(40),
{ /* sentinel */ }
};
@@ -132,6 +146,8 @@ const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* PCIe, USB3 */
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
+ UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x2108, 2),
+ UNIPHIER_PRO5_SYS_CLK_AIO(40),
{ /* sentinel */ }
};
@@ -149,6 +165,8 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
/* The document mentions 0x2104 bit 18, but not functional */
UNIPHIER_CLK_GATE("usb30-phy", 16, NULL, 0x2104, 19),
UNIPHIER_CLK_GATE("usb31-phy", 20, NULL, 0x2104, 20),
+ UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x2104, 22),
+ UNIPHIER_PRO5_SYS_CLK_AIO(40),
{ /* sentinel */ }
};
@@ -205,6 +223,7 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
UNIPHIER_CLK_GATE("usb30", 14, NULL, 0x210c, 14),
UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 12),
UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 13),
+ UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x210c, 4),
UNIPHIER_LD11_SYS_CLK_AIO(40),
UNIPHIER_LD11_SYS_CLK_EVEA(41),
UNIPHIER_LD11_SYS_CLK_EXIV(42),
@@ -233,6 +252,8 @@ const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = {
UNIPHIER_LD20_SYS_CLK_SD,
UNIPHIER_LD11_SYS_CLK_NAND(2),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
+ UNIPHIER_CLK_GATE("ether0", 6, NULL, 0x210c, 9),
+ UNIPHIER_CLK_GATE("ether1", 7, NULL, 0x210c, 10),
UNIPHIER_CLK_GATE("usb30", 12, NULL, 0x210c, 4), /* =GIO0 */
UNIPHIER_CLK_GATE("usb31-0", 13, NULL, 0x210c, 5), /* =GIO1 */
UNIPHIER_CLK_GATE("usb31-1", 14, NULL, 0x210c, 6), /* =GIO1-1 */
@@ -241,6 +262,10 @@ const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = {
UNIPHIER_CLK_GATE("usb30-phy2", 18, NULL, 0x210c, 20),
UNIPHIER_CLK_GATE("usb31-phy0", 20, NULL, 0x210c, 17),
UNIPHIER_CLK_GATE("usb31-phy1", 21, NULL, 0x210c, 19),
+ UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x210c, 3),
+ UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x210c, 7),
+ UNIPHIER_CLK_GATE("sata1", 29, NULL, 0x210c, 8),
+ UNIPHIER_CLK_GATE("sata-phy", 30, NULL, 0x210c, 21),
/* CPU gears */
UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8),
UNIPHIER_CLK_DIV4("spll", 2, 3, 4, 8),
diff --git a/drivers/clk/ux500/Makefile b/drivers/clk/ux500/Makefile
index fedc083dc8be..53fd29002401 100644
--- a/drivers/clk/ux500/Makefile
+++ b/drivers/clk/ux500/Makefile
@@ -10,8 +10,6 @@ obj-y += clk-sysctrl.o
# Clock definitions
obj-y += u8500_of_clk.o
-obj-y += u9540_clk.o
-obj-y += u8540_clk.o
# ABX500 clock driver
obj-y += abx500-clk.o
diff --git a/drivers/clk/ux500/abx500-clk.c b/drivers/clk/ux500/abx500-clk.c
index 2257d12ba988..5a86cd8fe5de 100644
--- a/drivers/clk/ux500/abx500-clk.c
+++ b/drivers/clk/ux500/abx500-clk.c
@@ -88,18 +88,6 @@ static int ab8500_reg_clks(struct device *dev)
return 0;
}
-/* Clock definitions for ab8540 */
-static int ab8540_reg_clks(struct device *dev)
-{
- return 0;
-}
-
-/* Clock definitions for ab9540 */
-static int ab9540_reg_clks(struct device *dev)
-{
- return 0;
-}
-
static int abx500_clk_probe(struct platform_device *pdev)
{
struct ab8500 *parent = dev_get_drvdata(pdev->dev.parent);
@@ -107,10 +95,6 @@ static int abx500_clk_probe(struct platform_device *pdev)
if (is_ab8500(parent) || is_ab8505(parent)) {
ret = ab8500_reg_clks(&pdev->dev);
- } else if (is_ab8540(parent)) {
- ret = ab8540_reg_clks(&pdev->dev);
- } else if (is_ab9540(parent)) {
- ret = ab9540_reg_clks(&pdev->dev);
} else {
dev_err(&pdev->dev, "non supported plf id\n");
return -ENODEV;
diff --git a/drivers/clk/ux500/u8540_clk.c b/drivers/clk/ux500/u8540_clk.c
deleted file mode 100644
index 133859f0e2bf..000000000000
--- a/drivers/clk/ux500/u8540_clk.c
+++ /dev/null
@@ -1,597 +0,0 @@
-/*
- * Clock definitions for u8540 platform.
- *
- * Copyright (C) 2012 ST-Ericsson SA
- * Author: Ulf Hansson <ulf.hansson@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/mfd/dbx500-prcmu.h>
-#include "clk.h"
-
-/* CLKRST4 is missing making it hard to index things */
-enum clkrst_index {
- CLKRST1_INDEX = 0,
- CLKRST2_INDEX,
- CLKRST3_INDEX,
- CLKRST5_INDEX,
- CLKRST6_INDEX,
- CLKRST_MAX,
-};
-
-static void u8540_clk_init(struct device_node *np)
-{
- struct clk *clk;
- u32 bases[CLKRST_MAX];
- int i;
-
- for (i = 0; i < ARRAY_SIZE(bases); i++) {
- struct resource r;
-
- if (of_address_to_resource(np, i, &r))
- /* Not much choice but to continue */
- pr_err("failed to get CLKRST %d base address\n",
- i + 1);
- bases[i] = r.start;
- }
-
- /* Clock sources. */
- /* Fixed ClockGen */
- clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
- CLK_IGNORE_UNUSED);
- clk_register_clkdev(clk, "soc0_pll", NULL);
-
- clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1,
- CLK_IGNORE_UNUSED);
- clk_register_clkdev(clk, "soc1_pll", NULL);
-
- clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR,
- CLK_IGNORE_UNUSED);
- clk_register_clkdev(clk, "ddr_pll", NULL);
-
- clk = clk_register_fixed_rate(NULL, "rtc32k", NULL,
- CLK_IGNORE_UNUSED,
- 32768);
- clk_register_clkdev(clk, "clk32k", NULL);
- clk_register_clkdev(clk, "apb_pclk", "rtc-pl031");
-
- clk = clk_register_fixed_rate(NULL, "ulp38m4", NULL,
- CLK_IGNORE_UNUSED,
- 38400000);
-
- clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, 0);
- clk_register_clkdev(clk, NULL, "UART");
-
- /* msp02clk needs a abx500 clk as parent. Handle by abx500 clk driver */
- clk = clk_reg_prcmu_gate("msp02clk", "ab9540_sysclk12_b1",
- PRCMU_MSP02CLK, 0);
- clk_register_clkdev(clk, NULL, "MSP02");
-
- clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, 0);
- clk_register_clkdev(clk, NULL, "MSP1");
-
- clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, 0);
- clk_register_clkdev(clk, NULL, "I2C");
-
- clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, 0);
- clk_register_clkdev(clk, NULL, "slim");
-
- clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, 0);
- clk_register_clkdev(clk, NULL, "PERIPH1");
-
- clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, 0);
- clk_register_clkdev(clk, NULL, "PERIPH2");
-
- clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, 0);
- clk_register_clkdev(clk, NULL, "PERIPH3");
-
- clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, 0);
- clk_register_clkdev(clk, NULL, "PERIPH5");
-
- clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, 0);
- clk_register_clkdev(clk, NULL, "PERIPH6");
-
- clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, 0);
- clk_register_clkdev(clk, NULL, "PERIPH7");
-
- clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0,
- CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "lcd");
- clk_register_clkdev(clk, "lcd", "mcde");
-
- clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, 0);
- clk_register_clkdev(clk, NULL, "bml");
-
- clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0,
- CLK_SET_RATE_GATE);
-
- clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0,
- CLK_SET_RATE_GATE);
-
- clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0,
- CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "hdmi");
- clk_register_clkdev(clk, "hdmi", "mcde");
-
- clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, 0);
- clk_register_clkdev(clk, NULL, "apeat");
-
- clk = clk_reg_prcmu_gate("apetraceclk", NULL, PRCMU_APETRACECLK, 0);
- clk_register_clkdev(clk, NULL, "apetrace");
-
- clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, 0);
- clk_register_clkdev(clk, NULL, "mcde");
- clk_register_clkdev(clk, "mcde", "mcde");
- clk_register_clkdev(clk, NULL, "dsilink.0");
- clk_register_clkdev(clk, NULL, "dsilink.1");
- clk_register_clkdev(clk, NULL, "dsilink.2");
-
- clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK, 0);
- clk_register_clkdev(clk, NULL, "ipi2");
-
- clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK, 0);
- clk_register_clkdev(clk, NULL, "dsialt");
-
- clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, 0);
- clk_register_clkdev(clk, NULL, "dma40.0");
-
- clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, 0);
- clk_register_clkdev(clk, NULL, "b2r2");
- clk_register_clkdev(clk, NULL, "b2r2_core");
- clk_register_clkdev(clk, NULL, "U8500-B2R2.0");
- clk_register_clkdev(clk, NULL, "b2r2_1_core");
-
- clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0,
- CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "tv");
- clk_register_clkdev(clk, "tv", "mcde");
-
- clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, 0);
- clk_register_clkdev(clk, NULL, "SSP");
-
- clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, 0);
- clk_register_clkdev(clk, NULL, "rngclk");
-
- clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, 0);
- clk_register_clkdev(clk, NULL, "uicc");
-
- clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, 0);
- clk_register_clkdev(clk, NULL, "mtu0");
- clk_register_clkdev(clk, NULL, "mtu1");
-
- clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL,
- PRCMU_SDMMCCLK, 100000000,
- CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdmmc");
-
- clk = clk_reg_prcmu_opp_volt_scalable("sdmmchclk", NULL,
- PRCMU_SDMMCHCLK, 400000000,
- CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdmmchclk");
-
- clk = clk_reg_prcmu_gate("hvaclk", NULL, PRCMU_HVACLK, 0);
- clk_register_clkdev(clk, NULL, "hva");
-
- clk = clk_reg_prcmu_gate("g1clk", NULL, PRCMU_G1CLK, 0);
- clk_register_clkdev(clk, NULL, "g1");
-
- clk = clk_reg_prcmu_scalable("spare1clk", NULL, PRCMU_SPARE1CLK, 0,
- CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsilcd", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk",
- PRCMU_PLLDSI, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs2", "mcde");
- clk_register_clkdev(clk, "hs_clk", "dsilink.2");
-
- clk = clk_reg_prcmu_scalable("dsilcd_pll", "spare1clk",
- PRCMU_PLLDSI_LCD, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsilcd_pll", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi0clk", "dsi_pll",
- PRCMU_DSI0CLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs0", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi0lcdclk", "dsilcd_pll",
- PRCMU_DSI0CLK_LCD, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs0", "mcde");
- clk_register_clkdev(clk, "hs_clk", "dsilink.0");
-
- clk = clk_reg_prcmu_scalable("dsi1clk", "dsi_pll",
- PRCMU_DSI1CLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs1", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi1lcdclk", "dsilcd_pll",
- PRCMU_DSI1CLK_LCD, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs1", "mcde");
- clk_register_clkdev(clk, "hs_clk", "dsilink.1");
-
- clk = clk_reg_prcmu_scalable("dsi0escclk", "tvclk",
- PRCMU_DSI0ESCCLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "lp_clk", "dsilink.0");
- clk_register_clkdev(clk, "dsilp0", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi1escclk", "tvclk",
- PRCMU_DSI1ESCCLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "lp_clk", "dsilink.1");
- clk_register_clkdev(clk, "dsilp1", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi2escclk", "tvclk",
- PRCMU_DSI2ESCCLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "lp_clk", "dsilink.2");
- clk_register_clkdev(clk, "dsilp2", "mcde");
-
- clk = clk_reg_prcmu_scalable_rate("armss", NULL,
- PRCMU_ARMSS, 0, CLK_IGNORE_UNUSED);
- clk_register_clkdev(clk, "armss", NULL);
-
- clk = clk_register_fixed_factor(NULL, "smp_twd", "armss",
- CLK_IGNORE_UNUSED, 1, 2);
- clk_register_clkdev(clk, NULL, "smp_twd");
-
- /* PRCC P-clocks */
- /* Peripheral 1 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", bases[CLKRST1_INDEX],
- BIT(0), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart0");
-
- clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", bases[CLKRST1_INDEX],
- BIT(1), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart1");
-
- clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", bases[CLKRST1_INDEX],
- BIT(2), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
-
- clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", bases[CLKRST1_INDEX],
- BIT(3), 0);
- clk_register_clkdev(clk, "apb_pclk", "msp0");
- clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.0");
-
- clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", bases[CLKRST1_INDEX],
- BIT(4), 0);
- clk_register_clkdev(clk, "apb_pclk", "msp1");
- clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.1");
-
- clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", bases[CLKRST1_INDEX],
- BIT(5), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi0");
-
- clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", bases[CLKRST1_INDEX],
- BIT(6), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
-
- clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", bases[CLKRST1_INDEX],
- BIT(7), 0);
- clk_register_clkdev(clk, NULL, "spi3");
-
- clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", bases[CLKRST1_INDEX],
- BIT(8), 0);
- clk_register_clkdev(clk, "apb_pclk", "slimbus0");
-
- clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", bases[CLKRST1_INDEX],
- BIT(9), 0);
- clk_register_clkdev(clk, NULL, "gpio.0");
- clk_register_clkdev(clk, NULL, "gpio.1");
- clk_register_clkdev(clk, NULL, "gpioblock0");
- clk_register_clkdev(clk, "apb_pclk", "ab85xx-codec.0");
-
- clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", bases[CLKRST1_INDEX],
- BIT(10), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
-
- clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", bases[CLKRST1_INDEX],
- BIT(11), 0);
- clk_register_clkdev(clk, "apb_pclk", "msp3");
- clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.3");
-
- /* Peripheral 2 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", bases[CLKRST2_INDEX],
- BIT(0), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
-
- clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", bases[CLKRST2_INDEX],
- BIT(1), 0);
- clk_register_clkdev(clk, NULL, "spi2");
-
- clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", bases[CLKRST2_INDEX],
- BIT(2), 0);
- clk_register_clkdev(clk, NULL, "spi1");
-
- clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", bases[CLKRST2_INDEX],
- BIT(3), 0);
- clk_register_clkdev(clk, NULL, "pwl");
-
- clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", bases[CLKRST2_INDEX],
- BIT(4), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi4");
-
- clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", bases[CLKRST2_INDEX],
- BIT(5), 0);
- clk_register_clkdev(clk, "apb_pclk", "msp2");
- clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.2");
-
- clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", bases[CLKRST2_INDEX],
- BIT(6), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi1");
-
- clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", bases[CLKRST2_INDEX],
- BIT(7), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi3");
-
- clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", bases[CLKRST2_INDEX],
- BIT(8), 0);
- clk_register_clkdev(clk, NULL, "spi0");
-
- clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", bases[CLKRST2_INDEX],
- BIT(9), 0);
- clk_register_clkdev(clk, "hsir_hclk", "ste_hsi.0");
-
- clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", bases[CLKRST2_INDEX],
- BIT(10), 0);
- clk_register_clkdev(clk, "hsit_hclk", "ste_hsi.0");
-
- clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", bases[CLKRST2_INDEX],
- BIT(11), 0);
- clk_register_clkdev(clk, NULL, "gpio.6");
- clk_register_clkdev(clk, NULL, "gpio.7");
- clk_register_clkdev(clk, NULL, "gpioblock1");
-
- clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", bases[CLKRST2_INDEX],
- BIT(12), 0);
- clk_register_clkdev(clk, "msp4-pclk", "ab85xx-codec.0");
-
- /* Peripheral 3 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", bases[CLKRST3_INDEX],
- BIT(0), 0);
- clk_register_clkdev(clk, NULL, "fsmc");
-
- clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", bases[CLKRST3_INDEX],
- BIT(1), 0);
- clk_register_clkdev(clk, "apb_pclk", "ssp0");
-
- clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", bases[CLKRST3_INDEX],
- BIT(2), 0);
- clk_register_clkdev(clk, "apb_pclk", "ssp1");
-
- clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", bases[CLKRST3_INDEX],
- BIT(3), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
-
- clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", bases[CLKRST3_INDEX],
- BIT(4), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi2");
-
- clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", bases[CLKRST3_INDEX],
- BIT(5), 0);
- clk_register_clkdev(clk, "apb_pclk", "ske");
- clk_register_clkdev(clk, "apb_pclk", "nmk-ske-keypad");
-
- clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", bases[CLKRST3_INDEX],
- BIT(6), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart2");
-
- clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", bases[CLKRST3_INDEX],
- BIT(7), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi5");
-
- clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", bases[CLKRST3_INDEX],
- BIT(8), 0);
- clk_register_clkdev(clk, NULL, "gpio.2");
- clk_register_clkdev(clk, NULL, "gpio.3");
- clk_register_clkdev(clk, NULL, "gpio.4");
- clk_register_clkdev(clk, NULL, "gpio.5");
- clk_register_clkdev(clk, NULL, "gpioblock2");
-
- clk = clk_reg_prcc_pclk("p3_pclk9", "per3clk", bases[CLKRST3_INDEX],
- BIT(9), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.5");
-
- clk = clk_reg_prcc_pclk("p3_pclk10", "per3clk", bases[CLKRST3_INDEX],
- BIT(10), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.6");
-
- clk = clk_reg_prcc_pclk("p3_pclk11", "per3clk", bases[CLKRST3_INDEX],
- BIT(11), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart3");
-
- clk = clk_reg_prcc_pclk("p3_pclk12", "per3clk", bases[CLKRST3_INDEX],
- BIT(12), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart4");
-
- /* Peripheral 5 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", bases[CLKRST5_INDEX],
- BIT(0), 0);
- clk_register_clkdev(clk, "usb", "musb-ux500.0");
- clk_register_clkdev(clk, "usbclk", "ab-iddet.0");
-
- clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", bases[CLKRST5_INDEX],
- BIT(1), 0);
- clk_register_clkdev(clk, NULL, "gpio.8");
- clk_register_clkdev(clk, NULL, "gpioblock3");
-
- /* Peripheral 6 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", bases[CLKRST6_INDEX],
- BIT(0), 0);
- clk_register_clkdev(clk, "apb_pclk", "rng");
-
- clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", bases[CLKRST6_INDEX],
- BIT(1), 0);
- clk_register_clkdev(clk, NULL, "cryp0");
- clk_register_clkdev(clk, NULL, "cryp1");
-
- clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", bases[CLKRST6_INDEX],
- BIT(2), 0);
- clk_register_clkdev(clk, NULL, "hash0");
-
- clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", bases[CLKRST6_INDEX],
- BIT(3), 0);
- clk_register_clkdev(clk, NULL, "pka");
-
- clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", bases[CLKRST6_INDEX],
- BIT(4), 0);
- clk_register_clkdev(clk, NULL, "db8540-hash1");
-
- clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", bases[CLKRST6_INDEX],
- BIT(5), 0);
- clk_register_clkdev(clk, NULL, "cfgreg");
-
- clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", bases[CLKRST6_INDEX],
- BIT(6), 0);
- clk_register_clkdev(clk, "apb_pclk", "mtu0");
-
- clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", bases[CLKRST6_INDEX],
- BIT(7), 0);
- clk_register_clkdev(clk, "apb_pclk", "mtu1");
-
- /*
- * PRCC K-clocks ==> see table PRCC_PCKEN/PRCC_KCKEN
- * This differs from the internal implementation:
- * We don't use the PERPIH[n| clock as parent, since those _should_
- * only be used as parents for the P-clocks.
- * TODO: "parentjoin" with corresponding P-clocks for all K-clocks.
- */
-
- /* Peripheral 1 : PRCC K-clocks */
- clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk",
- bases[CLKRST1_INDEX], BIT(0), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart0");
-
- clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk",
- bases[CLKRST1_INDEX], BIT(1), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart1");
-
- clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
- bases[CLKRST1_INDEX], BIT(2), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.1");
-
- clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
- bases[CLKRST1_INDEX], BIT(3), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp0");
- clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.0");
-
- clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
- bases[CLKRST1_INDEX], BIT(4), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp1");
- clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.1");
-
- clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmchclk",
- bases[CLKRST1_INDEX], BIT(5), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi0");
-
- clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
- bases[CLKRST1_INDEX], BIT(6), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.2");
-
- clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
- bases[CLKRST1_INDEX], BIT(8), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "slimbus0");
-
- clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
- bases[CLKRST1_INDEX], BIT(9), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.4");
-
- clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
- bases[CLKRST1_INDEX], BIT(10), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp3");
- clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.3");
-
- /* Peripheral 2 : PRCC K-clocks */
- clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
- bases[CLKRST2_INDEX], BIT(0), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.3");
-
- clk = clk_reg_prcc_kclk("p2_pwl_kclk", "rtc32k",
- bases[CLKRST2_INDEX], BIT(1), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "pwl");
-
- clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmchclk",
- bases[CLKRST2_INDEX], BIT(2), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi4");
-
- clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
- bases[CLKRST2_INDEX], BIT(3), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp2");
- clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.2");
-
- clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmchclk",
- bases[CLKRST2_INDEX], BIT(4), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi1");
-
- clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk",
- bases[CLKRST2_INDEX], BIT(5), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi3");
-
- clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk",
- bases[CLKRST2_INDEX], BIT(6),
- CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
- clk_register_clkdev(clk, "hsir_hsirxclk", "ste_hsi.0");
-
- clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk",
- bases[CLKRST2_INDEX], BIT(7),
- CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
- clk_register_clkdev(clk, "hsit_hsitxclk", "ste_hsi.0");
-
- /* Should only be 9540, but might be added for 85xx as well */
- clk = clk_reg_prcc_kclk("p2_msp4_kclk", "msp02clk",
- bases[CLKRST2_INDEX], BIT(9), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp4");
- clk_register_clkdev(clk, "msp4", "ab85xx-codec.0");
-
- /* Peripheral 3 : PRCC K-clocks */
- clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
- bases[CLKRST3_INDEX], BIT(1), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "ssp0");
-
- clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
- bases[CLKRST3_INDEX], BIT(2), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "ssp1");
-
- clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
- bases[CLKRST3_INDEX], BIT(3), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.0");
-
- clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmchclk",
- bases[CLKRST3_INDEX], BIT(4), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi2");
-
- clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k",
- bases[CLKRST3_INDEX], BIT(5), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "ske");
- clk_register_clkdev(clk, NULL, "nmk-ske-keypad");
-
- clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk",
- bases[CLKRST3_INDEX], BIT(6), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart2");
-
- clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk",
- bases[CLKRST3_INDEX], BIT(7), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi5");
-
- clk = clk_reg_prcc_kclk("p3_i2c5_kclk", "i2cclk",
- bases[CLKRST3_INDEX], BIT(8), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.5");
-
- clk = clk_reg_prcc_kclk("p3_i2c6_kclk", "i2cclk",
- bases[CLKRST3_INDEX], BIT(9), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.6");
-
- clk = clk_reg_prcc_kclk("p3_uart3_kclk", "uartclk",
- bases[CLKRST3_INDEX], BIT(10), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart3");
-
- clk = clk_reg_prcc_kclk("p3_uart4_kclk", "uartclk",
- bases[CLKRST3_INDEX], BIT(11), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart4");
-
- /* Peripheral 6 : PRCC K-clocks */
- clk = clk_reg_prcc_kclk("p6_rng_kclk", "rngclk",
- bases[CLKRST6_INDEX], BIT(0), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "rng");
-}
-CLK_OF_DECLARE(u8540_clks, "stericsson,u8540-clks", u8540_clk_init);
diff --git a/drivers/clk/ux500/u9540_clk.c b/drivers/clk/ux500/u9540_clk.c
deleted file mode 100644
index 7b6bca49ce42..000000000000
--- a/drivers/clk/ux500/u9540_clk.c
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Clock definitions for u9540 platform.
- *
- * Copyright (C) 2012 ST-Ericsson SA
- * Author: Ulf Hansson <ulf.hansson@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/clk-provider.h>
-#include <linux/mfd/dbx500-prcmu.h>
-#include "clk.h"
-
-static void u9540_clk_init(struct device_node *np)
-{
- /* register clocks here */
-}
-CLK_OF_DECLARE(u9540_clks, "stericsson,u9540-clks", u9540_clk_init);
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index e7a868b83fe5..dd08ecb498be 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -44,10 +44,10 @@ static long vexpress_osc_round_rate(struct clk_hw *hw, unsigned long rate,
{
struct vexpress_osc *osc = to_vexpress_osc(hw);
- if (WARN_ON(osc->rate_min && rate < osc->rate_min))
+ if (osc->rate_min && rate < osc->rate_min)
rate = osc->rate_min;
- if (WARN_ON(osc->rate_max && rate > osc->rate_max))
+ if (osc->rate_max && rate > osc->rate_max)
rate = osc->rate_max;
return rate;
@@ -104,6 +104,7 @@ static int vexpress_osc_probe(struct platform_device *pdev)
return PTR_ERR(clk);
of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get, clk);
+ clk_hw_set_rate_range(&osc->hw, osc->rate_min, osc->rate_max);
dev_dbg(&pdev->dev, "Registered clock '%s'\n", init.name);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 9ee2888275c1..8e8a09755d10 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -133,6 +133,14 @@ config VT8500_TIMER
help
Enables support for the VT8500 driver.
+config NPCM7XX_TIMER
+ bool "NPCM7xx timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
+	  Enable the 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
+	  where TIMER0 serves as the clockevent and TIMER1 serves as the clocksource.
+
config CADENCE_TTC_TIMER
bool "Cadence TTC timer driver" if COMPILE_TEST
depends on COMMON_CLK
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index e8e76dfef00b..00caf37e52f9 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += owl-timer.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
+obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index 21bffdcb2f20..6c8318470b48 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -17,9 +17,14 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
+#define TPM_PARAM 0x4
+#define TPM_PARAM_WIDTH_SHIFT 16
+#define TPM_PARAM_WIDTH_MASK (0xff << 16)
#define TPM_SC 0x10
#define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3)
#define TPM_SC_CMOD_DIV_DEFAULT 0x3
+#define TPM_SC_CMOD_DIV_MAX 0x7
+#define TPM_SC_TOF_MASK (0x1 << 7)
#define TPM_CNT 0x14
#define TPM_MOD 0x18
#define TPM_STATUS 0x1c
@@ -29,8 +34,11 @@
#define TPM_C0SC_MODE_SHIFT 2
#define TPM_C0SC_MODE_MASK 0x3c
#define TPM_C0SC_MODE_SW_COMPARE 0x4
+#define TPM_C0SC_CHF_MASK (0x1 << 7)
#define TPM_C0V 0x24
+static int counter_width;
+static int rating;
static void __iomem *timer_base;
static struct clock_event_device clockevent_tpm;
@@ -83,10 +91,11 @@ static int __init tpm_clocksource_init(unsigned long rate)
tpm_delay_timer.freq = rate;
register_current_timer_delay(&tpm_delay_timer);
- sched_clock_register(tpm_read_sched_clock, 32, rate);
+ sched_clock_register(tpm_read_sched_clock, counter_width, rate);
return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm",
- rate, 200, 32, clocksource_mmio_readl_up);
+ rate, rating, counter_width,
+ clocksource_mmio_readl_up);
}
static int tpm_set_next_event(unsigned long delta,
@@ -105,7 +114,7 @@ static int tpm_set_next_event(unsigned long delta,
 * of writing CNT registers, which may cause the min_delta event to be
 * missed, so we need to add an ETIME check here in case that happens.
*/
- return (int)((next - now) <= 0) ? -ETIME : 0;
+ return (int)(next - now) <= 0 ? -ETIME : 0;
}
static int tpm_set_state_oneshot(struct clock_event_device *evt)
@@ -139,7 +148,6 @@ static struct clock_event_device clockevent_tpm = {
.set_state_oneshot = tpm_set_state_oneshot,
.set_next_event = tpm_set_next_event,
.set_state_shutdown = tpm_set_state_shutdown,
- .rating = 200,
};
static int __init tpm_clockevent_init(unsigned long rate, int irq)
@@ -149,10 +157,11 @@ static int __init tpm_clockevent_init(unsigned long rate, int irq)
ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
"i.MX7ULP TPM Timer", &clockevent_tpm);
+ clockevent_tpm.rating = rating;
clockevent_tpm.cpumask = cpumask_of(0);
clockevent_tpm.irq = irq;
- clockevents_config_and_register(&clockevent_tpm,
- rate, 300, 0xfffffffe);
+ clockevents_config_and_register(&clockevent_tpm, rate, 300,
+ GENMASK(counter_width - 1, 1));
return ret;
}
@@ -179,7 +188,7 @@ static int __init tpm_timer_init(struct device_node *np)
ipg = of_clk_get_by_name(np, "ipg");
per = of_clk_get_by_name(np, "per");
if (IS_ERR(ipg) || IS_ERR(per)) {
- pr_err("tpm: failed to get igp or per clk\n");
+ pr_err("tpm: failed to get ipg or per clk\n");
ret = -ENODEV;
goto err_clk_get;
}
@@ -197,6 +206,11 @@ static int __init tpm_timer_init(struct device_node *np)
goto err_per_clk_enable;
}
+ counter_width = (readl(timer_base + TPM_PARAM) & TPM_PARAM_WIDTH_MASK)
+ >> TPM_PARAM_WIDTH_SHIFT;
+ /* use rating 200 for 32-bit counter and 150 for 16-bit counter */
+ rating = counter_width == 0x20 ? 200 : 150;
+
/*
* Initialize tpm module to a known state
* 1) Counter disabled
@@ -205,16 +219,25 @@ static int __init tpm_timer_init(struct device_node *np)
* 4) Channel0 disabled
* 5) DMA transfers disabled
*/
+ /* make sure counter is disabled */
writel(0, timer_base + TPM_SC);
+ /* TOF is W1C */
+ writel(TPM_SC_TOF_MASK, timer_base + TPM_SC);
writel(0, timer_base + TPM_CNT);
- writel(0, timer_base + TPM_C0SC);
+ /* CHF is W1C */
+ writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC);
- /* increase per cnt, div 8 by default */
- writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT,
+ /*
+ * increase per cnt,
+ * div 8 for 32-bit counter and div 128 for 16-bit counter
+ */
+ writel(TPM_SC_CMOD_INC_PER_CNT |
+ (counter_width == 0x20 ?
+ TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX),
timer_base + TPM_SC);
/* set MOD register to maximum for free running mode */
- writel(0xffffffff, timer_base + TPM_MOD);
+ writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD);
rate = clk_get_rate(per) >> 3;
ret = tpm_clocksource_init(rate);
diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c
new file mode 100644
index 000000000000..7a9bb5532d99
--- /dev/null
+++ b/drivers/clocksource/timer-npcm7xx.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2018 Nuvoton Technologies tomer.maimon@nuvoton.com
+ * All rights reserved.
+ *
+ * Copyright 2017 Google, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clockchips.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include "timer-of.h"
+
+/* Timers registers */
+#define NPCM7XX_REG_TCSR0 0x0 /* Timer 0 Control and Status Register */
+#define NPCM7XX_REG_TICR0 0x8 /* Timer 0 Initial Count Register */
+#define NPCM7XX_REG_TCSR1 0x4 /* Timer 1 Control and Status Register */
+#define NPCM7XX_REG_TICR1 0xc /* Timer 1 Initial Count Register */
+#define NPCM7XX_REG_TDR1 0x14 /* Timer 1 Data Register */
+#define NPCM7XX_REG_TISR 0x18 /* Timer Interrupt Status Register */
+
+/* Timers control */
+#define NPCM7XX_Tx_RESETINT 0x1f
+#define NPCM7XX_Tx_PERIOD BIT(27)
+#define NPCM7XX_Tx_INTEN BIT(29)
+#define NPCM7XX_Tx_COUNTEN BIT(30)
+#define NPCM7XX_Tx_ONESHOT 0x0
+#define NPCM7XX_Tx_OPER GENMASK(3, 27)
+#define NPCM7XX_Tx_MIN_PRESCALE 0x1
+#define NPCM7XX_Tx_TDR_MASK_BITS 24
+#define NPCM7XX_Tx_MAX_CNT 0xFFFFFF
+#define NPCM7XX_T0_CLR_INT 0x1
+#define NPCM7XX_Tx_CLR_CSR 0x0
+
+/* Timers operating mode */
+#define NPCM7XX_START_PERIODIC_Tx (NPCM7XX_Tx_PERIOD | NPCM7XX_Tx_COUNTEN | \
+ NPCM7XX_Tx_INTEN | \
+ NPCM7XX_Tx_MIN_PRESCALE)
+
+#define NPCM7XX_START_ONESHOT_Tx (NPCM7XX_Tx_ONESHOT | NPCM7XX_Tx_COUNTEN | \
+ NPCM7XX_Tx_INTEN | \
+ NPCM7XX_Tx_MIN_PRESCALE)
+
+#define NPCM7XX_START_Tx (NPCM7XX_Tx_COUNTEN | NPCM7XX_Tx_PERIOD | \
+ NPCM7XX_Tx_MIN_PRESCALE)
+
+#define NPCM7XX_DEFAULT_CSR (NPCM7XX_Tx_CLR_CSR | NPCM7XX_Tx_MIN_PRESCALE)
+
+static int npcm7xx_timer_resume(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+ u32 val;
+
+ val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
+ val |= NPCM7XX_Tx_COUNTEN;
+ writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
+
+ return 0;
+}
+
+static int npcm7xx_timer_shutdown(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+ u32 val;
+
+ val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
+ val &= ~NPCM7XX_Tx_COUNTEN;
+ writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
+
+ return 0;
+}
+
+static int npcm7xx_timer_oneshot(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+ u32 val;
+
+ val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
+ val &= ~NPCM7XX_Tx_OPER;
+
+ val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
+ val |= NPCM7XX_START_ONESHOT_Tx;
+ writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
+
+ return 0;
+}
+
+static int npcm7xx_timer_periodic(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+ u32 val;
+
+ val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
+ val &= ~NPCM7XX_Tx_OPER;
+
+ writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0);
+ val |= NPCM7XX_START_PERIODIC_Tx;
+
+ writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
+
+ return 0;
+}
+
+static int npcm7xx_clockevent_set_next_event(unsigned long evt,
+ struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+ u32 val;
+
+ writel(evt, timer_of_base(to) + NPCM7XX_REG_TICR0);
+ val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
+ val |= NPCM7XX_START_Tx;
+ writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
+
+ return 0;
+}
+
+static irqreturn_t npcm7xx_timer0_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(evt);
+
+ writel(NPCM7XX_T0_CLR_INT, timer_of_base(to) + NPCM7XX_REG_TISR);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct timer_of npcm7xx_to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = "npcm7xx-timer0",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = npcm7xx_clockevent_set_next_event,
+ .set_state_shutdown = npcm7xx_timer_shutdown,
+ .set_state_periodic = npcm7xx_timer_periodic,
+ .set_state_oneshot = npcm7xx_timer_oneshot,
+ .tick_resume = npcm7xx_timer_resume,
+ .rating = 300,
+ },
+
+ .of_irq = {
+ .handler = npcm7xx_timer0_interrupt,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+};
+
+static void __init npcm7xx_clockevents_init(void)
+{
+ writel(NPCM7XX_DEFAULT_CSR,
+ timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR0);
+
+ writel(NPCM7XX_Tx_RESETINT,
+ timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TISR);
+
+ npcm7xx_to.clkevt.cpumask = cpumask_of(0);
+ clockevents_config_and_register(&npcm7xx_to.clkevt,
+ timer_of_rate(&npcm7xx_to),
+ 0x1, NPCM7XX_Tx_MAX_CNT);
+}
+
+static void __init npcm7xx_clocksource_init(void)
+{
+ u32 val;
+
+ writel(NPCM7XX_DEFAULT_CSR,
+ timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
+ writel(NPCM7XX_Tx_MAX_CNT,
+ timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TICR1);
+
+ val = readl(timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
+ val |= NPCM7XX_START_Tx;
+ writel(val, timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
+
+ clocksource_mmio_init(timer_of_base(&npcm7xx_to) +
+ NPCM7XX_REG_TDR1,
+ "npcm7xx-timer1", timer_of_rate(&npcm7xx_to),
+ 200, (unsigned int)NPCM7XX_Tx_TDR_MASK_BITS,
+ clocksource_mmio_readl_down);
+}
+
+static int __init npcm7xx_timer_init(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &npcm7xx_to);
+ if (ret)
+ return ret;
+
+	/* Clock input is divided by PRESCALE + 1 before it is fed to the counter */
+ npcm7xx_to.of_clk.rate = npcm7xx_to.of_clk.rate /
+ (NPCM7XX_Tx_MIN_PRESCALE + 1);
+
+ npcm7xx_clocksource_init();
+ npcm7xx_clockevents_init();
+
+ pr_info("Enabling NPCM7xx clocksource timer base: %px, IRQ: %d ",
+ timer_of_base(&npcm7xx_to), timer_of_irq(&npcm7xx_to));
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init);
+
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index a782ce87715c..ed5e42461094 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -262,6 +262,8 @@ void proc_coredump_connector(struct task_struct *task)
ev->what = PROC_EVENT_COREDUMP;
ev->event_data.coredump.process_pid = task->pid;
ev->event_data.coredump.process_tgid = task->tgid;
+ ev->event_data.coredump.parent_pid = task->real_parent->pid;
+ ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
@@ -288,6 +290,8 @@ void proc_exit_connector(struct task_struct *task)
ev->event_data.exit.process_tgid = task->tgid;
ev->event_data.exit.exit_code = task->exit_code;
ev->event_data.exit.exit_signal = task->exit_signal;
+ ev->event_data.exit.parent_pid = task->real_parent->pid;
+ ev->event_data.exit.parent_tgid = task->real_parent->tgid;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 7f56fe5183f2..96b35b8b3606 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -20,7 +20,7 @@ config ACPI_CPPC_CPUFREQ
config ARM_ARMADA_37XX_CPUFREQ
tristate "Armada 37xx CPUFreq support"
- depends on ARCH_MVEBU
+ depends on ARCH_MVEBU && CPUFREQ_DT
help
This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
The Armada 37xx PMU supports 4 frequency and VDD levels.
@@ -71,16 +71,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ
Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
-config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
- bool "Broadcom STB AVS CPUfreq driver sysfs debug capability"
- depends on ARM_BRCMSTB_AVS_CPUFREQ
- help
- Enabling this option turns on debug support via sysfs under
- /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and
- write some AVS mailbox registers through sysfs entries.
-
- If in doubt, say N.
-
config ARM_EXYNOS5440_CPUFREQ
tristate "SAMSUNG EXYNOS5440"
depends on SOC_EXYNOS5440
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 6cdac1aaf23c..b07559b9ed99 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -49,13 +49,6 @@
#include <linux/platform_device.h>
#include <linux/semaphore.h>
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-#include <linux/ctype.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#endif
-
/* Max number of arguments AVS calls take */
#define AVS_MAX_CMD_ARGS 4
/*
@@ -182,88 +175,11 @@ struct private_data {
void __iomem *base;
void __iomem *avs_intr_base;
struct device *dev;
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
- struct dentry *debugfs;
-#endif
struct completion done;
struct semaphore sem;
struct pmap pmap;
};
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-
-enum debugfs_format {
- DEBUGFS_NORMAL,
- DEBUGFS_FLOAT,
- DEBUGFS_REV,
-};
-
-struct debugfs_data {
- struct debugfs_entry *entry;
- struct private_data *priv;
-};
-
-struct debugfs_entry {
- char *name;
- u32 offset;
- fmode_t mode;
- enum debugfs_format format;
-};
-
-#define DEBUGFS_ENTRY(name, mode, format) { \
- #name, AVS_MBOX_##name, mode, format \
-}
-
-/*
- * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
- */
-#define AVS_MBOX_PARAM1 AVS_MBOX_PARAM(0)
-#define AVS_MBOX_PARAM2 AVS_MBOX_PARAM(1)
-#define AVS_MBOX_PARAM3 AVS_MBOX_PARAM(2)
-#define AVS_MBOX_PARAM4 AVS_MBOX_PARAM(3)
-
-/*
- * This table stores the name, access permissions and offset for each hardware
- * register and is used to generate debugfs entries.
- */
-static struct debugfs_entry debugfs_entries[] = {
- DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
- DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
-};
-
-static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int);
-
-static char *__strtolower(char *s)
-{
- char *p;
-
- for (p = s; *p; p++)
- *p = tolower(*p);
-
- return s;
-}
-
-#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
-
static void __iomem *__map_region(const char *name)
{
struct device_node *np;
@@ -516,238 +432,6 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
return table;
}
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-
-#define MANT(x) (unsigned int)(abs((x)) / 1000)
-#define FRAC(x) (unsigned int)(abs((x)) - abs((x)) / 1000 * 1000)
-
-static int brcm_avs_debug_show(struct seq_file *s, void *data)
-{
- struct debugfs_data *dbgfs = s->private;
- void __iomem *base;
- u32 val, offset;
-
- if (!dbgfs) {
- seq_puts(s, "No device pointer\n");
- return 0;
- }
-
- base = dbgfs->priv->base;
- offset = dbgfs->entry->offset;
- val = readl(base + offset);
- switch (dbgfs->entry->format) {
- case DEBUGFS_NORMAL:
- seq_printf(s, "%u\n", val);
- break;
- case DEBUGFS_FLOAT:
- seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
- break;
- case DEBUGFS_REV:
- seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
- (val >> 16 & 0xff), (val >> 8 & 0xff),
- val & 0xff);
- break;
- }
- seq_printf(s, "0x%08x\n", val);
-
- return 0;
-}
-
-#undef MANT
-#undef FRAC
-
-static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct seq_file *s = file->private_data;
- struct debugfs_data *dbgfs = s->private;
- struct private_data *priv = dbgfs->priv;
- void __iomem *base, *avs_intr_base;
- bool use_issue_command = false;
- unsigned long val, offset;
- char str[128];
- int ret;
- char *str_ptr = str;
-
- if (size >= sizeof(str))
- return -E2BIG;
-
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, size);
- if (ret)
- return ret;
-
- base = priv->base;
- avs_intr_base = priv->avs_intr_base;
- offset = dbgfs->entry->offset;
- /*
- * Special case writing to "command" entry only: if the string starts
- * with a 'c', we use the driver's __issue_avs_command() function.
- * Otherwise, we perform a raw write. This should allow testing of raw
- * access as well as using the higher level function. (Raw access
- * doesn't clear the firmware return status after issuing the command.)
- */
- if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
- use_issue_command = true;
- str_ptr++;
- }
- if (kstrtoul(str_ptr, 0, &val) != 0)
- return -EINVAL;
-
- /*
- * Setting the P-state is a special case. We need to update the CPU
- * frequency we report.
- */
- if (val == AVS_CMD_SET_PSTATE) {
- struct cpufreq_policy *policy;
- unsigned int pstate;
-
- policy = cpufreq_cpu_get(smp_processor_id());
- /* Read back the P-state we are about to set */
- pstate = readl(base + AVS_MBOX_PARAM(0));
- if (use_issue_command) {
- ret = brcm_avs_target_index(policy, pstate);
- return ret ? ret : size;
- }
- policy->cur = policy->freq_table[pstate].frequency;
- }
-
- if (use_issue_command) {
- ret = __issue_avs_command(priv, val, false, NULL);
- } else {
- /* Locking here is not perfect, but is only for debug. */
- ret = down_interruptible(&priv->sem);
- if (ret)
- return ret;
-
- writel(val, base + offset);
- /* We have to wake up the firmware to process a command. */
- if (offset == AVS_MBOX_COMMAND)
- writel(AVS_CPU_L2_INT_MASK,
- avs_intr_base + AVS_CPU_L2_SET0);
- up(&priv->sem);
- }
-
- return ret ? ret : size;
-}
-
-static struct debugfs_entry *__find_debugfs_entry(const char *name)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++)
- if (strcasecmp(debugfs_entries[i].name, name) == 0)
- return &debugfs_entries[i];
-
- return NULL;
-}
-
-static int brcm_avs_debug_open(struct inode *inode, struct file *file)
-{
- struct debugfs_data *data;
- fmode_t fmode;
- int ret;
-
- /*
- * seq_open(), which is called by single_open(), clears "write" access.
- * We need write access to some files, so we preserve our access mode
- * and restore it.
- */
- fmode = file->f_mode;
- /*
- * Check access permissions even for root. We don't want to be writing
- * to read-only registers. Access for regular users has already been
- * checked by the VFS layer.
- */
- if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
- return -EACCES;
-
- data = kmalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- /*
- * We use the same file system operations for all our debug files. To
- * produce specific output, we look up the file name upon opening a
- * debugfs entry and map it to a memory offset. This offset is then used
- * in the generic "show" function to read a specific register.
- */
- data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
- data->priv = inode->i_private;
-
- ret = single_open(file, brcm_avs_debug_show, data);
- if (ret)
- kfree(data);
- file->f_mode = fmode;
-
- return ret;
-}
-
-static int brcm_avs_debug_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq_priv = file->private_data;
- struct debugfs_data *data = seq_priv->private;
-
- kfree(data);
- return single_release(inode, file);
-}
-
-static const struct file_operations brcm_avs_debug_ops = {
- .open = brcm_avs_debug_open,
- .read = seq_read,
- .write = brcm_avs_seq_write,
- .llseek = seq_lseek,
- .release = brcm_avs_debug_release,
-};
-
-static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
-{
- struct private_data *priv = platform_get_drvdata(pdev);
- struct dentry *dir;
- int i;
-
- if (!priv)
- return;
-
- dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
- if (IS_ERR_OR_NULL(dir))
- return;
- priv->debugfs = dir;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
- /*
- * The DEBUGFS_ENTRY macro generates uppercase strings. We
- * convert them to lowercase before creating the debugfs
- * entries.
- */
- char *entry = __strtolower(debugfs_entries[i].name);
- fmode_t mode = debugfs_entries[i].mode;
-
- if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
- dir, priv, &brcm_avs_debug_ops)) {
- priv->debugfs = NULL;
- debugfs_remove_recursive(dir);
- break;
- }
- }
-}
-
-static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
-{
- struct private_data *priv = platform_get_drvdata(pdev);
-
- if (priv && priv->debugfs) {
- debugfs_remove_recursive(priv->debugfs);
- priv->debugfs = NULL;
- }
-}
-
-#else
-
-static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
-static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
-
-#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
-
/*
* To ensure the right firmware is running we need to
* - check the MAGIC matches what we expect
@@ -1016,11 +700,8 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
return ret;
brcm_avs_driver.driver_data = pdev;
- ret = cpufreq_register_driver(&brcm_avs_driver);
- if (!ret)
- brcm_avs_cpufreq_debug_init(pdev);
- return ret;
+ return cpufreq_register_driver(&brcm_avs_driver);
}
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
@@ -1032,8 +713,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
if (ret)
return ret;
- brcm_avs_cpufreq_debug_exit(pdev);
-
priv = platform_get_drvdata(pdev);
iounmap(priv->base);
iounmap(priv->avs_intr_base);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index bc5fc1630876..b15115a48775 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -126,6 +126,49 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
cpu->perf_caps.lowest_perf, cpu_num, ret);
}
+/*
+ * The PCC subspace describes the rate at which the platform can accept
+ * commands on the shared PCC channel (including READs, which do not count
+ * towards frequency transition requests), so ideally we should use the PCC
+ * values as a fallback if we don't have a platform-specific transition_delay_us.
+ */
+#ifdef CONFIG_ARM64
+#include <asm/cputype.h>
+
+static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
+{
+ unsigned long implementor = read_cpuid_implementor();
+ unsigned long part_num = read_cpuid_part_number();
+ unsigned int delay_us = 0;
+
+ switch (implementor) {
+ case ARM_CPU_IMP_QCOM:
+ switch (part_num) {
+ case QCOM_CPU_PART_FALKOR_V1:
+ case QCOM_CPU_PART_FALKOR:
+ delay_us = 10000;
+ break;
+ default:
+ delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ break;
+ }
+ break;
+ default:
+ delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ break;
+ }
+
+ return delay_us;
+}
+
+#else
+
+static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
+{
+ return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+}
+#endif
+
static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu;
@@ -162,8 +205,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu->perf_caps.highest_perf;
policy->cpuinfo.max_freq = cppc_dmi_max_khz;
- policy->transition_delay_us = cppc_get_transition_latency(cpu_num) /
- NSEC_PER_USEC;
+ policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
policy->shared_type = cpu->shared_type;
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 0591874856d3..54edaec1e608 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -679,6 +679,16 @@ void gpstate_timer_handler(struct timer_list *t)
if (!spin_trylock(&gpstates->gpstate_lock))
return;
+ /*
+	 * If the timer has migrated to a different CPU, then bring
+	 * it back to one of the policy->cpus.
+ */
+ if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
+ gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
+ add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
+ spin_unlock(&gpstates->gpstate_lock);
+ return;
+ }
/*
 * If PMCR was last updated using fast_switch then
@@ -718,10 +728,8 @@ void gpstate_timer_handler(struct timer_list *t)
if (gpstate_idx != gpstates->last_lpstate_idx)
queue_gpstate_timer(gpstates);
+ set_pstate(&freq_data);
spin_unlock(&gpstates->gpstate_lock);
-
- /* Timer may get migrated to a different cpu on cpu hot unplug */
- smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
}
/*
diff --git a/drivers/crypto/qat/qat_common/.gitignore b/drivers/crypto/qat/qat_common/.gitignore
deleted file mode 100644
index ee328374dba8..000000000000
--- a/drivers/crypto/qat/qat_common/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*-asn1.[ch]
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index be8606457f27..aff2c1594220 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -19,6 +19,7 @@
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/mman.h>
#include "dax-private.h"
#include "dax.h"
@@ -540,6 +541,7 @@ static const struct file_operations dax_fops = {
.release = dax_release,
.get_unmapped_area = dax_get_unmapped_area,
.mmap = dax_mmap,
+ .mmap_supported_flags = MAP_SYNC,
};
static void dev_dax_release(struct device *dev)
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 7afbb28d6a0f..1bc5ffb338c8 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -270,7 +270,7 @@ EXPORT_SYMBOL_GPL(dca_remove_requester);
* @dev - the device that wants dca service
* @cpu - the cpuid as returned by get_cpu()
*/
-u8 dca_common_get_tag(struct device *dev, int cpu)
+static u8 dca_common_get_tag(struct device *dev, int cpu)
{
struct dca_provider *dca;
u8 tag;
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d29275b97e84..4a828c18099a 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -524,6 +524,14 @@ static int bam_alloc_chan(struct dma_chan *chan)
return 0;
}
+static int bam_pm_runtime_get_sync(struct device *dev)
+{
+ if (pm_runtime_enabled(dev))
+ return pm_runtime_get_sync(dev);
+
+ return 0;
+}
+
/**
* bam_free_chan - Frees dma resources associated with specific channel
* @chan: specified channel
@@ -539,7 +547,7 @@ static void bam_free_chan(struct dma_chan *chan)
unsigned long flags;
int ret;
- ret = pm_runtime_get_sync(bdev->dev);
+ ret = bam_pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return;
@@ -720,7 +728,7 @@ static int bam_pause(struct dma_chan *chan)
unsigned long flag;
int ret;
- ret = pm_runtime_get_sync(bdev->dev);
+ ret = bam_pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return ret;
@@ -746,7 +754,7 @@ static int bam_resume(struct dma_chan *chan)
unsigned long flag;
int ret;
- ret = pm_runtime_get_sync(bdev->dev);
+ ret = bam_pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return ret;
@@ -852,7 +860,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
if (srcs & P_IRQ)
tasklet_schedule(&bdev->task);
- ret = pm_runtime_get_sync(bdev->dev);
+ ret = bam_pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return ret;
@@ -969,7 +977,7 @@ static void bam_start_dma(struct bam_chan *bchan)
if (!vd)
return;
- ret = pm_runtime_get_sync(bdev->dev);
+ ret = bam_pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return;
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index e6f17825db79..2b90606452a2 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -284,7 +284,7 @@ scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
struct clock_info *ci = handle->clk_priv;
struct scmi_clock_info *clk = ci->clk + clk_id;
- if (!clk->name || !clk->name[0])
+ if (!clk->name[0])
return NULL;
return clk;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 14b147135a0c..2455be8cbc4f 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -778,6 +778,7 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
+ return;
}
/* setup handle now as the transport is ready */
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 6feeacbe4d97..54e66adef252 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -187,7 +187,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
char *s;
int is_ff = 1, is_00 = 1, i;
- if (dmi_ident[slot] || dm->length <= index + 16)
+ if (dmi_ident[slot] || dm->length < index + 16)
return;
d = (u8 *) dm + index;
@@ -211,9 +211,9 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
 * says that this is the de facto standard.
*/
if (dmi_ver >= 0x020600)
- sprintf(s, "%pUL", d);
+ sprintf(s, "%pUl", d);
else
- sprintf(s, "%pUB", d);
+ sprintf(s, "%pUb", d);
dmi_ident[slot] = s;
}
@@ -792,7 +792,15 @@ static bool dmi_matches(const struct dmi_system_id *dmi)
int s = dmi->matches[i].slot;
if (s == DMI_NONE)
break;
- if (dmi_ident[s]) {
+ if (s == DMI_OEM_STRING) {
+		/* DMI_OEM_STRING must be an exact match */
+ const struct dmi_device *valid;
+
+ valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING,
+ dmi->matches[i].substr, NULL);
+ if (valid)
+ continue;
+ } else if (dmi_ident[s]) {
if (dmi->matches[i].exact_match) {
if (!strcmp(dmi_ident[s],
dmi->matches[i].substr))
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index b9bd827caa22..1b4d465cc5d9 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -98,6 +98,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
(phys_seed >> 32) & mask : TEXT_OFFSET;
/*
+ * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not
+ * be a multiple of EFI_KIMG_ALIGN, and we must ensure that
+ * we preserve the misalignment of 'offset' relative to
+ * EFI_KIMG_ALIGN so that statically allocated objects whose
+ * alignment exceeds PAGE_SIZE appear correctly aligned in
+ * memory.
+ */
+ offset |= TEXT_OFFSET % EFI_KIMG_ALIGN;
+
+ /*
* If KASLR is enabled, and we have some randomness available,
* locate the kernel at a randomized offset in physical memory.
*/
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 14f14efdf0d5..06d212a3d49d 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -249,7 +249,7 @@ static int altera_ps_probe(struct spi_device *spi)
conf->data = of_id->data;
conf->spi = spi;
- conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH);
+ conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW);
if (IS_ERR(conf->config)) {
dev_err(&spi->dev, "Failed to get config gpio: %ld\n",
PTR_ERR(conf->config));
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 77e485557498..6f693b7d5220 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -384,7 +384,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
if (set)
reg |= bit;
else
- reg &= bit;
+ reg &= ~bit;
iowrite32(reg, addr);
spin_unlock_irqrestore(&gpio->lock, flags);
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 1948724d8c36..25d16b2af1c3 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -116,9 +116,9 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
unsigned long word_mask;
const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
unsigned long port_state;
- u8 __iomem ports[] = {
- idio16gpio->reg->out0_7, idio16gpio->reg->out8_15,
- idio16gpio->reg->in0_7, idio16gpio->reg->in8_15,
+ void __iomem *ports[] = {
+ &idio16gpio->reg->out0_7, &idio16gpio->reg->out8_15,
+ &idio16gpio->reg->in0_7, &idio16gpio->reg->in8_15,
};
/* clear bits array to a clean slate */
@@ -143,7 +143,7 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
}
/* read bits from current gpio port */
- port_state = ioread8(ports + i);
+ port_state = ioread8(ports[i]);
/* store acquired bits at respective bits array offset */
bits[word_index] |= port_state << word_offset;
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
index 835607ecf658..f953541e7890 100644
--- a/drivers/gpio/gpio-pcie-idio-24.c
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -206,10 +206,10 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip,
unsigned long word_mask;
const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
unsigned long port_state;
- u8 __iomem ports[] = {
- idio24gpio->reg->out0_7, idio24gpio->reg->out8_15,
- idio24gpio->reg->out16_23, idio24gpio->reg->in0_7,
- idio24gpio->reg->in8_15, idio24gpio->reg->in16_23,
+ void __iomem *ports[] = {
+ &idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15,
+ &idio24gpio->reg->out16_23, &idio24gpio->reg->in0_7,
+ &idio24gpio->reg->in8_15, &idio24gpio->reg->in16_23,
};
const unsigned long out_mode_mask = BIT(1);
@@ -217,7 +217,7 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip,
bitmap_zero(bits, chip->ngpio);
	/* get bits are evaluated one gpio port register at a time */
- for (i = 0; i < ARRAY_SIZE(ports); i++) {
+ for (i = 0; i < ARRAY_SIZE(ports) + 1; i++) {
/* gpio offset in bits array */
bits_offset = i * gpio_reg_size;
@@ -236,7 +236,7 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip,
/* read bits from current gpio port (port 6 is TTL GPIO) */
if (i < 6)
- port_state = ioread8(ports + i);
+ port_state = ioread8(ports[i]);
else if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
port_state = ioread8(&idio24gpio->reg->ttl_out0_7);
else
@@ -301,9 +301,9 @@ static void idio_24_gpio_set_multiple(struct gpio_chip *chip,
const unsigned long port_mask = GENMASK(gpio_reg_size, 0);
unsigned long flags;
unsigned int out_state;
- u8 __iomem ports[] = {
- idio24gpio->reg->out0_7, idio24gpio->reg->out8_15,
- idio24gpio->reg->out16_23
+ void __iomem *ports[] = {
+ &idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15,
+ &idio24gpio->reg->out16_23
};
const unsigned long out_mode_mask = BIT(1);
const unsigned int ttl_offset = 48;
@@ -327,9 +327,9 @@ static void idio_24_gpio_set_multiple(struct gpio_chip *chip,
raw_spin_lock_irqsave(&idio24gpio->lock, flags);
/* process output lines */
- out_state = ioread8(ports + i) & ~gpio_mask;
+ out_state = ioread8(ports[i]) & ~gpio_mask;
out_state |= (*bits >> bits_offset) & gpio_mask;
- iowrite8(out_state, ports + i);
+ iowrite8(out_state, ports[i]);
raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 43aeb07343ec..d8ccb500872f 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -497,7 +497,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
struct gpiohandle_request handlereq;
struct linehandle_state *lh;
struct file *file;
- int fd, i, ret;
+ int fd, i, count = 0, ret;
u32 lflags;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
@@ -558,6 +558,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_descs;
lh->descs[i] = desc;
+ count = i;
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@@ -628,7 +629,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
out_put_unused_fd:
put_unused_fd(fd);
out_free_descs:
- for (; i >= 0; i--)
+ for (i = 0; i < count; i++)
gpiod_free(lh->descs[i]);
kfree(lh->label);
out_free_lh:
@@ -902,7 +903,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
desc = &gdev->descs[offset];
ret = gpiod_request(desc, le->label);
if (ret)
- goto out_free_desc;
+ goto out_free_label;
le->desc = desc;
le->eflags = eflags;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f44a83ab2bf4..c8b605f3dc05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -890,6 +890,7 @@ struct amdgpu_gfx_funcs {
void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
+ void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue);
};
struct amdgpu_ngg_buf {
@@ -1378,6 +1379,7 @@ enum amd_hw_ip_block_type {
ATHUB_HWIP,
NBIO_HWIP,
MP0_HWIP,
+ MP1_HWIP,
UVD_HWIP,
VCN_HWIP = UVD_HWIP,
VCE_HWIP,
@@ -1387,6 +1389,7 @@ enum amd_hw_ip_block_type {
SMUIO_HWIP,
PWR_HWIP,
NBIF_HWIP,
+ THM_HWIP,
MAX_HWIP
};
@@ -1812,6 +1815,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
+#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q))
/* Common functions */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 09d35051fdd6..3fabf9f97022 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -419,9 +419,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
if (other) {
signed long r;
- r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ r = dma_fence_wait(other, true);
if (r < 0) {
- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+
return r;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 369beb5041a2..448d69fe3756 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -64,16 +64,21 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
#if defined(CONFIG_DEBUG_FS)
-static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+
+static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
+ char __user *buf, size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
+ bool pm_pg_lock, use_bank, use_ring;
+ unsigned instance_bank, sh_bank, se_bank, me, pipe, queue;
- if (size & 0x3 || *pos & 0x3)
+ pm_pg_lock = use_bank = use_ring = false;
+ instance_bank = sh_bank = se_bank = me = pipe = queue = 0;
+
+ if (size & 0x3 || *pos & 0x3 ||
+ ((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
return -EINVAL;
/* are we reading registers for which a PG lock is necessary? */
@@ -91,8 +96,15 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
if (instance_bank == 0x3FF)
instance_bank = 0xFFFFFFFF;
use_bank = 1;
+ } else if (*pos & (1ULL << 61)) {
+
+ me = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
+
+ use_ring = 1;
} else {
- use_bank = 0;
+ use_bank = use_ring = 0;
}
*pos &= (1UL << 22) - 1;
@@ -104,6 +116,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
sh_bank, instance_bank);
+ } else if (use_ring) {
+ mutex_lock(&adev->srbm_mutex);
+ amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue);
}
if (pm_pg_lock)
@@ -115,8 +130,14 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
if (*pos > adev->rmmio_size)
goto end;
- value = RREG32(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
+ if (read) {
+ value = RREG32(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ } else {
+ r = get_user(value, (uint32_t *)buf);
+ if (!r)
+ WREG32(*pos >> 2, value);
+ }
if (r) {
result = r;
goto end;
@@ -132,6 +153,9 @@ end:
if (use_bank) {
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
+ } else if (use_ring) {
+ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
}
if (pm_pg_lock)
@@ -140,78 +164,17 @@ end:
return result;
}
+
+static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
+}
+
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- /* are we reading registers for which a PG lock is necessary? */
- pm_pg_lock = (*pos >> 23) & 1;
-
- if (*pos & (1ULL << 62)) {
- se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
- sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
- instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
-
- if (se_bank == 0x3FF)
- se_bank = 0xFFFFFFFF;
- if (sh_bank == 0x3FF)
- sh_bank = 0xFFFFFFFF;
- if (instance_bank == 0x3FF)
- instance_bank = 0xFFFFFFFF;
- use_bank = 1;
- } else {
- use_bank = 0;
- }
-
- *pos &= (1UL << 22) - 1;
-
- if (use_bank) {
- if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
- return -EINVAL;
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
- }
-
- if (pm_pg_lock)
- mutex_lock(&adev->pm.mutex);
-
- while (size) {
- uint32_t value;
-
- if (*pos > adev->rmmio_size)
- return result;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- }
-
- if (pm_pg_lock)
- mutex_unlock(&adev->pm.mutex);
-
- return result;
+ return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7379aa5a6849..0b19482b36b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -922,6 +922,11 @@ static int __init amdgpu_init(void)
{
int r;
+ if (vgacon_text_force()) {
+ DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
+ return -EINVAL;
+ }
+
r = amdgpu_sync_init();
if (r)
goto error_sync;
@@ -930,10 +935,6 @@ static int __init amdgpu_init(void)
if (r)
goto error_fence;
- if (vgacon_text_force()) {
- DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
- return -EINVAL;
- }
DRM_INFO("amdgpu kernel modesetting enabled.\n");
driver = &kms_driver;
pdriver = &amdgpu_kms_pci_driver;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 455a81e4c246..97449e06a242 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -410,6 +410,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission)
{
+ long timeout;
int r;
/* Check that num_hw_submission is a power of two */
@@ -433,11 +434,16 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
+		/* for the non-SR-IOV case, no timeout is enforced on compute rings */
+ if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ && !amdgpu_sriov_vf(ring->adev))
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission, amdgpu_job_hang_limit,
- (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ?
- MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(amdgpu_lockup_timeout),
- ring->name);
+ timeout, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 28c2706e48d7..46b9ea4e6103 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -56,11 +56,23 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
alignment = PAGE_SIZE;
}
+retry:
r = amdgpu_bo_create(adev, size, alignment, initial_domain,
flags, type, resv, &bo);
if (r) {
- DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
- size, initial_domain, alignment, r);
+ if (r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
+ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+ goto retry;
+ }
+ DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
+ size, initial_domain, alignment, r);
+ }
return r;
}
*obj = &bo->gem_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index fac4b6067efd..6d08cde8443c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -356,7 +356,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
struct amdgpu_bo *bo;
unsigned long page_align;
size_t acc_size;
- u32 domains;
int r;
page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
@@ -418,23 +417,12 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
#endif
bo->tbo.bdev = &adev->mman.bdev;
- domains = bo->preferred_domains;
-retry:
- amdgpu_ttm_placement_from_domain(bo, domains);
+ amdgpu_ttm_placement_from_domain(bo, domain);
+
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, &ctx, acc_size,
NULL, resv, &amdgpu_ttm_bo_destroy);
-
- if (unlikely(r && r != -ERESTARTSYS)) {
- if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
- bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- goto retry;
- } else if (domains != bo->preferred_domains) {
- domains = bo->allowed_domains;
- goto retry;
- }
- }
- if (unlikely(r))
+ if (unlikely(r != 0))
return r;
if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 19e71f4a8ac2..c7d43e064fc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -505,6 +505,9 @@ failed:
int psp_gpu_reset(struct amdgpu_device *adev)
{
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
return psp_mode1_reset(&adev->psp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index f48ea0dad875..a7576255cc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -859,7 +859,7 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 0fff5b8cd318..cd6bf291a853 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3061,11 +3061,18 @@ static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 q)
+{
+ DRM_INFO("Not implemented\n");
+}
+
static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v6_0_select_se_sh,
.read_wave_data = &gfx_v6_0_read_wave_data,
.read_wave_sgprs = &gfx_v6_0_read_wave_sgprs,
+ .select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
};
static int gfx_v6_0_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index e13d9d83767b..42b6144c1fd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4270,11 +4270,18 @@ static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 q)
+{
+ cik_srbm_select(adev, me, pipe, q, 0);
+}
+
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v7_0_select_se_sh,
.read_wave_data = &gfx_v7_0_read_wave_data,
.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
+ .select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
};
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 27943e57681c..e14263fca1c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] =
static const u32 vgpr_init_regs[] =
{
mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
- mmCOMPUTE_RESOURCE_LIMITS, 0,
+ mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
mmCOMPUTE_NUM_THREAD_X, 256*4,
mmCOMPUTE_NUM_THREAD_Y, 1,
mmCOMPUTE_NUM_THREAD_Z, 1,
+ mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
mmCOMPUTE_PGM_RSRC2, 20,
mmCOMPUTE_USER_DATA_0, 0xedcedc00,
mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] =
static const u32 sgpr1_init_regs[] =
{
mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
- mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
+ mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
mmCOMPUTE_NUM_THREAD_X, 256*5,
mmCOMPUTE_NUM_THREAD_Y, 1,
mmCOMPUTE_NUM_THREAD_Z, 1,
+ mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
mmCOMPUTE_PGM_RSRC2, 20,
mmCOMPUTE_USER_DATA_0, 0xedcedc00,
mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] =
mmCOMPUTE_NUM_THREAD_X, 256*5,
mmCOMPUTE_NUM_THREAD_Y, 1,
mmCOMPUTE_NUM_THREAD_Z, 1,
+ mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
mmCOMPUTE_PGM_RSRC2, 20,
mmCOMPUTE_USER_DATA_0, 0xedcedc00,
mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -3475,6 +3478,12 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
WREG32(mmGRBM_GFX_INDEX, data);
}
+static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 q)
+{
+ vi_srbm_select(adev, me, pipe, q, 0);
+}
+
static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
@@ -5442,6 +5451,7 @@ static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
.select_se_sh = &gfx_v8_0_select_se_sh,
.read_wave_data = &gfx_v8_0_read_wave_data,
.read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
+ .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
};
static int gfx_v8_0_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1ae3de1094f9..9d39fd5b1822 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -998,12 +998,19 @@ static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}
+static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 q)
+{
+ soc15_grbm_select(adev, me, pipe, q, 0);
+}
+
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v9_0_select_se_sh,
.read_wave_data = &gfx_v9_0_read_wave_data,
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -2757,6 +2764,45 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
return 0;
}
+static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int j;
+
+ /* disable the queue if it's active */
+ if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
+
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+
+ if (j == adev->usec_timeout) {
+ DRM_DEBUG("KIQ dequeue request failed.\n");
+
+ /* Manual disable if dequeue request times out */
+ WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
+ }
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
+ 0);
+ }
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
+ WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
+
+ return 0;
+}
+
static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3010,7 +3056,6 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring
return r;
}
-
static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3033,6 +3078,20 @@ static int gfx_v9_0_hw_fini(void *handle)
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
return 0;
}
+
+ /* Use the deinitialize sequence from CAIL when unbinding the device
+ * from the driver, otherwise KIQ hangs when binding it back
+ */
+ if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
+ adev->gfx.kiq.ring.pipe,
+ adev->gfx.kiq.ring.queue, 0);
+ gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
+ soc15_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
gfx_v9_0_cp_enable(adev, false);
gfx_v9_0_rlc_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6452101c7aab..c7190c39c4f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -837,7 +837,7 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index ecaef084dab1..be20a387d961 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1105,7 +1105,7 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 2a8184082cd1..399f876f9cad 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1121,7 +1121,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index b154667a8fd9..a675ec6d2811 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1252,6 +1252,71 @@ static void si_invalidate_hdp(struct amdgpu_device *adev,
}
}
+static int si_get_pcie_lanes(struct amdgpu_device *adev)
+{
+ u32 link_width_cntl;
+
+ if (adev->flags & AMD_IS_APU)
+ return 0;
+
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+
+ switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
+ case LC_LINK_WIDTH_X1:
+ return 1;
+ case LC_LINK_WIDTH_X2:
+ return 2;
+ case LC_LINK_WIDTH_X4:
+ return 4;
+ case LC_LINK_WIDTH_X8:
+ return 8;
+ case LC_LINK_WIDTH_X0:
+ case LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
+
+static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
+{
+ u32 link_width_cntl, mask;
+
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ switch (lanes) {
+ case 0:
+ mask = LC_LINK_WIDTH_X0;
+ break;
+ case 1:
+ mask = LC_LINK_WIDTH_X1;
+ break;
+ case 2:
+ mask = LC_LINK_WIDTH_X2;
+ break;
+ case 4:
+ mask = LC_LINK_WIDTH_X4;
+ break;
+ case 8:
+ mask = LC_LINK_WIDTH_X8;
+ break;
+ case 16:
+ mask = LC_LINK_WIDTH_X16;
+ break;
+ default:
+ DRM_ERROR("invalid pcie lane request: %d\n", lanes);
+ return;
+ }
+
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_LINK_WIDTH_MASK;
+ link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
+ link_width_cntl |= (LC_RECONFIG_NOW |
+ LC_RECONFIG_ARC_MISSING_ESCAPE);
+
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+}
+
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
@@ -1262,6 +1327,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.get_xclk = &si_get_xclk,
.set_uvd_clocks = &si_set_uvd_clocks,
.set_vce_clocks = NULL,
+ .get_pcie_lanes = &si_get_pcie_lanes,
+ .set_pcie_lanes = &si_set_pcie_lanes,
.get_config_memsize = &si_get_config_memsize,
.flush_hdp = &si_flush_hdp,
.invalidate_hdp = &si_invalidate_hdp,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 672eaffac0a5..797d505bf9ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6372,9 +6372,9 @@ static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
{
u32 lane_width;
u32 new_lane_width =
- (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
- (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
amdgpu_set_pcie_lanes(adev, new_lane_width);
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index 4c45db7f1157..45aafca7f315 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -38,6 +38,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
@@ -49,7 +50,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
-
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index ed2f06c9f346..3858820a0055 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -6,5 +6,6 @@ config HSA_AMD
tristate "HSA kernel driver for AMD GPU devices"
depends on DRM_AMDGPU && X86_64
imply AMD_IOMMU_V2
+ select MMU_NOTIFIER
help
Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index cd679cf1fd30..59808a39ecf4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -749,12 +749,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
struct timespec64 time;
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
- return -EINVAL;
-
- /* Reading GPU clock counter from KGD */
- args->gpu_clock_counter =
- dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
+ if (dev)
+ /* Reading GPU clock counter from KGD */
+ args->gpu_clock_counter =
+ dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
+ else
+ /* Node without GPU resource */
+ args->gpu_clock_counter = 0;
/* No access to rdtsc. Using raw monotonic time */
getrawmonotonic64(&time);
@@ -1147,7 +1148,7 @@ err_unlock:
return ret;
}
-bool kfd_dev_is_large_bar(struct kfd_dev *dev)
+static bool kfd_dev_is_large_bar(struct kfd_dev *dev)
{
struct kfd_local_mem_info mem_info;
@@ -1421,7 +1422,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
pdd = kfd_get_process_device_data(dev, p);
if (!pdd) {
- err = PTR_ERR(pdd);
+ err = -EINVAL;
goto bind_process_to_device_failed;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e42a28e3adc5..1dd1142246c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1403,6 +1403,28 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
return ret;
}
+
+static void register_backlight_device(struct amdgpu_display_manager *dm,
+ struct dc_link *link)
+{
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
+ link->type != dc_connection_none) {
+ /* Even if registration failed, we should continue with
+ * DM initialization because not having a backlight control
+ * is better than a black screen.
+ */
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev)
+ dm->backlight_link = link;
+ }
+#endif
+}
+
+
/* In this architecture, the association
* connector -> encoder -> crtc
* is not really required. The crtc and connector will hold the
@@ -1456,6 +1478,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
+ struct dc_link *link = NULL;
if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
DRM_ERROR(
@@ -1482,9 +1505,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
- if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
- DETECT_REASON_BOOT))
+ link = dc_get_link_at_index(dm->dc, i);
+
+ if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
+ register_backlight_device(dm, link);
+ }
+
+
}
/* Software is initialized. Now we can register interrupt handlers. */
@@ -2685,7 +2713,8 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
- if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+ if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
+ link->type != dc_connection_none) {
amdgpu_dm_register_backlight_device(dm);
if (dm->backlight_dev) {
@@ -3561,6 +3590,7 @@ create_i2c(struct ddc_service *ddc_service,
return i2c;
}
+
/* Note: this function assumes that dc_link_detect() was called for the
* dc_link which will be represented by this aconnector.
*/
@@ -3630,28 +3660,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector);
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
- /* NOTE: this currently will create backlight device even if a panel
- * is not connected to the eDP/LVDS connector.
- *
- * This is less than ideal but we don't have sink information at this
- * stage since detection happens after. We can't do detection earlier
- * since MST detection needs connectors to be created first.
- */
- if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
- /* Event if registration failed, we should continue with
- * DM initialization because not having a backlight control
- * is better then a black screen.
- */
- amdgpu_dm_register_backlight_device(dm);
-
- if (dm->backlight_dev)
- dm->backlight_link = link;
- }
-#endif
-
out_free:
if (res) {
kfree(i2c);
@@ -4549,6 +4557,7 @@ static int dm_update_crtcs_state(struct dc *dc,
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_connector_state *new_con_state = NULL;
struct dm_connector_state *dm_conn_state = NULL;
+ struct drm_plane_state *new_plane_state = NULL;
new_stream = NULL;
@@ -4556,6 +4565,13 @@ static int dm_update_crtcs_state(struct dc *dc,
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
acrtc = to_amdgpu_crtc(crtc);
+ new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
+
+ if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
/* TODO This hack should go away */
@@ -4752,7 +4768,7 @@ static int dm_update_planes_state(struct dc *dc,
if (!dm_old_crtc_state->stream)
continue;
- DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
+ DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, old_plane_crtc->base.id);
if (!dc_remove_plane_from_context(
@@ -4840,33 +4856,6 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}
-static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
- struct drm_crtc *crtc)
-{
- struct drm_plane *plane;
- struct drm_crtc_state *crtc_state;
-
- WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
-
- drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
- struct drm_plane_state *plane_state =
- drm_atomic_get_plane_state(state, plane);
-
- if (IS_ERR(plane_state))
- return -EDEADLK;
-
- crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- if (crtc->primary == plane && crtc_state->active) {
- if (!plane_state->fb)
- return -EINVAL;
- }
- }
- return 0;
-}
-
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -4890,10 +4879,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- ret = dm_atomic_check_plane_state_fb(state, crtc);
- if (ret)
- goto fail;
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed)
continue;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index f6cb502c303f..25f064c01038 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -138,13 +138,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
lut = (struct drm_color_lut *)blob->data;
lut_size = blob->length / sizeof(struct drm_color_lut);
- if (__is_lut_linear(lut, lut_size)) {
- /* Set to bypass if lut is set to linear */
- stream->out_transfer_func->type = TF_TYPE_BYPASS;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
- return 0;
- }
-
gamma = dc_create_gamma();
if (!gamma)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 490017df371d..4be21bf54749 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -329,14 +329,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
int src;
struct irq_list_head *lh;
+ unsigned long irq_table_flags;
DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
-
for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
-
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
/* The handler was removed from the table,
* it means it is safe to flush all the 'work'
* (because no code can schedule a new one). */
lh = &adev->dm.irq_handler_list_low_tab[src];
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
flush_work(&lh->work);
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 8291d74f26bc..4304d9e408b8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -83,21 +83,22 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
I2C_MOT_TRUE : I2C_MOT_FALSE;
enum ddc_result res;
- ssize_t read_bytes;
+ uint32_t read_bytes = msg->size;
if (WARN_ON(msg->size > 16))
return -E2BIG;
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_READ:
- read_bytes = dal_ddc_service_read_dpcd_data(
+ res = dal_ddc_service_read_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
false,
I2C_MOT_UNDEF,
msg->address,
msg->buffer,
- msg->size);
- return read_bytes;
+ msg->size,
+ &read_bytes);
+ break;
case DP_AUX_NATIVE_WRITE:
res = dal_ddc_service_write_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
@@ -108,14 +109,15 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
msg->size);
break;
case DP_AUX_I2C_READ:
- read_bytes = dal_ddc_service_read_dpcd_data(
+ res = dal_ddc_service_read_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
true,
mot,
msg->address,
msg->buffer,
- msg->size);
- return read_bytes;
+ msg->size,
+ &read_bytes);
+ break;
case DP_AUX_I2C_WRITE:
res = dal_ddc_service_write_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
@@ -137,7 +139,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
r == DDC_RESULT_SUCESSFULL);
#endif
- return msg->size;
+ if (res != DDC_RESULT_SUCESSFULL)
+ return -EIO;
+ return read_bytes;
}
static enum drm_connector_status
@@ -161,6 +165,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
+ if (amdgpu_dm_connector->edid) {
+ kfree(amdgpu_dm_connector->edid);
+ amdgpu_dm_connector->edid = NULL;
+ }
+
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
drm_connector_cleanup(connector);
@@ -181,28 +190,22 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct edid *edid;
struct dc_sink *dc_sink;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+ /* FIXME: none of this is safe. We shouldn't touch aconnector here in
+ * atomic_check.
+ */
+
/*
* TODO: Need to further figure out why ddc.algo is NULL while MST port exists
*/
if (!aconnector->port || !aconnector->port->aux.ddc.algo)
return;
- edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
-
- if (!edid) {
- drm_mode_connector_update_edid_property(
- &aconnector->base,
- NULL);
- return;
- }
-
- aconnector->edid = edid;
+ ASSERT(aconnector->edid);
dc_sink = dc_link_add_remote_sink(
aconnector->dc_link,
@@ -215,9 +218,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
amdgpu_dm_add_sink_to_freesync_module(
connector, aconnector->edid);
-
- drm_mode_connector_update_edid_property(
- &aconnector->base, aconnector->edid);
}
static int dm_dp_mst_get_modes(struct drm_connector *connector)
@@ -230,10 +230,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (!aconnector->edid) {
struct edid *edid;
- struct dc_sink *dc_sink;
- struct dc_sink_init_data init_params = {
- .link = aconnector->dc_link,
- .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
if (!edid) {
@@ -244,11 +240,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
}
aconnector->edid = edid;
+ }
+ if (!aconnector->dc_sink) {
+ struct dc_sink *dc_sink;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
dc_sink = dc_link_add_remote_sink(
aconnector->dc_link,
- (uint8_t *)edid,
- (edid->extensions + 1) * EDID_LENGTH,
+ (uint8_t *)aconnector->edid,
+ (aconnector->edid->extensions + 1) * EDID_LENGTH,
&init_params);
dc_sink->priv = aconnector;
@@ -256,12 +258,12 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (aconnector->dc_sink)
amdgpu_dm_add_sink_to_freesync_module(
- connector, edid);
-
- drm_mode_connector_update_edid_property(
- &aconnector->base, edid);
+ connector, aconnector->edid);
}
+ drm_mode_connector_update_edid_property(
+ &aconnector->base, aconnector->edid);
+
ret = drm_add_edid_modes(connector, aconnector->edid);
return ret;
@@ -424,14 +426,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
}
- if (aconnector->edid) {
- kfree(aconnector->edid);
- aconnector->edid = NULL;
- }
-
- drm_mode_connector_update_edid_property(
- &aconnector->base,
- NULL);
aconnector->mst_connected = false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 985fe8c22875..10a5807a7e8b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -70,6 +70,10 @@ static enum bp_result get_firmware_info_v3_1(
struct bios_parser *bp,
struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v3_2(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
struct atom_display_object_path_v2 *object);
@@ -1321,9 +1325,11 @@ static enum bp_result bios_parser_get_firmware_info(
case 3:
switch (revision.minor) {
case 1:
- case 2:
result = get_firmware_info_v3_1(bp, info);
break;
+ case 2:
+ result = get_firmware_info_v3_2(bp, info);
+ break;
default:
break;
}
@@ -1383,6 +1389,84 @@ static enum bp_result get_firmware_info_v3_1(
return BP_RESULT_OK;
}
+static enum bp_result get_firmware_info_v3_2(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ struct atom_firmware_info_v3_2 *firmware_info;
+ struct atom_display_controller_info_v4_1 *dce_info = NULL;
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision;
+ struct atom_smu_info_v3_2 *smu_info_v3_2 = NULL;
+ struct atom_smu_info_v3_3 *smu_info_v3_3 = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ firmware_info = GET_IMAGE(struct atom_firmware_info_v3_2,
+ DATA_TABLES(firmwareinfo));
+
+ dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
+ DATA_TABLES(dce_info));
+
+ if (!firmware_info || !dce_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(smu_info));
+ get_atom_data_table_revision(header, &revision);
+
+ if (revision.minor == 2) {
+ /* Vega12 */
+ smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
+ DATA_TABLES(smu_info));
+
+ if (!smu_info_v3_2)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
+ } else if (revision.minor == 3) {
+ /* Vega20 */
+ smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
+ DATA_TABLES(smu_info));
+
+ if (!smu_info_v3_3)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
+ }
+
+ // We need to convert from 10KHz units into KHz units.
+ info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
+
+ /* 27MHz for Vega10 & Vega12; 100MHz for Vega20 */
+ info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10;
+ /* Hardcode frequency if BIOS gives no DCE Ref Clk */
+ if (info->pll_info.crystal_frequency == 0) {
+ if (revision.minor == 2)
+ info->pll_info.crystal_frequency = 27000;
+ else if (revision.minor == 3)
+ info->pll_info.crystal_frequency = 100000;
+ }
+ /* dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it */
+ info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10;
+ info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10;
+
+ /* Get GPU PLL VCO Clock */
+ if (bp->cmd_tbl.get_smu_clock_info != NULL) {
+ if (revision.minor == 2)
+ info->smu_gpu_pll_output_freq =
+ bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
+ else if (revision.minor == 3)
+ info->smu_gpu_pll_output_freq =
+ bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10;
+ }
+
+ return BP_RESULT_OK;
+}
+
static enum bp_result bios_parser_get_encoder_cap_info(
struct dc_bios *dcb,
struct graphics_object_id object_id,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index eeb04471b2f5..6d1c4981a185 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1997,6 +1997,19 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
return true;
}
+bool dc_link_set_abm_disable(const struct dc_link *link)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct abm *abm = core_dc->res_pool->abm;
+
+ if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+ return false;
+
+ abm->funcs->set_abm_immediate_disable(abm);
+
+ return true;
+}
+
bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
{
struct dc *core_dc = link->ctx->dc;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 49c2face1e7a..ae48d603ebd6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -629,13 +629,14 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
-ssize_t dal_ddc_service_read_dpcd_data(
+enum ddc_result dal_ddc_service_read_dpcd_data(
struct ddc_service *ddc,
bool i2c,
enum i2c_mot_mode mot,
uint32_t address,
uint8_t *data,
- uint32_t len)
+ uint32_t len,
+ uint32_t *read)
{
struct aux_payload read_payload = {
.i2c_over_aux = i2c,
@@ -652,6 +653,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
.mot = mot
};
+ *read = 0;
+
if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
BREAK_TO_DEBUGGER();
return DDC_RESULT_FAILED_INVALID_OPERATION;
@@ -661,7 +664,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
ddc->ctx->i2caux,
ddc->ddc_pin,
&command)) {
- return (ssize_t)command.payloads->length;
+ *read = command.payloads->length;
+ return DDC_RESULT_SUCESSFULL;
}
return DDC_RESULT_FAILED_OPERATION;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ade5b8ee9c3c..132eef3826e2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -66,8 +66,8 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
{
struct dc *core_dc = dc;
- struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state),
- GFP_KERNEL);
+ struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
+ GFP_KERNEL);
if (NULL == plane_state)
return NULL;
@@ -120,7 +120,7 @@ static void dc_plane_state_free(struct kref *kref)
{
struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
destruct(plane_state);
- kfree(plane_state);
+ kvfree(plane_state);
}
void dc_plane_state_release(struct dc_plane_state *plane_state)
@@ -136,7 +136,7 @@ void dc_gamma_retain(struct dc_gamma *gamma)
static void dc_gamma_free(struct kref *kref)
{
struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount);
- kfree(gamma);
+ kvfree(gamma);
}
void dc_gamma_release(struct dc_gamma **gamma)
@@ -147,7 +147,7 @@ void dc_gamma_release(struct dc_gamma **gamma)
struct dc_gamma *dc_create_gamma(void)
{
- struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL);
+ struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL);
if (gamma == NULL)
goto alloc_fail;
@@ -167,7 +167,7 @@ void dc_transfer_func_retain(struct dc_transfer_func *tf)
static void dc_transfer_func_free(struct kref *kref)
{
struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount);
- kfree(tf);
+ kvfree(tf);
}
void dc_transfer_func_release(struct dc_transfer_func *tf)
@@ -177,7 +177,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
struct dc_transfer_func *dc_create_transfer_func(void)
{
- struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL);
+ struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
if (tf == NULL)
goto alloc_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index fb4d9eafdc6e..dc34515ef01f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -132,6 +132,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
uint32_t frame_ramp, const struct dc_stream_state *stream);
+bool dc_link_set_abm_disable(const struct dc_link *dc_link);
+
bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 444558ca6533..162f6a6c4208 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -735,6 +735,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
if (info_frame->avi.valid) {
const uint32_t *content =
(const uint32_t *) &info_frame->avi.sb[0];
+ /* We need to turn on the clock before programming the AFMT block */
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
REG_WRITE(AFMT_AVI_INFO0, content[0]);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 775d3bf0bd39..9150d2694450 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -102,6 +102,43 @@ static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
return 256 * ((pixels + 255) / 256);
}
+static void reset_lb_on_vblank(struct dc_context *ctx)
+{
+ uint32_t value, frame_count;
+ uint32_t retry = 0;
+ uint32_t status_pos =
+ dm_read_reg(ctx, mmCRTC_STATUS_POSITION);
+
+
+ /* Wait for one frame only if the CRTC is enabled and the counter is moving. */
+ if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) {
+ /* Resetting LB on VBlank */
+ value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+ set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
+ set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
+ dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
+
+ frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
+
+
+ for (retry = 100; retry > 0; retry--) {
+ if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
+ break;
+ msleep(1);
+ }
+ if (!retry)
+ dm_error("Frame count did not increase for 100ms.\n");
+
+ /* Resetting LB on VBlank */
+ value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+ set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
+ set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
+ dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
+
+ }
+
+}
+
static void wait_for_fbc_state_changed(
struct dce110_compressor *cp110,
bool enabled)
@@ -232,19 +269,23 @@ void dce110_compressor_disable_fbc(struct compressor *compressor)
{
struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
- if (compressor->options.bits.FBC_SUPPORT &&
- dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
- uint32_t reg_data;
- /* Turn off compression */
- reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
- set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
- dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
-
- /* Reset enum controller_id to undefined */
- compressor->attached_inst = 0;
- compressor->is_enabled = false;
-
- wait_for_fbc_state_changed(cp110, false);
+ if (compressor->options.bits.FBC_SUPPORT) {
+ if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ uint32_t reg_data;
+ /* Turn off compression */
+ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
+
+ /* Reset enum controller_id to undefined */
+ compressor->attached_inst = 0;
+ compressor->is_enabled = false;
+
+ wait_for_fbc_state_changed(cp110, false);
+ }
+
+ /* Sync line buffer - dce100/110 only */
+ reset_lb_on_vblank(compressor->ctx);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 30dd62f0f5fa..d0575999f172 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -453,10 +453,13 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
} else {
/* 10 segments
- * segment is from 2^-10 to 2^0
+ * segment is from 2^-10 to 2^1
+ * We include an extra segment for range [2^0, 2^1). This is to
+ * ensure that colors with normalized values of 1 don't miss the
+ * LUT.
*/
region_start = -10;
- region_end = 0;
+ region_end = 1;
seg_distr[0] = 4;
seg_distr[1] = 4;
@@ -468,7 +471,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
seg_distr[7] = 4;
seg_distr[8] = 4;
seg_distr[9] = 4;
- seg_distr[10] = -1;
+ seg_distr[10] = 0;
seg_distr[11] = -1;
seg_distr[12] = -1;
seg_distr[13] = -1;
@@ -1016,8 +1019,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
- if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
link->dc->hwss.edp_backlight_control(link, false);
+ dc_link_set_abm_disable(link);
+ }
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 090b7a8dd67b..30b3a08b91be 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -102,13 +102,14 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
-ssize_t dal_ddc_service_read_dpcd_data(
+enum ddc_result dal_ddc_service_read_dpcd_data(
struct ddc_service *ddc,
bool i2c,
enum i2c_mot_mode mot,
uint32_t address,
uint8_t *data,
- uint32_t len);
+ uint32_t len,
+ uint32_t *read);
enum ddc_result dal_ddc_service_write_dpcd_data(
struct ddc_service *ddc,
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 9831cb5eaa7c..9b0a04f99ac8 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -113,9 +113,14 @@
#define AI_GREENLAND_P_A0 1
#define AI_GREENLAND_P_A1 2
+#define AI_UNKNOWN 0xFF
-#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_UNKNOWN)
-#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_UNKNOWN)
+#define AI_VEGA12_P_A0 20
+#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_VEGA12_P_A0)
+#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_VEGA12_P_A0)
+
+#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
+#define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
/* DCN1_0 */
#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index e7e374f56864..b3747a019deb 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1093,19 +1093,19 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
- rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
- GFP_KERNEL);
+ rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_user)
goto rgb_user_alloc_fail;
- rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
- GFP_KERNEL);
+ rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
- axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
- GFP_KERNEL);
+ axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
+ GFP_KERNEL);
if (!axix_x)
goto axix_x_alloc_fail;
- coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+ coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
if (!coeff)
goto coeff_alloc_fail;
@@ -1157,13 +1157,13 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
ret = true;
- kfree(coeff);
+ kvfree(coeff);
coeff_alloc_fail:
- kfree(axix_x);
+ kvfree(axix_x);
axix_x_alloc_fail:
- kfree(rgb_regamma);
+ kvfree(rgb_regamma);
rgb_regamma_alloc_fail:
- kfree(rgb_user);
+ kvfree(rgb_user);
rgb_user_alloc_fail:
return ret;
}
@@ -1192,19 +1192,19 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
- rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
- GFP_KERNEL);
+ rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_user)
goto rgb_user_alloc_fail;
- curve = kzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
- GFP_KERNEL);
+ curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!curve)
goto curve_alloc_fail;
- axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
- GFP_KERNEL);
+ axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!axix_x)
goto axix_x_alloc_fail;
- coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+ coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
if (!coeff)
goto coeff_alloc_fail;
@@ -1246,13 +1246,13 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
ret = true;
- kfree(coeff);
+ kvfree(coeff);
coeff_alloc_fail:
- kfree(axix_x);
+ kvfree(axix_x);
axix_x_alloc_fail:
- kfree(curve);
+ kvfree(curve);
curve_alloc_fail:
- kfree(rgb_user);
+ kvfree(rgb_user);
rgb_user_alloc_fail:
return ret;
@@ -1281,8 +1281,9 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
} else if (trans == TRANSFER_FUNCTION_PQ) {
- rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
- _EXTRA_POINTS), GFP_KERNEL);
+ rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
+ (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
points->end_exponent = 7;
@@ -1302,11 +1303,12 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
- kfree(rgb_regamma);
+ kvfree(rgb_regamma);
} else if (trans == TRANSFER_FUNCTION_SRGB ||
trans == TRANSFER_FUNCTION_BT709) {
- rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
- _EXTRA_POINTS), GFP_KERNEL);
+ rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
+ (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
points->end_exponent = 0;
@@ -1324,7 +1326,7 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
- kfree(rgb_regamma);
+ kvfree(rgb_regamma);
}
rgb_regamma_alloc_fail:
return ret;
@@ -1348,8 +1350,9 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
} else if (trans == TRANSFER_FUNCTION_PQ) {
- rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
- _EXTRA_POINTS), GFP_KERNEL);
+ rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
+ (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_degamma)
goto rgb_degamma_alloc_fail;
@@ -1364,11 +1367,12 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
- kfree(rgb_degamma);
+ kvfree(rgb_degamma);
} else if (trans == TRANSFER_FUNCTION_SRGB ||
trans == TRANSFER_FUNCTION_BT709) {
- rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
- _EXTRA_POINTS), GFP_KERNEL);
+ rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
+ (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_degamma)
goto rgb_degamma_alloc_fail;
@@ -1382,7 +1386,7 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
- kfree(rgb_degamma);
+ kvfree(rgb_degamma);
}
points->end_exponent = 0;
points->x_point_at_y1_red = 1;
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 3ae3da4e7c14..de177ce8ca80 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -501,6 +501,32 @@ enum atom_cooling_solution_id{
LIQUID_COOLING = 0x01
};
+struct atom_firmware_info_v3_2 {
+ struct atom_common_table_header table_header;
+ uint32_t firmware_revision;
+ uint32_t bootup_sclk_in10khz;
+ uint32_t bootup_mclk_in10khz;
+ uint32_t firmware_capability; // enum atombios_firmware_capability
+ uint32_t main_call_parser_entry; /* direct address of main parser call in VBIOS binary. */
+ uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address
+ uint16_t bootup_vddc_mv;
+ uint16_t bootup_vddci_mv;
+ uint16_t bootup_mvddc_mv;
+ uint16_t bootup_vddgfx_mv;
+ uint8_t mem_module_id;
+ uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... */
+ uint8_t reserved1[2];
+ uint32_t mc_baseaddr_high;
+ uint32_t mc_baseaddr_low;
+ uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def
+ uint8_t board_i2c_feature_gpio_id; // i2c id find in gpio_lut data table gpio_id
+ uint8_t board_i2c_feature_slave_addr;
+ uint8_t reserved3;
+ uint16_t bootup_mvddq_mv;
+ uint16_t bootup_mvpp_mv;
+ uint32_t zfbstartaddrin16mb;
+ uint32_t reserved2[3];
+};
/*
***************************************************************************
@@ -1169,7 +1195,29 @@ struct atom_gfx_info_v2_2
uint32_t rlc_gpu_timer_refclk;
};
-
+struct atom_gfx_info_v2_3 {
+ struct atom_common_table_header table_header;
+ uint8_t gfxip_min_ver;
+ uint8_t gfxip_max_ver;
+ uint8_t max_shader_engines;
+ uint8_t max_tile_pipes;
+ uint8_t max_cu_per_sh;
+ uint8_t max_sh_per_se;
+ uint8_t max_backends_per_se;
+ uint8_t max_texture_channel_caches;
+ uint32_t regaddr_cp_dma_src_addr;
+ uint32_t regaddr_cp_dma_src_addr_hi;
+ uint32_t regaddr_cp_dma_dst_addr;
+ uint32_t regaddr_cp_dma_dst_addr_hi;
+ uint32_t regaddr_cp_dma_command;
+ uint32_t regaddr_cp_status;
+ uint32_t regaddr_rlc_gpu_clock_32;
+ uint32_t rlc_gpu_timer_refclk;
+ uint8_t active_cu_per_sh;
+ uint8_t active_rb_per_se;
+ uint16_t gcgoldenoffset;
+ uint32_t rm21_sram_vmin_value;
+};
/*
***************************************************************************
@@ -1198,6 +1246,76 @@ struct atom_smu_info_v3_1
uint8_t fw_ctf_polarity; // GPIO polarity for CTF
};
+struct atom_smu_info_v3_2 {
+ struct atom_common_table_header table_header;
+ uint8_t smuip_min_ver;
+ uint8_t smuip_max_ver;
+ uint8_t smu_rsd1;
+ uint8_t gpuclk_ss_mode;
+ uint16_t sclk_ss_percentage;
+ uint16_t sclk_ss_rate_10hz;
+ uint16_t gpuclk_ss_percentage; // in unit of 0.001%
+ uint16_t gpuclk_ss_rate_10hz;
+ uint32_t core_refclk_10khz;
+ uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid
+ uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching
+ uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid
+ uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event
+ uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid
+ uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event
+ uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid
+ uint8_t fw_ctf_polarity; // GPIO polarity for CTF
+ uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid
+ uint8_t pcc_gpio_polarity; // GPIO polarity for PCC
+ uint16_t smugoldenoffset;
+ uint32_t gpupll_vco_freq_10khz;
+ uint32_t bootup_smnclk_10khz;
+ uint32_t bootup_socclk_10khz;
+ uint32_t bootup_mp0clk_10khz;
+ uint32_t bootup_mp1clk_10khz;
+ uint32_t bootup_lclk_10khz;
+ uint32_t bootup_dcefclk_10khz;
+ uint32_t ctf_threshold_override_value;
+ uint32_t reserved[5];
+};
+
+struct atom_smu_info_v3_3 {
+ struct atom_common_table_header table_header;
+ uint8_t smuip_min_ver;
+ uint8_t smuip_max_ver;
+ uint8_t smu_rsd1;
+ uint8_t gpuclk_ss_mode;
+ uint16_t sclk_ss_percentage;
+ uint16_t sclk_ss_rate_10hz;
+ uint16_t gpuclk_ss_percentage; // in unit of 0.001%
+ uint16_t gpuclk_ss_rate_10hz;
+ uint32_t core_refclk_10khz;
+ uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid
+ uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching
+ uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid
+ uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event
+ uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid
+ uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event
+ uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid
+ uint8_t fw_ctf_polarity; // GPIO polarity for CTF
+ uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid
+ uint8_t pcc_gpio_polarity; // GPIO polarity for PCC
+ uint16_t smugoldenoffset;
+ uint32_t gpupll_vco_freq_10khz;
+ uint32_t bootup_smnclk_10khz;
+ uint32_t bootup_socclk_10khz;
+ uint32_t bootup_mp0clk_10khz;
+ uint32_t bootup_mp1clk_10khz;
+ uint32_t bootup_lclk_10khz;
+ uint32_t bootup_dcefclk_10khz;
+ uint32_t ctf_threshold_override_value;
+ uint32_t syspll3_0_vco_freq_10khz;
+ uint32_t syspll3_1_vco_freq_10khz;
+ uint32_t bootup_fclk_10khz;
+ uint32_t bootup_waflclk_10khz;
+ uint32_t reserved[3];
+};
+
/*
***************************************************************************
Data Table smc_dpm_info structure
@@ -1264,9 +1382,9 @@ struct atom_smc_dpm_info_v4_1
uint8_t ledpin2;
uint8_t padding8_4;
- uint8_t gfxclkspreadenabled;
- uint8_t gfxclkspreadpercent;
- uint16_t gfxclkspreadfreq;
+ uint8_t pllgfxclkspreadenabled;
+ uint8_t pllgfxclkspreadpercent;
+ uint16_t pllgfxclkspreadfreq;
uint8_t uclkspreadenabled;
uint8_t uclkspreadpercent;
@@ -1276,9 +1394,12 @@ struct atom_smc_dpm_info_v4_1
uint8_t socclkspreadpercent;
uint16_t socclkspreadfreq;
- uint32_t boardreserved[3];
-};
+ uint8_t acggfxclkspreadenabled;
+ uint8_t acggfxclkspreadpercent;
+ uint16_t acggfxclkspreadfreq;
+ uint32_t boardreserved[10];
+};
/*
***************************************************************************
@@ -1860,6 +1981,55 @@ enum atom_smu9_syspll0_clock_id
SMU9_SYSPLL0_DISPCLK_ID = 11, // DISPCLK
};
+enum atom_smu11_syspll_id {
+ SMU11_SYSPLL0_ID = 0,
+ SMU11_SYSPLL1_0_ID = 1,
+ SMU11_SYSPLL1_1_ID = 2,
+ SMU11_SYSPLL1_2_ID = 3,
+ SMU11_SYSPLL2_ID = 4,
+ SMU11_SYSPLL3_0_ID = 5,
+ SMU11_SYSPLL3_1_ID = 6,
+};
+
+
+enum atom_smu11_syspll0_clock_id {
+ SMU11_SYSPLL0_SOCCLK_ID = 0, // SOCCLK
+ SMU11_SYSPLL0_MP0CLK_ID = 1, // MP0CLK
+ SMU11_SYSPLL0_DCLK_ID = 2, // DCLK
+ SMU11_SYSPLL0_VCLK_ID = 3, // VCLK
+ SMU11_SYSPLL0_ECLK_ID = 4, // ECLK
+ SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK
+};
+
+
+enum atom_smu11_syspll1_0_clock_id {
+ SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a
+};
+
+enum atom_smu11_syspll1_1_clock_id {
+ SMU11_SYSPLL1_0_UCLKB_ID = 0, // UCLK_b
+};
+
+enum atom_smu11_syspll1_2_clock_id {
+ SMU11_SYSPLL1_0_FCLK_ID = 0, // FCLK
+};
+
+enum atom_smu11_syspll2_clock_id {
+ SMU11_SYSPLL2_GFXCLK_ID = 0, // GFXCLK
+};
+
+enum atom_smu11_syspll3_0_clock_id {
+ SMU11_SYSPLL3_0_WAFCLK_ID = 0, // WAFCLK
+ SMU11_SYSPLL3_0_DISPCLK_ID = 1, // DISPCLK
+ SMU11_SYSPLL3_0_DPREFCLK_ID = 2, // DPREFCLK
+};
+
+enum atom_smu11_syspll3_1_clock_id {
+ SMU11_SYSPLL3_1_MP1CLK_ID = 0, // MP1CLK
+ SMU11_SYSPLL3_1_SMNCLK_ID = 1, // SMNCLK
+ SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK
+};
+
struct atom_get_smu_clock_info_output_parameters_v3_1
{
union {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index faf9c880e4f7..210fb3ecd213 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -32,7 +32,7 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
vega10_thermal.o smu10_hwmgr.o pp_psm.o\
vega12_processpptables.o vega12_hwmgr.o \
- vega12_powertune.o vega12_thermal.o \
+ vega12_thermal.o \
pp_overdriver.o smu_helper.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 55f9b30513ff..ad42caac033e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -616,9 +616,9 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
param->ledpin1 = info->ledpin1;
param->ledpin2 = info->ledpin2;
- param->gfxclkspreadenabled = info->gfxclkspreadenabled;
- param->gfxclkspreadpercent = info->gfxclkspreadpercent;
- param->gfxclkspreadfreq = info->gfxclkspreadfreq;
+ param->pllgfxclkspreadenabled = info->pllgfxclkspreadenabled;
+ param->pllgfxclkspreadpercent = info->pllgfxclkspreadpercent;
+ param->pllgfxclkspreadfreq = info->pllgfxclkspreadfreq;
param->uclkspreadenabled = info->uclkspreadenabled;
param->uclkspreadpercent = info->uclkspreadpercent;
@@ -628,5 +628,9 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
param->socclkspreadpercent = info->socclkspreadpercent;
param->socclkspreadfreq = info->socclkspreadfreq;
+ param->acggfxclkspreadenabled = info->acggfxclkspreadenabled;
+ param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
+ param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index a957d8f08029..8df1e84f27c9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -192,9 +192,9 @@ struct pp_atomfwctrl_smc_dpm_parameters
uint8_t ledpin1;
uint8_t ledpin2;
- uint8_t gfxclkspreadenabled;
- uint8_t gfxclkspreadpercent;
- uint16_t gfxclkspreadfreq;
+ uint8_t pllgfxclkspreadenabled;
+ uint8_t pllgfxclkspreadpercent;
+ uint16_t pllgfxclkspreadfreq;
uint8_t uclkspreadenabled;
uint8_t uclkspreadpercent;
@@ -203,6 +203,10 @@ struct pp_atomfwctrl_smc_dpm_parameters
uint8_t socclkspreadenabled;
uint8_t socclkspreadpercent;
uint16_t socclkspreadfreq;
+
+ uint8_t acggfxclkspreadenabled;
+ uint8_t acggfxclkspreadpercent;
+ uint16_t acggfxclkspreadfreq;
};
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 2b0c366d6149..18b5b2ff47fe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -79,12 +79,13 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
-static const struct profile_mode_setting smu7_profiling[5] =
+static const struct profile_mode_setting smu7_profiling[6] =
{{1, 0, 100, 30, 1, 0, 100, 10},
{1, 10, 0, 30, 0, 0, 0, 0},
{0, 0, 0, 0, 1, 10, 16, 31},
{1, 0, 11, 50, 1, 0, 100, 10},
{1, 0, 5, 30, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0, 0, 0},
};
/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
@@ -3374,7 +3375,8 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
"Failed to start pm status log!",
return -1);
- msleep_interruptible(20);
+ /* Sampling period from 50ms to 4sec */
+ msleep_interruptible(200);
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PmStatusLogSample),
@@ -4742,23 +4744,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
for (i=0; i < dep_table->count; i++) {
if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
- break;
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+ return;
}
}
- if (i == dep_table->count)
+ if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ }
dep_table = table_info->vdd_dep_on_sclk;
odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
for (i=0; i < dep_table->count; i++) {
if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
- break;
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+ return;
}
}
- if (i == dep_table->count)
+ if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ }
}
static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
@@ -4859,6 +4865,17 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting);
for (i = 0; i < len; i++) {
+ if (i == hwmgr->power_profile_mode) {
+ size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
+ i, profile_name[i], "*",
+ data->current_profile_setting.sclk_up_hyst,
+ data->current_profile_setting.sclk_down_hyst,
+ data->current_profile_setting.sclk_activity,
+ data->current_profile_setting.mclk_up_hyst,
+ data->current_profile_setting.mclk_down_hyst,
+ data->current_profile_setting.mclk_activity);
+ continue;
+ }
if (smu7_profiling[i].bupdate_sclk)
size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
@@ -4878,24 +4895,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
"-", "-", "-");
}
- size += sprintf(buf + size, "%3d %16s: %8d %16d %16d %16d %16d %16d\n",
- i, profile_name[i],
- data->custom_profile_setting.sclk_up_hyst,
- data->custom_profile_setting.sclk_down_hyst,
- data->custom_profile_setting.sclk_activity,
- data->custom_profile_setting.mclk_up_hyst,
- data->custom_profile_setting.mclk_down_hyst,
- data->custom_profile_setting.mclk_activity);
-
- size += sprintf(buf + size, "%3s %16s: %8d %16d %16d %16d %16d %16d\n",
- "*", "CURRENT",
- data->current_profile_setting.sclk_up_hyst,
- data->current_profile_setting.sclk_down_hyst,
- data->current_profile_setting.sclk_activity,
- data->current_profile_setting.mclk_up_hyst,
- data->current_profile_setting.mclk_down_hyst,
- data->current_profile_setting.mclk_activity);
-
return size;
}
@@ -4934,16 +4933,16 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
if (size < 8)
return -EINVAL;
- data->custom_profile_setting.bupdate_sclk = input[0];
- data->custom_profile_setting.sclk_up_hyst = input[1];
- data->custom_profile_setting.sclk_down_hyst = input[2];
- data->custom_profile_setting.sclk_activity = input[3];
- data->custom_profile_setting.bupdate_mclk = input[4];
- data->custom_profile_setting.mclk_up_hyst = input[5];
- data->custom_profile_setting.mclk_down_hyst = input[6];
- data->custom_profile_setting.mclk_activity = input[7];
- if (!smum_update_dpm_settings(hwmgr, &data->custom_profile_setting)) {
- memcpy(&data->current_profile_setting, &data->custom_profile_setting, sizeof(struct profile_mode_setting));
+ tmp.bupdate_sclk = input[0];
+ tmp.sclk_up_hyst = input[1];
+ tmp.sclk_down_hyst = input[2];
+ tmp.sclk_activity = input[3];
+ tmp.bupdate_mclk = input[4];
+ tmp.mclk_up_hyst = input[5];
+ tmp.mclk_down_hyst = input[6];
+ tmp.mclk_activity = input[7];
+ if (!smum_update_dpm_settings(hwmgr, &tmp)) {
+ memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
hwmgr->power_profile_mode = mode;
}
break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index f40179c9ca97..b8d0bb378595 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -325,7 +325,6 @@ struct smu7_hwmgr {
uint16_t mem_latency_high;
uint16_t mem_latency_low;
uint32_t vr_config;
- struct profile_mode_setting custom_profile_setting;
struct profile_mode_setting current_profile_setting;
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 03bc7453f3b1..d9e92e306535 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -852,12 +852,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- n = (n & 0xff) << 8;
-
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PkgPwrSetLimit, n);
+ PPSMC_MSG_PkgPwrSetLimit, n<<8);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 75a465f771f0..7b26607c646a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -319,13 +319,13 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
&size, &frev, &crev);
- if (crev != 9) {
- pr_err("Unsupported IGP table: %d %d\n", frev, crev);
+ if (info == NULL) {
+ pr_err("Could not retrieve the Integrated System Info Table!\n");
return -EINVAL;
}
- if (info == NULL) {
- pr_err("Could not retrieve the Integrated System Info Table!\n");
+ if (crev != 9) {
+ pr_err("Unsupported IGP table: %d %d\n", frev, crev);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 15ce1e825021..200de46bd06b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -33,7 +33,6 @@
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
-#include "vega12_powertune.h"
#include "vega12_inc.h"
#include "pp_soc15.h"
#include "pppcielanes.h"
@@ -893,6 +892,28 @@ static int vega12_odn_initialize_default_settings(
return 0;
}
+static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
+ uint32_t adjust_percent)
+{
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+}
+
+static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
+{
+ int adjust_percent, result = 0;
+
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
+ adjust_percent =
+ hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
+ hwmgr->platform_descriptor.TDPAdjustment :
+ (-1 * hwmgr->platform_descriptor.TDPAdjustment);
+ result = vega12_set_overdrive_target_percentage(hwmgr,
+ (uint32_t)adjust_percent);
+ }
+ return result;
+}
+
static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c
deleted file mode 100644
index 76e60c0181ac..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c
+++ /dev/null
@@ -1,1364 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "vega12_hwmgr.h"
-#include "vega12_powertune.h"
-#include "vega12_smumgr.h"
-#include "vega12_ppsmc.h"
-#include "vega12_inc.h"
-#include "pp_debug.h"
-#include "pp_soc15.h"
-
-static const struct vega12_didt_config_reg SEDiDtTuningCtrlConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ */
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853 },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153 },
-
- /* DIDT_TD */
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde },
-
- /* DIDT_TCP */
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
-
- /* DIDT_DB */
- { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
- { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEDiDtCtrl3Config_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /*DIDT_SQ_CTRL3 */
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK, DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK, DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
-
- /*DIDT_TCP_CTRL3 */
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK, DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
-
- /*DIDT_TD_CTRL3 */
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__THROTTLE_POLICY_MASK, DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
-
- /*DIDT_DB_CTRL3 */
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__THROTTLE_POLICY_MASK, DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK, DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEDiDtCtrl2Config_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ */
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853 },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000 },
-
- /* DIDT_TD */
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
-
- /* DIDT_TCP */
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
-
- /* DIDT_DB */
- { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK, DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
- { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
- { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEDiDtCtrl1Config_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ */
- { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff },
- /* DIDT_TD */
- { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff },
- /* DIDT_TCP */
- { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff },
- /* DIDT_DB */
- { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MIN_POWER_MASK, DIDT_DB_CTRL1__MIN_POWER__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MAX_POWER_MASK, DIDT_DB_CTRL1__MAX_POWER__SHIFT, 0xffff },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-
-static const struct vega12_didt_config_reg SEDiDtWeightConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ */
- { ixDIDT_SQ_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B363B1A },
- { ixDIDT_SQ_WEIGHT4_7, 0xFFFFFFFF, 0, 0x270B2432 },
- { ixDIDT_SQ_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000018 },
-
- /* DIDT_TD */
- { ixDIDT_TD_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B1D220F },
- { ixDIDT_TD_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00007558 },
- { ixDIDT_TD_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
-
- /* DIDT_TCP */
- { ixDIDT_TCP_WEIGHT0_3, 0xFFFFFFFF, 0, 0x5ACE160D },
- { ixDIDT_TCP_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TCP_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
-
- /* DIDT_DB */
- { ixDIDT_DB_WEIGHT0_3, 0xFFFFFFFF, 0, 0x0E152A0F },
- { ixDIDT_DB_WEIGHT4_7, 0xFFFFFFFF, 0, 0x09061813 },
- { ixDIDT_DB_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000013 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEDiDtCtrl0Config_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ */
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
- /* DIDT_TD */
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
- /* DIDT_TCP */
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
- /* DIDT_DB */
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__PHASE_OFFSET_MASK, DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK, DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-
-static const struct vega12_didt_config_reg SEDiDtStallCtrlConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ */
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
-
- /* DIDT_TD */
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
-
- /* DIDT_TCP */
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
-
- /* DIDT_DB */
- { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
- { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
- { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
- { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEDiDtStallPatternConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ_STALL_PATTERN_1_2 */
- { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
- { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
-
- /* DIDT_SQ_STALL_PATTERN_3_4 */
- { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
- { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
-
- /* DIDT_SQ_STALL_PATTERN_5_6 */
- { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
- { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
-
- /* DIDT_SQ_STALL_PATTERN_7 */
- { ixDIDT_SQ_STALL_PATTERN_7, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
-
- /* DIDT_TCP_STALL_PATTERN_1_2 */
- { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
- { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
-
- /* DIDT_TCP_STALL_PATTERN_3_4 */
- { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
- { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
-
- /* DIDT_TCP_STALL_PATTERN_5_6 */
- { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
- { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
-
- /* DIDT_TCP_STALL_PATTERN_7 */
- { ixDIDT_TCP_STALL_PATTERN_7, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
-
- /* DIDT_TD_STALL_PATTERN_1_2 */
- { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
- { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
-
- /* DIDT_TD_STALL_PATTERN_3_4 */
- { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
- { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
-
- /* DIDT_TD_STALL_PATTERN_5_6 */
- { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
- { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
-
- /* DIDT_TD_STALL_PATTERN_7 */
- { ixDIDT_TD_STALL_PATTERN_7, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
-
- /* DIDT_DB_STALL_PATTERN_1_2 */
- { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
- { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
-
- /* DIDT_DB_STALL_PATTERN_3_4 */
- { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
- { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
-
- /* DIDT_DB_STALL_PATTERN_5_6 */
- { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
- { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
-
- /* DIDT_DB_STALL_PATTERN_7 */
- { ixDIDT_DB_STALL_PATTERN_7, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SELCacConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060021 },
- /* TD */
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x028E0020 },
- /* TCP */
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x001c0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x009c0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x011c0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x019c0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x021c0020 },
- /* DB */
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00200008 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00820008 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01020008 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01820008 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-
-static const struct vega12_didt_config_reg SEEDCStallPatternConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00030001 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x000F0007 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x003F001F },
- { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x0000007F },
- /* TD */
- { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
- /* TCP */
- { ixDIDT_TCP_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TCP_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TCP_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TCP_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
- /* DB */
- { ixDIDT_DB_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_DB_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_DB_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_DB_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEEDCForceStallPatternConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
- /* TD */
- { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
- { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEEDCStallDelayConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixDIDT_SQ_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_SQ_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
- /* TD */
- { ixDIDT_TD_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TD_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
- /* TCP */
- { ixDIDT_TCP_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TCP_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
- /* DB */
- { ixDIDT_DB_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEEDCThresholdConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { ixDIDT_SQ_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0x0000010E },
- { ixDIDT_TD_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
- { ixDIDT_TCP_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
- { ixDIDT_DB_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEEDCCtrlResetConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEEDCCtrlConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0004 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0006 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEEDCCtrlForceStallConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000C },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
-
- /* TD */
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg GCDiDtDroopCtrlConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT, 0x0000 },
- { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT, 0x0000 },
- { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT, 0x0000 },
- { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT, 0x0000 },
- { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg GCDiDtCtrl0Config_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK, GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
- { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__PHASE_OFFSET_MASK, GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
- { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_SW_RST_MASK, GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT, 0x0000 },
- { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { 0xFFFFFFFF } /* End of list */
-};
-
-
-static const struct vega12_didt_config_reg PSMSEEDCStallPatternConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC STALL PATTERNs */
- { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT, 0x0101 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT, 0x0101 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT, 0x1111 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT, 0x1111 },
-
- { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT, 0x1515 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT, 0x1515 },
-
- { ixDIDT_SQ_EDC_STALL_PATTERN_7, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT, 0x5555 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMSEEDCStallDelayConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC STALL DELAYs */
- { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT, 0x0000 },
-
- { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMSEEDCThresholdConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC THRESHOLD */
- { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMSEEDCCtrlResetConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC CTRL */
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMSEEDCCtrlConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC CTRL */
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0003 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMGCEDCThresholdConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMGCEDCDroopCtrlConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT, 0x0001 },
- { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT, 0x0384 },
- { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT, 0x0001 },
- { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK, GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT, 0x0001 },
- { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT, 0x0001 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMGCEDCCtrlResetConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMGCEDCCtrlConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg AvfsPSMResetConfig_vega12[]=
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { 0x16A02, 0xFFFFFFFF, 0x0, 0x0000005F },
- { 0x16A05, 0xFFFFFFFF, 0x0, 0x00000001 },
- { 0x16A06, 0x00000001, 0x0, 0x02000000 },
- { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg AvfsPSMInitConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { 0x16A05, 0xFFFFFFFF, 0x18, 0x00000001 },
- { 0x16A05, 0xFFFFFFFF, 0x8, 0x00000003 },
- { 0x16A05, 0xFFFFFFFF, 0xa, 0x00000006 },
- { 0x16A05, 0xFFFFFFFF, 0x7, 0x00000000 },
- { 0x16A06, 0xFFFFFFFF, 0x18, 0x00000001 },
- { 0x16A06, 0xFFFFFFFF, 0x19, 0x00000001 },
- { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static int vega12_program_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega12_didt_config_reg *config_regs, enum vega12_didt_config_reg_type reg_type)
-{
- uint32_t data;
-
- PP_ASSERT_WITH_CODE((config_regs != NULL), "[vega12_program_didt_config_registers] Invalid config register table!", return -EINVAL);
-
- while (config_regs->offset != 0xFFFFFFFF) {
- switch (reg_type) {
- case VEGA12_CONFIGREG_DIDT:
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
- break;
- case VEGA12_CONFIGREG_GCCAC:
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
- break;
- case VEGA12_CONFIGREG_SECAC:
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset);
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset, data);
- break;
- default:
- return -EINVAL;
- }
-
- config_regs++;
- }
-
- return 0;
-}
-
-static int vega12_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega12_didt_config_reg *config_regs)
-{
- uint32_t data;
-
- while (config_regs->offset != 0xFFFFFFFF) {
- data = cgs_read_register(hwmgr->device, config_regs->offset);
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- cgs_write_register(hwmgr->device, config_regs->offset, data);
- config_regs++;
- }
-
- return 0;
-}
-
-static void vega12_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
-{
- uint32_t data;
- int result;
- uint32_t en = (enable ? 1 : 0);
- uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
-
- if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
- CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
- DIDT_SQ_CTRL0, DIDT_CTRL_EN, en);
- didt_block_info &= ~SQ_Enable_MASK;
- didt_block_info |= en << SQ_Enable_SHIFT;
- }
-
- if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
- CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
- DIDT_DB_CTRL0, DIDT_CTRL_EN, en);
- didt_block_info &= ~DB_Enable_MASK;
- didt_block_info |= en << DB_Enable_SHIFT;
- }
-
- if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
- CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
- DIDT_TD_CTRL0, DIDT_CTRL_EN, en);
- didt_block_info &= ~TD_Enable_MASK;
- didt_block_info |= en << TD_Enable_SHIFT;
- }
-
- if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
- DIDT_TCP_CTRL0, DIDT_CTRL_EN, en);
- didt_block_info &= ~TCP_Enable_MASK;
- didt_block_info |= en << TCP_Enable_SHIFT;
- }
-
-#if 0
- if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
- CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
- DIDT_DBR_CTRL0, DIDT_CTRL_EN, en);
- }
-#endif
-
- if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) {
- if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
- }
-
- if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
- }
-
- if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
- }
-
- if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
- }
-
-#if 0
- if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
- }
-#endif
- }
-
- if (enable) {
- /* For Vega12, SMC does not support any mask yet. */
- result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
- PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
- }
-}
-
-static int vega12_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
-{
- int result;
- uint32_t num_se = 0, count, data;
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
-
- num_se = adev->gfx.config.max_shader_engines;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
- for (count = 0; count < num_se; count++) {
- data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
- cgs_write_register(hwmgr->device, reg, data);
-
- result = vega12_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl1Config_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl2Config_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtTuningCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SELCacConfig_Vega12, VEGA12_CONFIGREG_SECAC);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega12, VEGA12_CONFIGREG_DIDT);
-
- if (0 != result)
- break;
- }
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
-
- vega12_didt_set_mask(hwmgr, true);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- return 0;
-}
-
-static int vega12_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
-{
- cgs_enter_safe_mode(hwmgr->device, true);
-
- vega12_didt_set_mask(hwmgr, false);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- return 0;
-}
-
-static int vega12_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
-{
- int result;
- uint32_t num_se = 0, count, data;
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
-
- num_se = adev->gfx.config.max_shader_engines;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
- for (count = 0; count < num_se; count++) {
- data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
- cgs_write_register(hwmgr->device, reg, data);
-
- result = vega12_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega12, VEGA12_CONFIGREG_DIDT);
- if (0 != result)
- break;
- }
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
-
- vega12_didt_set_mask(hwmgr, true);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- vega12_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega12);
- if (PP_CAP(PHM_PlatformCaps_GCEDC))
- vega12_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega12);
-
- if (PP_CAP(PHM_PlatformCaps_PSM))
- vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega12);
-
- return 0;
-}
-
-static int vega12_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
-{
- uint32_t data;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- vega12_didt_set_mask(hwmgr, false);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
- data = 0x00000000;
- cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data);
- }
-
- if (PP_CAP(PHM_PlatformCaps_PSM))
- vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
-
- return 0;
-}
-
-static int vega12_enable_se_edc_config(struct pp_hwmgr *hwmgr)
-{
- int result;
- uint32_t num_se = 0, count, data;
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
-
- num_se = adev->gfx.config.max_shader_engines;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
- for (count = 0; count < num_se; count++) {
- data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
- cgs_write_register(hwmgr->device, reg, data);
- result = vega12_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEEDCThresholdConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlResetConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
-
- if (0 != result)
- break;
- }
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
-
- vega12_didt_set_mask(hwmgr, true);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- return 0;
-}
-
-static int vega12_disable_se_edc_config(struct pp_hwmgr *hwmgr)
-{
- cgs_enter_safe_mode(hwmgr->device, true);
-
- vega12_didt_set_mask(hwmgr, false);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- return 0;
-}
-
-static int vega12_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
-{
- int result;
- uint32_t num_se = 0;
- uint32_t count, data;
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
-
- num_se = adev->gfx.config.max_shader_engines;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
-
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
- for (count = 0; count < num_se; count++) {
- data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
- cgs_write_register(hwmgr->device, reg, data);
- result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
-
- if (0 != result)
- break;
- }
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
-
- vega12_didt_set_mask(hwmgr, true);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega12);
-
- if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
- vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega12);
- vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega12);
- }
-
- if (PP_CAP(PHM_PlatformCaps_PSM))
- vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega12);
-
- return 0;
-}
-
-static int vega12_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
-{
- uint32_t data;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- vega12_didt_set_mask(hwmgr, false);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
- data = 0x00000000;
- cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data);
- }
-
- if (PP_CAP(PHM_PlatformCaps_PSM))
- vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
-
- return 0;
-}
-
-static int vega12_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
-{
- uint32_t reg;
- int result;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
-
- result = vega12_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- if (0 != result)
- return result;
-
- vega12_didt_set_mask(hwmgr, false);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- return 0;
-}
-
-static int vega12_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = vega12_disable_se_edc_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Pre DIDT disable clock gating failed!", return result);
-
- return 0;
-}
-
-int vega12_enable_didt_config(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
- if (data->smu_features[GNLD_DIDT].supported) {
- if (data->smu_features[GNLD_DIDT].enabled)
- PP_DBG_LOG("[EnableDiDtConfig] Feature DiDt Already enabled!\n");
-
- switch (data->registry_data.didt_mode) {
- case 0:
- result = vega12_enable_cac_driving_se_didt_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 0 Failed!", return result);
- break;
- case 2:
- result = vega12_enable_psm_gc_didt_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 2 Failed!", return result);
- break;
- case 3:
- result = vega12_enable_se_edc_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 3 Failed!", return result);
- break;
- case 1:
- case 4:
- case 5:
- result = vega12_enable_psm_gc_edc_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 5 Failed!", return result);
- break;
- case 6:
- result = vega12_enable_se_edc_force_stall_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 6 Failed!", return result);
- break;
- default:
- result = -EINVAL;
- break;
- }
-
-#if 0
- if (0 == result) {
- result = vega12_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
- PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
- data->smu_features[GNLD_DIDT].enabled = true;
- }
-#endif
- }
-
- return result;
-}
-
-int vega12_disable_didt_config(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
- if (data->smu_features[GNLD_DIDT].supported) {
- if (!data->smu_features[GNLD_DIDT].enabled)
- PP_DBG_LOG("[DisableDiDtConfig] Feature DiDt Already Disabled!\n");
-
- switch (data->registry_data.didt_mode) {
- case 0:
- result = vega12_disable_cac_driving_se_didt_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 0 Failed!", return result);
- break;
- case 2:
- result = vega12_disable_psm_gc_didt_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 2 Failed!", return result);
- break;
- case 3:
- result = vega12_disable_se_edc_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 3 Failed!", return result);
- break;
- case 1:
- case 4:
- case 5:
- result = vega12_disable_psm_gc_edc_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 5 Failed!", return result);
- break;
- case 6:
- result = vega12_disable_se_edc_force_stall_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 6 Failed!", return result);
- break;
- default:
- result = -EINVAL;
- break;
- }
-
- if (0 == result) {
- result = vega12_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
- data->smu_features[GNLD_DIDT].enabled = false;
- }
- }
-
- return result;
-}
-
-int vega12_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- struct vega12_hwmgr *data =
- (struct vega12_hwmgr *)(hwmgr->backend);
-
- if (data->smu_features[GNLD_PPT].enabled)
- return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetPptLimit, n);
-
- return 0;
-}
-
-int vega12_enable_power_containment(struct pp_hwmgr *hwmgr)
-{
- struct vega12_hwmgr *data =
- (struct vega12_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v2_information *table_info =
- (struct phm_ppt_v2_information *)(hwmgr->pptable);
- struct phm_tdp_table *tdp_table = table_info->tdp_table;
- uint32_t default_pwr_limit =
- (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
- int result = 0;
-
- if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
- if (data->smu_features[GNLD_PPT].supported)
- PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
- true, data->smu_features[GNLD_PPT].smu_feature_bitmap),
- "Attempt to enable PPT feature Failed!",
- data->smu_features[GNLD_PPT].supported = false);
-
- if (data->smu_features[GNLD_TDC].supported)
- PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
- true, data->smu_features[GNLD_TDC].smu_feature_bitmap),
- "Attempt to enable PPT feature Failed!",
- data->smu_features[GNLD_TDC].supported = false);
-
- result = vega12_set_power_limit(hwmgr, default_pwr_limit);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to set Default Power Limit in SMC!",
- return result);
- }
-
- return result;
-}
-
-int vega12_disable_power_containment(struct pp_hwmgr *hwmgr)
-{
- struct vega12_hwmgr *data =
- (struct vega12_hwmgr *)(hwmgr->backend);
-
- if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
- if (data->smu_features[GNLD_PPT].supported)
- PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
- false, data->smu_features[GNLD_PPT].smu_feature_bitmap),
- "Attempt to disable PPT feature Failed!",
- data->smu_features[GNLD_PPT].supported = false);
-
- if (data->smu_features[GNLD_TDC].supported)
- PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
- false, data->smu_features[GNLD_TDC].smu_feature_bitmap),
- "Attempt to disable PPT feature Failed!",
- data->smu_features[GNLD_TDC].supported = false);
- }
-
- return 0;
-}
-
-static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
- uint32_t adjust_percent)
-{
- return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
-}
-
-int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
-{
- int adjust_percent, result = 0;
-
- if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
- adjust_percent =
- hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
- hwmgr->platform_descriptor.TDPAdjustment :
- (-1 * hwmgr->platform_descriptor.TDPAdjustment);
- result = vega12_set_overdrive_target_percentage(hwmgr,
- (uint32_t)adjust_percent);
- }
- return result;
-}
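
The deleted vega12_powertune.c above programs every DIDT/EDC table entry with the same masked read-modify-write: read the register named by the entry's offset, clear the field selected by the mask, then OR in the value shifted into place, leaving the other bits untouched. A minimal standalone sketch of that per-entry update follows; the struct and the didt_apply_field() helper are illustrative only, not kernel symbols, and they stand in for the cgs indirect-register accessors used by the removed code.

#include <stdint.h>

struct didt_config_reg {
	uint32_t offset;   /* register index in the DIDT/CAC space */
	uint32_t mask;     /* bits belonging to the field           */
	uint32_t shift;    /* position of the field                 */
	uint32_t value;    /* new field value                       */
};

uint32_t didt_apply_field(uint32_t reg, const struct didt_config_reg *cfg)
{
	reg &= ~cfg->mask;                              /* clear the target field */
	reg |= (cfg->value << cfg->shift) & cfg->mask;  /* insert the new value   */
	return reg;
}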
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h
deleted file mode 100644
index 78d31a6747dd..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _VEGA12_POWERTUNE_H_
-#define _VEGA12_POWERTUNE_H_
-
-enum vega12_didt_config_reg_type {
- VEGA12_CONFIGREG_DIDT = 0,
- VEGA12_CONFIGREG_GCCAC,
- VEGA12_CONFIGREG_SECAC
-};
-
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
-
-struct vega12_didt_config_reg {
- uint32_t offset;
- uint32_t mask;
- uint32_t shift;
- uint32_t value;
-};
-
-int vega12_enable_power_containment(struct pp_hwmgr *hwmgr);
-int vega12_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
-int vega12_power_control_set_level(struct pp_hwmgr *hwmgr);
-int vega12_disable_power_containment(struct pp_hwmgr *hwmgr);
-
-int vega12_enable_didt_config(struct pp_hwmgr *hwmgr);
-int vega12_disable_didt_config(struct pp_hwmgr *hwmgr);
-
-#endif /* _VEGA12_POWERTUNE_H_ */
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index e7d794980b84..b34113f45904 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -208,9 +208,9 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->LedPin1 = smc_dpm_table.ledpin1;
ppsmc_pptable->LedPin2 = smc_dpm_table.ledpin2;
- ppsmc_pptable->GfxclkSpreadEnabled = smc_dpm_table.gfxclkspreadenabled;
- ppsmc_pptable->GfxclkSpreadPercent = smc_dpm_table.gfxclkspreadpercent;
- ppsmc_pptable->GfxclkSpreadFreq = smc_dpm_table.gfxclkspreadfreq;
+ ppsmc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table.pllgfxclkspreadenabled;
+ ppsmc_pptable->PllGfxclkSpreadPercent = smc_dpm_table.pllgfxclkspreadpercent;
+ ppsmc_pptable->PllGfxclkSpreadFreq = smc_dpm_table.pllgfxclkspreadfreq;
ppsmc_pptable->UclkSpreadEnabled = 0;
ppsmc_pptable->UclkSpreadPercent = smc_dpm_table.uclkspreadpercent;
@@ -220,6 +220,11 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->SocclkSpreadPercent = smc_dpm_table.socclkspreadpercent;
ppsmc_pptable->SocclkSpreadFreq = smc_dpm_table.socclkspreadfreq;
+ ppsmc_pptable->AcgGfxclkSpreadEnabled = smc_dpm_table.acggfxclkspreadenabled;
+ ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
+ ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
+
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index cd2e503a87da..2f8a3b983cce 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -127,7 +127,7 @@
#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT )
#define FEATURE_GFXOFF_MASK (1 << FEATURE_GFXOFF_BIT )
#define FEATURE_CG_MASK (1 << FEATURE_CG_BIT )
-#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT )
+#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT)
#define FEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT )
#define FEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT )
#define FEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT )
@@ -412,8 +412,10 @@ typedef struct {
QuadraticInt_t ReservedEquation2;
QuadraticInt_t ReservedEquation3;
+ uint16_t MinVoltageUlvGfx;
+ uint16_t MinVoltageUlvSoc;
- uint32_t Reserved[15];
+ uint32_t Reserved[14];
@@ -481,9 +483,9 @@ typedef struct {
uint8_t padding8_4;
- uint8_t GfxclkSpreadEnabled;
- uint8_t GfxclkSpreadPercent;
- uint16_t GfxclkSpreadFreq;
+ uint8_t PllGfxclkSpreadEnabled;
+ uint8_t PllGfxclkSpreadPercent;
+ uint16_t PllGfxclkSpreadFreq;
uint8_t UclkSpreadEnabled;
uint8_t UclkSpreadPercent;
@@ -493,7 +495,11 @@ typedef struct {
uint8_t SocclkSpreadPercent;
uint16_t SocclkSpreadFreq;
- uint32_t BoardReserved[3];
+ uint8_t AcgGfxclkSpreadEnabled;
+ uint8_t AcgGfxclkSpreadPercent;
+ uint16_t AcgGfxclkSpreadFreq;
+
+ uint32_t BoardReserved[10];
uint32_t MmHubPadding[7];
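
In the first hunk above, the two new uint16_t ULV voltage fields take exactly the four bytes freed by shrinking Reserved[15] to Reserved[14], so that part of the SMU driver-interface layout keeps its size. A small illustrative check of that bookkeeping, using made-up struct names rather than the real PPTable_t:

#include <stdint.h>

struct layout_old {
	uint32_t reserved[15];
};

struct layout_new {
	uint16_t min_voltage_ulv_gfx;
	uint16_t min_voltage_ulv_soc;
	uint32_t reserved[14];
};

/* Two u16 fields consume one u32 of reserved space; total size is unchanged. */
_Static_assert(sizeof(struct layout_old) == sizeof(struct layout_new),
	       "reserved area shrinks by the size of the new fields");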
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 55cd204c1789..651a3f28734b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -30,8 +30,7 @@
#include "ppatomctrl.h"
#include "pp_debug.h"
-#include "smu_ucode_xfer_vi.h"
-#include "smu7_smumgr.h"
+
/* MP Apertures */
#define MP0_Public 0x03800000
@@ -392,8 +391,7 @@ static int vega12_smu_init(struct pp_hwmgr *hwmgr)
struct cgs_firmware_info info = {0};
int ret;
- ret = cgs_get_firmware_info(hwmgr->device,
- smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
+ ret = cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU,
&info);
if (ret || !info.kptr)
return -EINVAL;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 3aa65bdecb0e..684ac626ac53 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -74,6 +74,7 @@ config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
depends on OF && RC_CORE
select DRM_KMS_HELPER
+ imply EXTCON
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 498d5948d1a8..9837c8d69e69 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -56,7 +56,9 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
}
drm_mode_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ return ret;
fallback:
/*
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 7d25c42f22db..c825c76edc1d 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -155,6 +155,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
state->connectors[i].state);
state->connectors[i].ptr = NULL;
state->connectors[i].state = NULL;
+ state->connectors[i].old_state = NULL;
+ state->connectors[i].new_state = NULL;
drm_connector_put(connector);
}
@@ -169,6 +171,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
state->crtcs[i].ptr = NULL;
state->crtcs[i].state = NULL;
+ state->crtcs[i].old_state = NULL;
+ state->crtcs[i].new_state = NULL;
}
for (i = 0; i < config->num_total_plane; i++) {
@@ -181,6 +185,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
state->planes[i].state);
state->planes[i].ptr = NULL;
state->planes[i].state = NULL;
+ state->planes[i].old_state = NULL;
+ state->planes[i].new_state = NULL;
}
for (i = 0; i < state->num_private_objs; i++) {
@@ -190,6 +196,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
state->private_objs[i].state);
state->private_objs[i].ptr = NULL;
state->private_objs[i].state = NULL;
+ state->private_objs[i].old_state = NULL;
+ state->private_objs[i].new_state = NULL;
}
state->num_private_objs = 0;
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index 02a50929af67..e7f4fe2848a5 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
{
uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
ssize_t ret;
+ int retry;
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return 0;
- ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
- &tmds_oen, sizeof(tmds_oen));
- if (ret) {
- DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
- enable ? "enable" : "disable");
- return ret;
+ /*
+ * LSPCON adapters in low-power state may ignore the first write, so
+ * read back and verify the written value a few times.
+ */
+ for (retry = 0; retry < 3; retry++) {
+ uint8_t tmp;
+
+ ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+ &tmds_oen, sizeof(tmds_oen));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
+ enable ? "enable" : "disable",
+ retry + 1);
+ return ret;
+ }
+
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
+ &tmp, sizeof(tmp));
+ if (ret) {
+ DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
+ enable ? "enabling" : "disabling",
+ retry + 1);
+ return ret;
+ }
+
+ if (tmp == tmds_oen)
+ return 0;
}
- return 0;
+ DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
+ enable ? "enabling" : "disabling");
+
+ return -EIO;
}
EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
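
The new loop above bounds the LSPCON workaround at three attempts: write TMDS_OEN, read it back, and only report success once the adapter returns the value it was given. A generic sketch of that write-and-verify idiom, with reg_write()/reg_read() as illustrative stand-ins for the drm_dp_dual_mode_write()/read() helpers:

#include <stdint.h>
#include <errno.h>

int write_verified(uint8_t value,
		   int (*reg_write)(uint8_t), int (*reg_read)(uint8_t *))
{
	int retry;

	for (retry = 0; retry < 3; retry++) {
		uint8_t readback;
		int ret;

		ret = reg_write(value);        /* may be ignored in low-power state */
		if (ret)
			return ret;
		ret = reg_read(&readback);
		if (ret)
			return ret;
		if (readback == value)
			return 0;              /* the adapter accepted the write */
	}
	return -EIO;                           /* value never stuck */
}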
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a1b9338736e3..c2c21d839727 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -716,7 +716,7 @@ static void remove_compat_control_link(struct drm_device *dev)
if (!minor)
return;
- name = kasprintf(GFP_KERNEL, "controlD%d", minor->index);
+ name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
if (!name)
return;
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 39ac15ce4702..9e2ae02f31e0 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -65,12 +65,13 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
return -EINVAL;
/* overflow checks for 32bit size calculations */
- /* NOTE: DIV_ROUND_UP() can overflow */
+ if (args->bpp > U32_MAX - 8)
+ return -EINVAL;
cpp = DIV_ROUND_UP(args->bpp, 8);
- if (!cpp || cpp > 0xffffffffU / args->width)
+ if (cpp > U32_MAX / args->width)
return -EINVAL;
stride = cpp * args->width;
- if (args->height > 0xffffffffU / stride)
+ if (args->height > U32_MAX / stride)
return -EINVAL;
/* test for wrap-around */
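
The reworked checks above reject any geometry whose 32-bit byte-size arithmetic could wrap: DIV_ROUND_UP(bpp, 8) expands to (bpp + 7) / 8, which itself overflows for bpp close to U32_MAX, so that case is filtered before the division, and each multiplication is guarded by a division against U32_MAX. A compact sketch of the same checks, assuming bpp, width and height were already validated as non-zero earlier in the handler:

#include <stdint.h>
#include <errno.h>

#define U32_MAX 0xffffffffU

int dumb_size_checks(uint32_t bpp, uint32_t width, uint32_t height)
{
	uint32_t cpp, stride;

	if (bpp > U32_MAX - 8)              /* (bpp + 7) would wrap */
		return -EINVAL;
	cpp = (bpp + 7) / 8;                /* bytes per pixel, rounded up */
	if (cpp > U32_MAX / width)          /* cpp * width would wrap */
		return -EINVAL;
	stride = cpp * width;
	if (height > U32_MAX / stride)      /* stride * height would wrap */
		return -EINVAL;
	return 0;
}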
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 134069f36482..39f1db4acda4 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -4451,6 +4451,7 @@ drm_reset_display_info(struct drm_connector *connector)
info->max_tmds_clock = 0;
info->dvi_dual = false;
info->has_hdmi_infoframe = false;
+ memset(&info->hdmi, 0, sizeof(info->hdmi));
info->non_desktop = 0;
}
@@ -4462,17 +4463,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
u32 quirks = edid_get_quirks(edid);
+ drm_reset_display_info(connector);
+
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
- /* driver figures it out in this case */
- info->bpc = 0;
- info->color_formats = 0;
- info->cea_rev = 0;
- info->max_tmds_clock = 0;
- info->dvi_dual = false;
- info->has_hdmi_infoframe = false;
-
info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index e394799979a6..6d9b9453707c 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
return -ENOMEM;
filp->private_data = priv;
+ filp->f_mode |= FMODE_UNSIGNED_OFFSET;
priv->filp = filp;
priv->pid = get_pid(task_pid(current));
priv->minor = minor;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 0faaf829f5bf..f0e79178bde6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -18,6 +18,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <uapi/drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@@ -26,20 +27,6 @@
#include "exynos_drm_iommu.h"
#include "exynos_drm_crtc.h"
-#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
-
-/*
- * exynos specific framebuffer structure.
- *
- * @fb: drm framebuffer obejct.
- * @exynos_gem: array of exynos specific gem object containing a gem object.
- */
-struct exynos_drm_fb {
- struct drm_framebuffer fb;
- struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
- dma_addr_t dma_addr[MAX_FB_BUFFER];
-};
-
static int check_fb_gem_memory_type(struct drm_device *drm_dev,
struct exynos_drm_gem *exynos_gem)
{
@@ -66,40 +53,9 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
return 0;
}
-static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
-{
- struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- unsigned int i;
-
- drm_framebuffer_cleanup(fb);
-
- for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) {
- struct drm_gem_object *obj;
-
- if (exynos_fb->exynos_gem[i] == NULL)
- continue;
-
- obj = &exynos_fb->exynos_gem[i]->base;
- drm_gem_object_unreference_unlocked(obj);
- }
-
- kfree(exynos_fb);
- exynos_fb = NULL;
-}
-
-static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
-
- return drm_gem_handle_create(file_priv,
- &exynos_fb->exynos_gem[0]->base, handle);
-}
-
static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
- .destroy = exynos_drm_fb_destroy,
- .create_handle = exynos_drm_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
};
struct drm_framebuffer *
@@ -108,12 +64,12 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct exynos_drm_gem **exynos_gem,
int count)
{
- struct exynos_drm_fb *exynos_fb;
+ struct drm_framebuffer *fb;
int i;
int ret;
- exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
- if (!exynos_fb)
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
return ERR_PTR(-ENOMEM);
for (i = 0; i < count; i++) {
@@ -121,23 +77,21 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
if (ret < 0)
goto err;
- exynos_fb->exynos_gem[i] = exynos_gem[i];
- exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr
- + mode_cmd->offsets[i];
+ fb->obj[i] = &exynos_gem[i]->base;
}
- drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
- ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
if (ret < 0) {
DRM_ERROR("failed to initialize framebuffer\n");
goto err;
}
- return &exynos_fb->fb;
+ return fb;
err:
- kfree(exynos_fb);
+ kfree(fb);
return ERR_PTR(ret);
}
@@ -191,12 +145,13 @@ err:
dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
{
- struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+ struct exynos_drm_gem *exynos_gem;
if (WARN_ON_ONCE(index >= MAX_FB_BUFFER))
return 0;
- return exynos_fb->dma_addr[index];
+ exynos_gem = to_exynos_gem(fb->obj[index]);
+ return exynos_gem->dma_addr + fb->offsets[index];
}
static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index abd84cbcf1c2..09c4bc0b1859 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -954,8 +954,6 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
drm_mode_connector_attach_encoder(connector, encoder);
if (hdata->bridge) {
- encoder->bridge = hdata->bridge;
- hdata->bridge->encoder = encoder;
ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
if (ret)
DRM_ERROR("Failed to attach bridge\n");
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 257299ec95c4..272c79f5f5bf 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -473,7 +473,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
luma_addr[1] = luma_addr[0] + fb->pitches[0];
- chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
+ chroma_addr[1] = chroma_addr[0] + fb->pitches[1];
}
} else {
luma_addr[1] = 0;
@@ -482,6 +482,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
spin_lock_irqsave(&ctx->reg_slock, flags);
+ vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
/* interlace or progressive scan mode */
val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
@@ -495,21 +496,23 @@ static void vp_video_buffer(struct mixer_context *ctx,
vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height));
/* chroma plane for NV12/NV21 is half the height of the luma plane */
- vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
+ vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[1]) |
VP_IMG_VSIZE(fb->height / 2));
vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w);
- vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
vp_reg_write(ctx, VP_SRC_H_POSITION,
VP_SRC_H_POSITION_VAL(state->src.x));
- vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
-
vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w);
vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x);
+
if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
+ vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h / 2);
+ vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y / 2);
vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2);
vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2);
} else {
+ vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
+ vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h);
vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y);
}
@@ -699,6 +702,15 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
/* interlace scan need to check shadow register */
if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
+ if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
+ vp_reg_read(ctx, VP_SHADOW_UPDATE))
+ goto out;
+
+ base = mixer_reg_read(ctx, MXR_CFG);
+ shadow = mixer_reg_read(ctx, MXR_CFG_S);
+ if (base != shadow)
+ goto out;
+
base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
if (base != shadow)
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index c311f571bdf9..189cfa2470a8 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -47,6 +47,7 @@
#define MXR_MO 0x0304
#define MXR_RESOLUTION 0x0310
+#define MXR_CFG_S 0x2004
#define MXR_GRAPHIC0_BASE_S 0x2024
#define MXR_GRAPHIC1_BASE_S 0x2044
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index db6b94dda5df..d85939bd7b47 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1080,6 +1080,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
s->workload->pending_events);
+ patch_value(s, cmd_ptr(s, 0), MI_NOOP);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index dd96ffc878ac..6d8180e8d1e2 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -169,6 +169,8 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ int pipe;
+
vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
@@ -267,6 +269,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
if (IS_BROADWELL(dev_priv))
vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+ /* Disable Primary/Sprite/Cursor plane */
+ for_each_pipe(dev_priv, pipe) {
+ vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
+ vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
+ vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
+ }
+
vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index b555eb26f9ce..6f4f8e941fc2 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
struct intel_vgpu_fb_info *fb_info)
{
gvt_dmabuf->drm_format = fb_info->drm_format;
+ gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
gvt_dmabuf->width = fb_info->width;
gvt_dmabuf->height = fb_info->height;
gvt_dmabuf->stride = fb_info->stride;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 6b50fe78dc1b..1c120683e958 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -245,16 +245,13 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->hw_format = fmt;
plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
- gvt_vgpu_err("invalid gma address: %lx\n",
- (unsigned long)plane->base);
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
return -EINVAL;
- }
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("invalid gma address: %lx\n",
- (unsigned long)plane->base);
+ gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n",
+ plane->base);
return -EINVAL;
}
@@ -371,16 +368,13 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
alpha_plane, alpha_force);
plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
- gvt_vgpu_err("invalid gma address: %lx\n",
- (unsigned long)plane->base);
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
return -EINVAL;
- }
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("invalid gma address: %lx\n",
- (unsigned long)plane->base);
+ gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n",
+ plane->base);
return -EINVAL;
}
@@ -476,16 +470,13 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
plane->drm_format = drm_format;
plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
- gvt_vgpu_err("invalid gma address: %lx\n",
- (unsigned long)plane->base);
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
return -EINVAL;
- }
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("invalid gma address: %lx\n",
- (unsigned long)plane->base);
+ gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n",
+ plane->base);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index d29281231507..78e55aafc8bc 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -530,6 +530,16 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
false, 0, mm->vgpu);
}
+static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
+ struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+ struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+ GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+ pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
+}
+
static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
struct intel_gvt_gtt_entry *entry, unsigned long index)
{
@@ -1818,6 +1828,18 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
return ret;
}
+static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
+ struct intel_gvt_gtt_entry *entry)
+{
+ struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long pfn;
+
+ pfn = pte_ops->get_pfn(entry);
+ if (pfn != vgpu->gvt->gtt.scratch_mfn)
+ intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
+ pfn << PAGE_SHIFT);
+}
+
static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
@@ -1844,10 +1866,10 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
- m = e;
if (ops->test_present(&e)) {
gfn = ops->get_pfn(&e);
+ m = e;
/* one PTE update may be issued in multiple writes and the
* first write may not construct a valid gfn
@@ -1868,8 +1890,12 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
} else
ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
- } else
+ } else {
+ ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
+ ggtt_invalidate_pte(vgpu, &m);
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+ ops->clear_present(&m);
+ }
out:
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
@@ -2030,7 +2056,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
return PTR_ERR(gtt->ggtt_mm);
}
- intel_vgpu_reset_ggtt(vgpu);
+ intel_vgpu_reset_ggtt(vgpu, false);
return create_scratch_page_tree(vgpu);
}
@@ -2315,17 +2341,19 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
/**
* intel_vgpu_reset_ggtt - reset the GGTT entry
* @vgpu: a vGPU
+ * @invalidate_old: invalidate old entries
*
* This function is called at the vGPU create stage
* to reset all the GGTT entries.
*
*/
-void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
+ struct intel_gvt_gtt_entry old_entry;
u32 index;
u32 num_entries;
@@ -2334,13 +2362,23 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
- while (num_entries--)
+ while (num_entries--) {
+ if (invalidate_old) {
+ ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
+ ggtt_invalidate_pte(vgpu, &old_entry);
+ }
ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
+ }
index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
- while (num_entries--)
+ while (num_entries--) {
+ if (invalidate_old) {
+ ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
+ ggtt_invalidate_pte(vgpu, &old_entry);
+ }
ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
+ }
ggtt_invalidate(dev_priv);
}
@@ -2360,5 +2398,5 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
* removing the shadow pages.
*/
intel_vgpu_destroy_all_ppgtt_mm(vgpu);
- intel_vgpu_reset_ggtt(vgpu);
+ intel_vgpu_reset_ggtt(vgpu, true);
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index a8b369cd352b..3792f2b7f4ff 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -193,7 +193,7 @@ struct intel_vgpu_gtt {
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
-void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 8c5d5d005854..a33c1c3e4a21 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1150,6 +1150,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
switch (notification) {
case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+ /* fall through */
case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
return PTR_ERR_OR_ZERO(mm);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index c16a492449d7..1466d8769ec9 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1301,7 +1301,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
}
- return 0;
+ return -ENOTTY;
}
static ssize_t
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 84ca369f15a5..3b4daafebdcb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1105,30 +1105,32 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
ret = i915_ggtt_probe_hw(dev_priv);
if (ret)
- return ret;
+ goto err_perf;
- /* WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over. */
+ /*
+ * WARNING: Apparently we must kick fbdev drivers before vgacon,
+ * otherwise the vga fbdev driver falls over.
+ */
ret = i915_kick_out_firmware_fb(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
- goto out_ggtt;
+ goto err_ggtt;
}
ret = i915_kick_out_vgacon(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting VGA console\n");
- goto out_ggtt;
+ goto err_ggtt;
}
ret = i915_ggtt_init_hw(dev_priv);
if (ret)
- return ret;
+ goto err_ggtt;
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
DRM_ERROR("failed to enable GGTT\n");
- goto out_ggtt;
+ goto err_ggtt;
}
pci_set_master(pdev);
@@ -1139,7 +1141,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
- goto out_ggtt;
+ goto err_ggtt;
}
}
@@ -1157,7 +1159,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
- goto out_ggtt;
+ goto err_ggtt;
}
}
@@ -1190,13 +1192,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
ret = intel_gvt_init(dev_priv);
if (ret)
- goto out_ggtt;
+ goto err_ggtt;
return 0;
-out_ggtt:
+err_ggtt:
i915_ggtt_cleanup_hw(dev_priv);
-
+err_perf:
+ i915_perf_fini(dev_priv);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8c170db8495d..0414228cd2b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
err = radix_tree_insert(handles_vma, handle, vma);
if (unlikely(err)) {
- kfree(lut);
+ kmem_cache_free(eb->i915->luts, lut);
goto err_obj;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d596a8302ca3..854bd51b9478 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -778,6 +778,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
+ if (!args->user_size)
+ return -EINVAL;
+
if (offset_in_page(args->user_ptr | args->user_size))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d8feb9053e0c..f0519e31543a 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -473,20 +473,37 @@ static u64 get_rc6(struct drm_i915_private *i915)
spin_lock_irqsave(&i915->pmu.lock, flags);
spin_lock(&kdev->power.lock);
- if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
- i915->pmu.suspended_jiffies_last =
- kdev->power.suspended_jiffies;
+ /*
+ * After the above branch intel_runtime_pm_get_if_in_use failed
+ * to get the runtime PM reference we cannot assume we are in
+ * runtime suspend since we can either: a) race with coming out
+ * of it before we took the power.lock, or b) there are other
+ * states than suspended which can bring us here.
+ *
+ * We need to double-check that we are indeed currently runtime
+ * suspended and if not we cannot do better than report the last
+ * known RC6 value.
+ */
+ if (kdev->power.runtime_status == RPM_SUSPENDED) {
+ if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+ i915->pmu.suspended_jiffies_last =
+ kdev->power.suspended_jiffies;
- val = kdev->power.suspended_jiffies -
- i915->pmu.suspended_jiffies_last;
- val += jiffies - kdev->power.accounting_timestamp;
+ val = kdev->power.suspended_jiffies -
+ i915->pmu.suspended_jiffies_last;
+ val += jiffies - kdev->power.accounting_timestamp;
- spin_unlock(&kdev->power.lock);
+ val = jiffies_to_nsecs(val);
+ val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
- val = jiffies_to_nsecs(val);
- val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
- i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+ i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+ } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ } else {
+ val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+ }
+ spin_unlock(&kdev->power.lock);
spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e6a8c0ee7df1..8a69a9275e28 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7326,6 +7326,9 @@ enum {
#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
+#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
+#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
+
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030)
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 709d6ca68074..3ea566f99450 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
u32 tmp;
- if (!IS_GEN9_BC(dev_priv))
+ if (!IS_GEN9(dev_priv))
return;
i915_audio_component_get_power(kdev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c5c7530ba157..447b721c3be9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1256,7 +1256,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
return;
aux_channel = child->aux_channel;
- ddc_pin = child->ddc_pin;
is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1303,9 +1302,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) {
- info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
-
- sanitize_ddc_pin(dev_priv, port);
+ ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
+ if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
+ info->alternate_ddc_pin = ddc_pin;
+ sanitize_ddc_pin(dev_priv, port);
+ } else {
+ DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
+ "sticking to defaults\n",
+ port_name(port), ddc_pin);
+ }
}
if (is_dp) {
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index fc8b2c6e3508..704ddb4d3ca7 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2140,10 +2140,22 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
}
}
- /* According to BSpec, "The CD clock frequency must be at least twice
+ /*
+ * According to BSpec, "The CD clock frequency must be at least twice
* the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
+ *
+ * FIXME: Check the actual, not default, BCLK being used.
+ *
+ * FIXME: This does not depend on ->has_audio because the higher CDCLK
+ * is required for audio probe, also when there are no audio capable
+ * displays connected at probe time. This leads to unnecessarily high
+ * CDCLK when audio is not required.
+ *
+ * FIXME: This limit is only applied when there are displays connected
+ * at probe time. If we probe without displays, we'll still end up using
+ * the platform minimum CDCLK, failing audio probe.
*/
- if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
min_cdclk = max(2 * 96000, min_cdclk);
/*
@@ -2290,9 +2302,44 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
return 0;
}
+static int skl_dpll0_vco(struct intel_atomic_state *intel_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ int vco, i;
+
+ vco = intel_state->cdclk.logical.vco;
+ if (!vco)
+ vco = dev_priv->skl_preferred_vco_freq;
+
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ if (!crtc_state->base.enable)
+ continue;
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ continue;
+
+ /*
+ * DPLL0 VCO may need to be adjusted to get the correct
+ * clock for eDP. This will affect cdclk as well.
+ */
+ switch (crtc_state->port_clock / 2) {
+ case 108000:
+ case 216000:
+ vco = 8640000;
+ break;
+ default:
+ vco = 8100000;
+ break;
+ }
+ }
+
+ return vco;
+}
+
static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int min_cdclk, cdclk, vco;
@@ -2300,9 +2347,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
if (min_cdclk < 0)
return min_cdclk;
- vco = intel_state->cdclk.logical.vco;
- if (!vco)
- vco = dev_priv->skl_preferred_vco_freq;
+ vco = skl_dpll0_vco(intel_state);
/*
* FIXME should also account for plane ratio
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 41e6c75a7f3c..f9550ea46c26 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -35,6 +35,7 @@
*/
#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
+MODULE_FIRMWARE(I915_CSR_GLK);
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b48fd2561fe..56004ffbd8bb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15178,6 +15178,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
if (crtc_state->base.active) {
intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
+ crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
+ crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9a4a51e79fa1..b7b4cfdeb974 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1881,26 +1881,6 @@ found:
reduce_m_n);
}
- /*
- * DPLL0 VCO may need to be adjusted to get the correct
- * clock for eDP. This will affect cdclk as well.
- */
- if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
- int vco;
-
- switch (pipe_config->port_clock / 2) {
- case 108000:
- case 216000:
- vco = 8640000;
- break;
- default:
- vco = 8100000;
- break;
- }
-
- to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
- }
-
if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d4368589b355..a80fbad9be0f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -49,12 +49,12 @@
* check the condition before the timeout.
*/
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
- unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
int ret__; \
might_sleep(); \
for (;;) { \
- bool expired__ = time_after(jiffies, timeout__); \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
OP; \
if (COND) { \
ret__ = 0; \
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 4ba139c27fba..f7c25828d3bb 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1149,6 +1149,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+ /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
+ if (IS_GEN9_LP(dev_priv))
+ WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
+
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6f12adc06365..6467a5cc2ca3 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -806,7 +806,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
return;
intel_fbdev_sync(ifbdev);
- if (ifbdev->vma)
+ if (ifbdev->vma || ifbdev->helper.deferred_setup)
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 697af5add78b..8704f7f8d072 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -577,6 +577,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* know the next preemption status we see corresponds
* to this ELSP update.
*/
+ GEM_BUG_ON(!execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_USER));
GEM_BUG_ON(!port_count(&port[0]));
if (port_count(&port[0]) > 1)
goto unlock;
@@ -738,6 +740,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
memset(port, 0, sizeof(*port));
port++;
}
+
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
}
static void execlists_cancel_requests(struct intel_engine_cs *engine)
@@ -880,6 +884,7 @@ static void execlists_submission_tasklet(unsigned long data)
head = execlists->csb_head;
tail = READ_ONCE(buf[write_idx]);
+ rmb(); /* Hopefully paired with a wmb() in HW */
}
GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
engine->name,
@@ -1001,6 +1006,11 @@ static void execlists_submission_tasklet(unsigned long data)
if (fw)
intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
+
+ /* If the engine is now idle, so should be the flag; and vice versa. */
+ GEM_BUG_ON(execlists_is_active(&engine->execlists,
+ EXECLISTS_ACTIVE_USER) ==
+ !port_isset(engine->execlists.port));
}
static void queue_request(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index d35d2d50f595..8691c86f579c 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -326,7 +326,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
POSTING_READ(lvds_encoder->reg);
- if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
+
+ if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 53ea564f971e..66de4b2dc8b7 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -641,19 +641,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Enabling DC6\n");
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_KMS("Disabling DC6\n");
- /* Wa Display #1183: skl,kbl,cfl */
- if (IS_GEN9_BC(dev_priv))
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- SKL_SELECT_ALTERNATE_DC_EXIT);
-
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 6e5e1aa54ce1..b001699297c4 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -351,6 +351,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock_irqsave(&dev->event_lock, flags);
mdp4_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
blend_setup(crtc);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 9893e43ba6c5..76b96081916f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -708,6 +708,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock_irqsave(&dev->event_lock, flags);
mdp5_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
/*
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
index b4a8aa4490ee..005760bee708 100644
--- a/drivers/gpu/drm/msm/disp/mdp_format.c
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
@@ -171,7 +171,8 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
return i;
}
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
+ uint64_t modifier)
{
int i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h
index 1185487e7e5e..4fa8dbe4e165 100644
--- a/drivers/gpu/drm/msm/disp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.h
@@ -98,7 +98,7 @@ struct mdp_format {
#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
/* MDP capabilities */
#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 7a03a9489708..8baba30d6c65 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -173,6 +173,7 @@ struct msm_dsi_host {
bool registered;
bool power_on;
+ bool enabled;
int irq;
};
@@ -775,7 +776,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
case MIPI_DSI_FMT_RGB666_PACKED:
- case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
default: return CMD_DST_FORMAT_RGB888;
}
@@ -986,13 +987,19 @@ static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
{
+ u32 ret = 0;
+ struct device *dev = &msm_host->pdev->dev;
+
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
reinit_completion(&msm_host->video_comp);
- wait_for_completion_timeout(&msm_host->video_comp,
+ ret = wait_for_completion_timeout(&msm_host->video_comp,
msecs_to_jiffies(70));
+ if (ret <= 0)
+ dev_err(dev, "wait for video done timed out\n");
+
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
@@ -1001,7 +1008,7 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
return;
- if (msm_host->power_on) {
+ if (msm_host->power_on && msm_host->enabled) {
dsi_wait4video_done(msm_host);
/* delay 4 ms to skip BLLP */
usleep_range(2000, 4000);
@@ -2203,7 +2210,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
* pm_runtime_put_autosuspend(&msm_host->pdev->dev);
* }
*/
-
+ msm_host->enabled = true;
return 0;
}
@@ -2211,6 +2218,7 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ msm_host->enabled = false;
dsi_op_mode_config(msm_host,
!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 8e9d5c255820..9a9fa0c75a13 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -265,6 +265,115 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
return 0;
}
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x8, lpx;
+ s32 tmax, tmin;
+ s32 pcnt0 = 50;
+ s32 pcnt1 = 50;
+ s32 pcnt2 = 10;
+ s32 pcnt3 = 30;
+ s32 pcnt4 = 10;
+ s32 pcnt5 = 2;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 hb_en, hb_en_ckln;
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ timing->hs_halfbyte_en = 0;
+ hb_en = 0;
+ timing->hs_halfbyte_en_ckln = 0;
+ hb_en_ckln = 0;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x8 = ui << 3;
+ lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+ temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+ temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = (tmin > 255) ? 511 : 255;
+ timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (85 * coeff + 6 * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+ temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp / ui_x8) - 1;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+ timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+ timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+ temp = 60 * coeff + 52 * ui - 43 * ui;
+ tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ timing->shared_timings.clk_post =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 8 * ui + (timing->clk_prepare << 3) * ui;
+ temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
+ temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+ (((timing->hs_rqst_ckln << 3) + 8) * ui);
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ if (tmin > tmax) {
+ temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre = temp >> 1;
+ timing->shared_timings.clk_pre_inc_by_2 = 1;
+ } else {
+ timing->shared_timings.clk_pre =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
+ }
+
+ timing->ta_go = 3;
+ timing->ta_sure = 0;
+ timing->ta_get = 4;
+
+ DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+ timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+ timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+ timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+ timing->hs_prep_dly_ckln);
+
+ return 0;
+}
+
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask)
{
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index c56268cbdb3d..a24ab80994a3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -101,6 +101,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask);
int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 0af951aaeea1..b3fffc8dbb2a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -79,34 +79,6 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
}
-static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
- struct msm_dsi_phy_clk_request *clk_req)
-{
- /*
- * TODO: These params need to be computed, they're currently hardcoded
- * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a
- * default escape clock of 19.2 Mhz.
- */
-
- timing->hs_halfbyte_en = 0;
- timing->clk_zero = 0x1c;
- timing->clk_prepare = 0x07;
- timing->clk_trail = 0x07;
- timing->hs_exit = 0x23;
- timing->hs_zero = 0x21;
- timing->hs_prepare = 0x07;
- timing->hs_trail = 0x07;
- timing->hs_rqst = 0x05;
- timing->ta_sure = 0x00;
- timing->ta_go = 0x03;
- timing->ta_get = 0x04;
-
- timing->shared_timings.clk_pre = 0x2d;
- timing->shared_timings.clk_post = 0x0d;
-
- return 0;
-}
-
static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
struct msm_dsi_phy_clk_request *clk_req)
{
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 0e0c87252ab0..7a16242bf8bf 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -183,7 +183,8 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
- format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
+ format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
if (!format) {
dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c178563fcd4d..456622b46335 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
- ret = PTR_ERR(fb);
- goto fail;
+ return PTR_ERR(fb);
}
bo = msm_framebuffer_bo(fb, 0);
@@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
fail_unlock:
mutex_unlock(&dev->struct_mutex);
-fail:
-
- if (ret) {
- if (fb)
- drm_framebuffer_remove(fb);
- }
-
+ drm_framebuffer_remove(fb);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 95196479f651..f583bb4222f9 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
if (msm_obj->pages) {
- /* For non-cached buffers, ensure the new pages are clean
- * because display controller, GPU, etc. are not coherent:
- */
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ if (msm_obj->sgt) {
+ /* For non-cached buffers, ensure the new
+ * pages are clean because display controller,
+ * GPU, etc. are not coherent:
+ */
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents,
+ DMA_BIDIRECTIONAL);
- if (msm_obj->sgt)
sg_free_table(msm_obj->sgt);
-
- kfree(msm_obj->sgt);
+ kfree(msm_obj->sgt);
+ }
if (use_pages(obj))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 17d5824417ad..aaa329dc020e 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -48,8 +48,11 @@ struct msm_kms_funcs {
/* functions to wait for atomic commit completed on each CRTC */
void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
struct drm_crtc *crtc);
+ /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
+ const struct msm_format *(*get_format)(struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers);
/* misc: */
- const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
int (*set_split_display)(struct msm_kms *kms,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6f402c4f2bdd..ab61c038f42c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -214,7 +214,6 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
INIT_LIST_HEAD(&nvbo->entry);
INIT_LIST_HEAD(&nvbo->vma_list);
nvbo->bo.bdev = &drm->ttm.bdev;
- nvbo->cli = cli;
/* This is confusing, and doesn't actually mean we want an uncached
* mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index be8e00b49cde..73c48440d4d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -26,8 +26,6 @@ struct nouveau_bo {
struct list_head vma_list;
- struct nouveau_cli *cli;
-
unsigned contig:1;
unsigned page:5;
unsigned kind:8;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index dff51a0ee028..8c093ca4222e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -63,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *reg)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_drm *drm = nvbo->cli->drm;
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_mem *mem;
int ret;
@@ -103,7 +103,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *reg)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_drm *drm = nvbo->cli->drm;
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_mem *mem;
int ret;
@@ -131,7 +131,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *reg)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_drm *drm = nvbo->cli->drm;
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_mem *mem;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 8bd739cfd00d..2b3ccd850750 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3264,10 +3264,11 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_unregister(&mstc->connector);
- drm_modeset_lock_all(drm->dev);
drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
+
+ drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
mstc->port = NULL;
- drm_modeset_unlock_all(drm->dev);
+ drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
drm_connector_unreference(&mstc->connector);
}
@@ -3277,9 +3278,7 @@ nv50_mstm_register_connector(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- drm_modeset_lock_all(drm->dev);
drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
- drm_modeset_unlock_all(drm->dev);
drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 5e2e65e88847..7f3ac6b13b56 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -828,6 +828,12 @@ static void dispc_ovl_set_scale_coef(struct dispc_device *dispc,
h_coef = dispc_ovl_get_scale_coef(fir_hinc, true);
v_coef = dispc_ovl_get_scale_coef(fir_vinc, five_taps);
+ if (!h_coef || !v_coef) {
+ dev_err(&dispc->pdev->dev, "%s: failed to find scale coefs\n",
+ __func__);
+ return;
+ }
+
for (i = 0; i < 8; i++) {
u32 h, hv;
@@ -2342,7 +2348,7 @@ static int dispc_ovl_calc_scaling_24xx(struct dispc_device *dispc,
}
if (in_width > maxsinglelinewidth) {
- DSSERR("Cannot scale max input width exceeded");
+ DSSERR("Cannot scale max input width exceeded\n");
return -EINVAL;
}
return 0;
@@ -2424,13 +2430,13 @@ again:
}
if (in_width > (maxsinglelinewidth * 2)) {
- DSSERR("Cannot setup scaling");
- DSSERR("width exceeds maximum width possible");
+ DSSERR("Cannot setup scaling\n");
+ DSSERR("width exceeds maximum width possible\n");
return -EINVAL;
}
if (in_width > maxsinglelinewidth && *five_taps) {
- DSSERR("cannot setup scaling with five taps");
+ DSSERR("cannot setup scaling with five taps\n");
return -EINVAL;
}
return 0;
@@ -2472,7 +2478,7 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
in_width > maxsinglelinewidth && ++*decim_x);
if (in_width > maxsinglelinewidth) {
- DSSERR("Cannot scale width exceeds max line width");
+ DSSERR("Cannot scale width exceeds max line width\n");
return -EINVAL;
}
@@ -2490,7 +2496,7 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
* bandwidth. Despite what theory says this appears to
* be true also for 16-bit color formats.
*/
- DSSERR("Not enough bandwidth, too much downscaling (x-decimation factor %d > 4)", *decim_x);
+ DSSERR("Not enough bandwidth, too much downscaling (x-decimation factor %d > 4)\n", *decim_x);
return -EINVAL;
}
@@ -4633,7 +4639,7 @@ static int dispc_errata_i734_wa_init(struct dispc_device *dispc)
i734_buf.size, &i734_buf.paddr,
GFP_KERNEL);
if (!i734_buf.vaddr) {
- dev_err(&dispc->pdev->dev, "%s: dma_alloc_writecombine failed",
+ dev_err(&dispc->pdev->dev, "%s: dma_alloc_writecombine failed\n",
__func__);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index fb1c27f69e3a..3d662e6805eb 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -142,7 +142,7 @@ static enum dss_clk_source dpi_get_clk_src(struct dpi_data *dpi)
}
struct dpi_clk_calc_ctx {
- struct dss_pll *pll;
+ struct dpi_data *dpi;
unsigned int clkout_idx;
/* inputs */
@@ -191,7 +191,7 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
- return dispc_div_calc(ctx->pll->dss->dispc, dispc,
+ return dispc_div_calc(ctx->dpi->dss->dispc, dispc,
ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
}
@@ -208,8 +208,8 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
ctx->pll_cinfo.fint = fint;
ctx->pll_cinfo.clkdco = clkdco;
- return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
- ctx->pck_min, dss_get_max_fck_rate(ctx->pll->dss),
+ return dss_pll_hsdiv_calc_a(ctx->dpi->pll, clkdco,
+ ctx->pck_min, dss_get_max_fck_rate(ctx->dpi->dss),
dpi_calc_hsdiv_cb, ctx);
}
@@ -219,7 +219,7 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
ctx->fck = fck;
- return dispc_div_calc(ctx->pll->dss->dispc, fck,
+ return dispc_div_calc(ctx->dpi->dss->dispc, fck,
ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
}
@@ -230,7 +230,7 @@ static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
unsigned long clkin;
memset(ctx, 0, sizeof(*ctx));
- ctx->pll = dpi->pll;
+ ctx->dpi = dpi;
ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src);
clkin = clk_get_rate(dpi->pll->clkin);
@@ -244,7 +244,7 @@ static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
pll_min = 0;
pll_max = 0;
- return dss_pll_calc_a(ctx->pll, clkin,
+ return dss_pll_calc_a(ctx->dpi->pll, clkin,
pll_min, pll_max,
dpi_calc_pll_cb, ctx);
} else { /* DSS_PLL_TYPE_B */
@@ -275,6 +275,7 @@ static bool dpi_dss_clk_calc(struct dpi_data *dpi, unsigned long pck,
bool ok;
memset(ctx, 0, sizeof(*ctx));
+ ctx->dpi = dpi;
if (pck > 1000 * i * i * i)
ctx->pck_min = max(pck - 1000 * i * i * i, 0lu);
else
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 97c88861d67a..5879f45f6fc9 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -679,7 +679,7 @@ static int hdmi_audio_config(struct device *dev,
struct omap_dss_audio *dss_audio)
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
- int ret;
+ int ret = 0;
mutex_lock(&hd->lock);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 35ed2add6189..813ba42f2753 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -922,8 +922,13 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
const struct hdmi4_features *features;
struct resource *res;
+ const struct soc_device_attribute *soc;
- features = soc_device_match(hdmi4_soc_devices)->data;
+ soc = soc_device_match(hdmi4_soc_devices);
+ if (!soc)
+ return -ENODEV;
+
+ features = soc->data;
core->cts_swmode = features->cts_swmode;
core->audio_use_mclk = features->audio_use_mclk;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index d28da9ac3e90..ae1a001d1b83 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -671,7 +671,7 @@ static int hdmi_audio_config(struct device *dev,
struct omap_dss_audio *dss_audio)
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
- int ret;
+ int ret = 0;
mutex_lock(&hd->lock);
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index a0d7b1d905e8..5cde26ac937b 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -121,6 +121,9 @@ static int omap_connector_get_modes(struct drm_connector *connector)
if (dssdrv->read_edid) {
void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
+ if (!edid)
+ return 0;
+
if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
drm_edid_is_valid(edid)) {
drm_mode_connector_update_edid_property(
@@ -139,6 +142,9 @@ static int omap_connector_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = drm_mode_create(dev);
struct videomode vm = {0};
+ if (!mode)
+ return 0;
+
dssdrv->get_timings(dssdev, &vm);
drm_display_mode_from_videomode(&vm, mode);
@@ -200,6 +206,10 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
if (!r) {
/* check if vrefresh is still valid */
new_mode = drm_mode_duplicate(dev, mode);
+
+ if (!new_mode)
+ return MODE_BAD;
+
new_mode->clock = vm.pixelclock / 1000;
new_mode->vrefresh = 0;
if (mode->vrefresh == drm_mode_vrefresh(new_mode))
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f9fa1c90b35c..401c02e9e6b2 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -401,12 +401,16 @@ int tiler_unpin(struct tiler_block *block)
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w,
u16 h, u16 align)
{
- struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+ struct tiler_block *block;
u32 min_align = 128;
int ret;
unsigned long flags;
u32 slot_bytes;
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+
BUG_ON(!validfmt(fmt));
/* convert width/height to slots */
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index d7f7bc9f061a..817be3c41863 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -90,7 +90,7 @@ static int l2r_t2b(u16 w, u16 h, u16 a, s16 offset,
{
int i;
unsigned long index;
- bool area_free;
+ bool area_free = false;
unsigned long slots_per_band = PAGE_SIZE / slot_bytes;
unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0;
unsigned long curr_bit = bit_offset;
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index c0fb52c6d4ca..01665b98c57e 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,10 +179,9 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
- struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
- cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
+ cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
@@ -192,10 +191,9 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
- struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
- cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
+ cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 00a1a66b052a..864b456080c4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -167,6 +167,7 @@ struct qxl_release {
int id;
int type;
+ struct qxl_bo *release_bo;
uint32_t release_offset;
uint32_t surface_release_id;
struct ww_acquire_ctx ticket;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index e238a1a2eca1..6cc9f3367fa0 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -182,9 +182,9 @@ static int qxl_process_single_command(struct qxl_device *qdev,
goto out_free_reloc;
/* TODO copy slow path code from i915 */
- fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
+ fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
unwritten = __copy_from_user_inatomic_nocache
- (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE),
+ (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
u64_to_user_ptr(cmd->command), cmd->command_size);
{
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 5d84a66fed36..7cb214577275 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -173,6 +173,7 @@ qxl_release_free_list(struct qxl_release *release)
list_del(&entry->tv.head);
kfree(entry);
}
+ release->release_bo = NULL;
}
void
@@ -296,7 +297,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
{
if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
int idr_ret;
- struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
struct qxl_bo *bo;
union qxl_release_info *info;
@@ -304,8 +304,9 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
if (idr_ret < 0)
return idr_ret;
- bo = to_qxl_bo(entry->tv.bo);
+ bo = create_rel->release_bo;
+ (*release)->release_bo = bo;
(*release)->release_offset = create_rel->release_offset + 64;
qxl_release_list_add(*release, bo);
@@ -365,6 +366,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
+ (*release)->release_bo = bo;
(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
qdev->current_release_bo_offset[cur_idx]++;
@@ -408,13 +410,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
{
void *ptr;
union qxl_release_info *info;
- struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
- struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
+ struct qxl_bo *bo = release->release_bo;
- ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+ ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
if (!ptr)
return NULL;
- info = ptr + (release->release_offset & ~PAGE_SIZE);
+ info = ptr + (release->release_offset & ~PAGE_MASK);
return info;
}
@@ -422,11 +423,10 @@ void qxl_release_unmap(struct qxl_device *qdev,
struct qxl_release *release,
union qxl_release_info *info)
{
- struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
- struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
+ struct qxl_bo *bo = release->release_bo;
void *ptr;
- ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
+ ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e415d2c097a7..48d0e6bd0508 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -140,6 +140,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugs.freedesktop.org/show_bug.cgi?id=101491
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+ * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
+ */
+ { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
{ 0, 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 97a0a639dad9..90d5b41007bf 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5912,9 +5912,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
{
u32 lane_width;
u32 new_lane_width =
- (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
- (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
radeon_set_pcie_lanes(rdev, new_lane_width);
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index bffff4c9fbf5..be3f14d7746d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -94,64 +94,9 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
}
}
-static enum drm_mode_status sun4i_lvds_encoder_mode_valid(struct drm_encoder *crtc,
- const struct drm_display_mode *mode)
-{
- struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(crtc);
- struct sun4i_tcon *tcon = lvds->tcon;
- u32 hsync = mode->hsync_end - mode->hsync_start;
- u32 vsync = mode->vsync_end - mode->vsync_start;
- unsigned long rate = mode->clock * 1000;
- long rounded_rate;
-
- DRM_DEBUG_DRIVER("Validating modes...\n");
-
- if (hsync < 1)
- return MODE_HSYNC_NARROW;
-
- if (hsync > 0x3ff)
- return MODE_HSYNC_WIDE;
-
- if ((mode->hdisplay < 1) || (mode->htotal < 1))
- return MODE_H_ILLEGAL;
-
- if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff))
- return MODE_BAD_HVALUE;
-
- DRM_DEBUG_DRIVER("Horizontal parameters OK\n");
-
- if (vsync < 1)
- return MODE_VSYNC_NARROW;
-
- if (vsync > 0x3ff)
- return MODE_VSYNC_WIDE;
-
- if ((mode->vdisplay < 1) || (mode->vtotal < 1))
- return MODE_V_ILLEGAL;
-
- if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff))
- return MODE_BAD_VVALUE;
-
- DRM_DEBUG_DRIVER("Vertical parameters OK\n");
-
- tcon->dclk_min_div = 7;
- tcon->dclk_max_div = 7;
- rounded_rate = clk_round_rate(tcon->dclk, rate);
- if (rounded_rate < rate)
- return MODE_CLOCK_LOW;
-
- if (rounded_rate > rate)
- return MODE_CLOCK_HIGH;
-
- DRM_DEBUG_DRIVER("Clock rate OK\n");
-
- return MODE_OK;
-}
-
static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
.disable = sun4i_lvds_encoder_disable,
.enable = sun4i_lvds_encoder_enable,
- .mode_valid = sun4i_lvds_encoder_mode_valid,
};
static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index f0481b7b60c5..06c94e3a5f15 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -910,7 +910,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
while (npages >= HPAGE_PMD_NR) {
gfp_t huge_flags = gfp_flags;
- huge_flags |= GFP_TRANSHUGE;
+ huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ __GFP_KSWAPD_RECLAIM;
huge_flags &= ~__GFP_MOVABLE;
huge_flags &= ~__GFP_COMP;
p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
@@ -1027,11 +1028,15 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
GFP_USER | GFP_DMA32, "uc dma", 0);
ttm_page_pool_init_locked(&_manager->wc_pool_huge,
- GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
+ (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ __GFP_KSWAPD_RECLAIM) &
+ ~(__GFP_MOVABLE | __GFP_COMP),
"wc huge", order);
ttm_page_pool_init_locked(&_manager->uc_pool_huge,
- GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
+ (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ __GFP_KSWAPD_RECLAIM) &
+ ~(__GFP_MOVABLE | __GFP_COMP)
, "uc huge", order);
_manager->options.max_size = max_pages;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 8a25d1974385..f63d99c302e4 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -910,7 +910,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
gfp_flags |= __GFP_ZERO;
if (huge) {
- gfp_flags |= GFP_TRANSHUGE;
+ gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ __GFP_KSWAPD_RECLAIM;
gfp_flags &= ~__GFP_MOVABLE;
gfp_flags &= ~__GFP_COMP;
}
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 2decc8e2c79f..add9cc97a3b6 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
vc4_bo_set_label(obj, -1);
if (bo->validated_shader) {
+ kfree(bo->validated_shader->uniform_addr_offsets);
kfree(bo->validated_shader->texture_samples);
kfree(bo->validated_shader);
bo->validated_shader = NULL;
@@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
}
if (bo->validated_shader) {
+ kfree(bo->validated_shader->uniform_addr_offsets);
kfree(bo->validated_shader->texture_samples);
kfree(bo->validated_shader);
bo->validated_shader = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index bf4667481935..c61dff594195 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -760,6 +760,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
struct vc4_async_flip_state {
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
+ struct drm_framebuffer *old_fb;
struct drm_pending_vblank_event *event;
struct vc4_seqno_cb cb;
@@ -789,6 +790,23 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
drm_crtc_vblank_put(crtc);
drm_framebuffer_put(flip_state->fb);
+
+ /* Decrement the BO usecnt in order to keep the inc/dec calls balanced
+ * when the planes are updated through the async update path.
+ * FIXME: we should move to generic async-page-flip when it's
+ * available, so that we can get rid of this hand-made cleanup_fb()
+ * logic.
+ */
+ if (flip_state->old_fb) {
+ struct drm_gem_cma_object *cma_bo;
+ struct vc4_bo *bo;
+
+ cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
+ bo = to_vc4_bo(&cma_bo->base);
+ vc4_bo_dec_usecnt(bo);
+ drm_framebuffer_put(flip_state->old_fb);
+ }
+
kfree(flip_state);
up(&vc4->async_modeset);
@@ -813,9 +831,22 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ /* Increment the BO usecnt here, so that we never end up with an
+ * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
+ * plane is later updated through the non-async path.
+ * FIXME: we should move to generic async-page-flip when it's
+ * available, so that we can get rid of this hand-made prepare_fb()
+ * logic.
+ */
+ ret = vc4_bo_inc_usecnt(bo);
+ if (ret)
+ return ret;
+
flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
- if (!flip_state)
+ if (!flip_state) {
+ vc4_bo_dec_usecnt(bo);
return -ENOMEM;
+ }
drm_framebuffer_get(fb);
flip_state->fb = fb;
@@ -826,10 +857,23 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
ret = down_interruptible(&vc4->async_modeset);
if (ret) {
drm_framebuffer_put(fb);
+ vc4_bo_dec_usecnt(bo);
kfree(flip_state);
return ret;
}
+ /* Save the current FB before it's replaced by the new one in
+ * drm_atomic_set_fb_for_plane(). We'll need the old FB in
+ * vc4_async_page_flip_complete() to decrement the BO usecnt and keep
+ * it consistent.
+ * FIXME: we should move to generic async-page-flip when it's
+ * available, so that we can get rid of this hand-made cleanup_fb()
+ * logic.
+ */
+ flip_state->old_fb = plane->state->fb;
+ if (flip_state->old_fb)
+ drm_framebuffer_get(flip_state->old_fb);
+
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
/* Immediately update the plane's legacy fb pointer, so that later
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 72c9dbd81d7f..f185812970da 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -96,7 +96,6 @@ struct vc4_dpi {
struct platform_device *pdev;
struct drm_encoder *encoder;
- struct drm_connector *connector;
void __iomem *regs;
@@ -164,14 +163,31 @@ static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
struct drm_display_mode *mode = &encoder->crtc->mode;
struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
struct vc4_dpi *dpi = vc4_encoder->dpi;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector = NULL, *connector_scan;
u32 dpi_c = DPI_ENABLE | DPI_OUTPUT_ENABLE_MODE;
int ret;
- if (dpi->connector->display_info.num_bus_formats) {
- u32 bus_format = dpi->connector->display_info.bus_formats[0];
+ /* Look up the connector attached to DPI so we can get the
+ * bus_format. Ideally the bridge would tell us the
+ * bus_format we want, but it doesn't yet, so assume that it's
+ * uniform throughout the bridge chain.
+ */
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector_scan, &conn_iter) {
+ if (connector_scan->encoder == encoder) {
+ connector = connector_scan;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (connector && connector->display_info.num_bus_formats) {
+ u32 bus_format = connector->display_info.bus_formats[0];
switch (bus_format) {
case MEDIA_BUS_FMT_RGB888_1X24:
@@ -199,6 +215,9 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
DRM_ERROR("Unknown media bus format %d\n", bus_format);
break;
}
+ } else {
+ /* Default to 24bit if no connector found. */
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT);
}
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 94b99c90425a..7c95ed5c5cac 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -130,6 +130,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
struct vc4_file *vc4file = file->driver_priv;
vc4_perfmon_close_file(vc4file);
+ kfree(vc4file);
}
static const struct vm_operations_struct vc4_vm_ops = {
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index ce39390be389..13dcaad06798 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -503,7 +503,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* the scl fields here.
*/
if (num_planes == 1) {
- scl0 = vc4_get_scl_field(state, 1);
+ scl0 = vc4_get_scl_field(state, 0);
scl1 = scl0;
} else {
scl0 = vc4_get_scl_field(state, 1);
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index d3f15bf60900..7cf82b071de2 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
fail:
kfree(validation_state.branch_targets);
if (validated_shader) {
+ kfree(validated_shader->uniform_addr_offsets);
kfree(validated_shader->texture_samples);
kfree(validated_shader);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 48e4f1df6e5d..020070d483d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -293,7 +293,7 @@ retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
spin_lock(&vgdev->ctrlq.qlock);
goto retry;
} else {
@@ -368,7 +368,7 @@ retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->cursorq.qlock);
- wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+ wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 2582ffd36bb5..ba0cdb743c3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -441,11 +441,11 @@ static int vmwgfx_set_config_internal(struct drm_mode_set *set)
struct drm_crtc *crtc = set->crtc;
struct drm_framebuffer *fb;
struct drm_crtc *tmp;
- struct drm_modeset_acquire_ctx *ctx;
struct drm_device *dev = set->crtc->dev;
+ struct drm_modeset_acquire_ctx ctx;
int ret;
- ctx = dev->mode_config.acquire_ctx;
+ drm_modeset_acquire_init(&ctx, 0);
restart:
/*
@@ -458,7 +458,7 @@ restart:
fb = set->fb;
- ret = crtc->funcs->set_config(set, ctx);
+ ret = crtc->funcs->set_config(set, &ctx);
if (ret == 0) {
crtc->primary->crtc = crtc;
crtc->primary->fb = fb;
@@ -473,20 +473,13 @@ restart:
}
if (ret == -EDEADLK) {
- dev->mode_config.acquire_ctx = NULL;
-
-retry_locking:
- drm_modeset_backoff(ctx);
-
- ret = drm_modeset_lock_all_ctx(dev, ctx);
- if (ret)
- goto retry_locking;
-
- dev->mode_config.acquire_ctx = ctx;
-
+ drm_modeset_backoff(&ctx);
goto restart;
}
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
return ret;
}
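For context, the hunks above convert vmwgfx_set_config_internal() from borrowing the global acquire context to owning a local one. A minimal sketch of the drm_modeset_acquire_init()/drm_modeset_backoff() retry idiom it now follows; the set_config call stands in for any operation that can return -EDEADLK, and the function name is illustrative only:

#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>

static int example_locked_set_config(struct drm_mode_set *set)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = set->crtc->funcs->set_config(set, &ctx);
	if (ret == -EDEADLK) {
		/* Drop all held locks, wait for the contended one, retry. */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}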
@@ -624,7 +617,6 @@ static int vmw_fb_set_par(struct fb_info *info)
}
mutex_lock(&par->bo_mutex);
- drm_modeset_lock_all(vmw_priv->dev);
ret = vmw_fb_kms_framebuffer(info);
if (ret)
goto out_unlock;
@@ -657,7 +649,6 @@ out_unlock:
drm_mode_destroy(vmw_priv->dev, old_mode);
par->set_mode = mode;
- drm_modeset_unlock_all(vmw_priv->dev);
mutex_unlock(&par->bo_mutex);
return ret;
@@ -713,18 +704,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
par->max_width = fb_width;
par->max_height = fb_height;
- drm_modeset_lock_all(vmw_priv->dev);
ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
par->max_height, &par->con,
&par->crtc, &init_mode);
- if (ret) {
- drm_modeset_unlock_all(vmw_priv->dev);
+ if (ret)
goto err_kms;
- }
info->var.xres = init_mode->hdisplay;
info->var.yres = init_mode->vdisplay;
- drm_modeset_unlock_all(vmw_priv->dev);
/*
* Create buffers and alloc memory
@@ -832,7 +819,9 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
cancel_delayed_work_sync(&par->local_work);
unregister_framebuffer(info);
+ mutex_lock(&par->bo_mutex);
(void) vmw_fb_kms_detach(par, true, true);
+ mutex_unlock(&par->bo_mutex);
vfree(par->vmalloc);
framebuffer_release(info);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index f11601b6fd74..96fd7a03d2f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2595,6 +2595,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL);
+ vmw_dmabuf_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -2680,7 +2681,9 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
struct vmw_display_unit *du;
struct drm_display_mode *mode;
int i = 0;
+ int ret = 0;
+ mutex_lock(&dev_priv->dev->mode_config.mutex);
list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
head) {
if (i == unit)
@@ -2691,7 +2694,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
if (i != unit) {
DRM_ERROR("Could not find initial display unit.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock;
}
if (list_empty(&con->modes))
@@ -2699,7 +2703,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
if (list_empty(&con->modes)) {
DRM_ERROR("Could not find initial display mode.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock;
}
du = vmw_connector_to_du(con);
@@ -2720,7 +2725,10 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
head);
}
- return 0;
+ out_unlock:
+ mutex_unlock(&dev_priv->dev->mode_config.mutex);
+
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 648f8127f65a..3d667e903beb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -482,6 +482,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
return ret;
}
+ vps->dmabuf_size = size;
+
/*
* TTM already thinks the buffer is pinned, but make sure the
* pin_count is upped.
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 60252fd796f6..0000434a1fbd 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -462,10 +462,11 @@ config HID_LENOVO
select NEW_LEDS
select LEDS_CLASS
---help---
- Support for Lenovo devices that are not fully compliant with HID standard.
+	  Support for IBM/Lenovo devices that are not fully compliant with the HID standard.
- Say Y if you want support for the non-compliant features of the Lenovo
- Thinkpad standalone keyboards, e.g:
+ Say Y if you want support for horizontal scrolling of the IBM/Lenovo
+ Scrollpoint mice or the non-compliant features of the Lenovo Thinkpad
+	  standalone keyboards, e.g.:
- ThinkPad USB Keyboard with TrackPoint (supports extra LEDs and trackpoint
configuration)
- ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5a3a7ead3012..46f5ecd11bf7 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -525,6 +525,9 @@
#define I2C_VENDOR_ID_HANTICK 0x0911
#define I2C_PRODUCT_ID_HANTICK_5288 0x5288
+#define I2C_VENDOR_ID_RAYD 0x2386
+#define I2C_PRODUCT_ID_RAYD_3118 0x3118
+
#define USB_VENDOR_ID_HANWANG 0x0b57
#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
@@ -549,6 +552,13 @@
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
+#define USB_VENDOR_ID_IBM 0x04b3
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_PRO 0x3103
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL 0x3105
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL 0x3108
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO 0x3109
+
#define USB_VENDOR_ID_IDEACOM 0x1cb6
#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
#define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651
@@ -681,6 +691,7 @@
#define USB_DEVICE_ID_LENOVO_TPKBD 0x6009
#define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047
#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048
+#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
@@ -961,6 +972,7 @@
#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
#define USB_DEVICE_ID_SIS_TS 0x1013
#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
+#define USB_DEVICE_ID_SIS10FB_TOUCH 0x10fb
#define USB_VENDOR_ID_SKYCABLE 0x1223
#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 6836a856c243..930652c25120 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CAPACITY:
- if (dev->battery_report_type == HID_FEATURE_REPORT) {
+ if (dev->battery_status != HID_BATTERY_REPORTED &&
+ !dev->battery_avoid_query) {
value = hidinput_query_battery_capacity(dev);
if (value < 0)
return value;
@@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_STATUS:
- if (!dev->battery_reported &&
- dev->battery_report_type == HID_FEATURE_REPORT) {
+ if (dev->battery_status != HID_BATTERY_REPORTED &&
+ !dev->battery_avoid_query) {
value = hidinput_query_battery_capacity(dev);
if (value < 0)
return value;
dev->battery_capacity = value;
- dev->battery_reported = true;
+ dev->battery_status = HID_BATTERY_QUERIED;
}
- if (!dev->battery_reported)
+ if (dev->battery_status == HID_BATTERY_UNKNOWN)
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
else if (dev->battery_capacity == 100)
val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
dev->battery_report_type = report_type;
dev->battery_report_id = field->report->id;
+ /*
+ * Stylus is normally not connected to the device and thus we
+ * can't query the device and get meaningful battery strength.
+ * We have to wait for the device to report it on its own.
+ */
+ dev->battery_avoid_query = report_type == HID_INPUT_REPORT &&
+ field->physical == HID_DG_STYLUS;
+
dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg);
if (IS_ERR(dev->battery)) {
error = PTR_ERR(dev->battery);
@@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
capacity = hidinput_scale_battery_capacity(dev, value);
- if (!dev->battery_reported || capacity != dev->battery_capacity) {
+ if (dev->battery_status != HID_BATTERY_REPORTED ||
+ capacity != dev->battery_capacity) {
dev->battery_capacity = capacity;
- dev->battery_reported = true;
+ dev->battery_status = HID_BATTERY_REPORTED;
power_supply_changed(dev->battery);
}
}
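The hid-input hunks above replace the old battery_reported flag with a three-state status so a one-off query result is no longer mistaken for a device-initiated report. A simplified, self-contained sketch of that tri-state handling; the enum names match those used in the hunks, but the real definition lives in the HID headers outside this excerpt, and the struct and helpers here are illustrative only:

#include <stdbool.h>

enum hid_battery_status {
	HID_BATTERY_UNKNOWN = 0,	/* nothing known yet */
	HID_BATTERY_QUERIED,		/* capacity came from an explicit query */
	HID_BATTERY_REPORTED,		/* device reported capacity on its own */
};

struct battery_state {
	enum hid_battery_status status;
	int capacity;
	bool avoid_query;		/* e.g. detachable stylus: never query */
};

/* Query only while the device has never reported on its own and querying
 * is allowed for this kind of device.
 */
static bool battery_may_query(const struct battery_state *b)
{
	return b->status != HID_BATTERY_REPORTED && !b->avoid_query;
}

/* A report from the device always wins over a previously queried value. */
static void battery_update_from_report(struct battery_state *b, int capacity)
{
	if (b->status != HID_BATTERY_REPORTED || capacity != b->capacity) {
		b->capacity = capacity;
		b->status = HID_BATTERY_REPORTED;
	}
}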
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 1ac4ff4d57a6..643b6eb54442 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -6,6 +6,17 @@
*
* Copyright (c) 2012 Bernhard Seibold
* Copyright (c) 2014 Jamie Lentin <jm@lentin.co.uk>
+ *
+ * Linux IBM/Lenovo Scrollpoint mouse driver:
+ * - IBM Scrollpoint III
+ * - IBM Scrollpoint Pro
+ * - IBM Scrollpoint Optical
+ * - IBM Scrollpoint Optical 800dpi
+ * - IBM Scrollpoint Optical 800dpi Pro
+ * - Lenovo Scrollpoint Optical
+ *
+ * Copyright (c) 2012 Peter De Wachter <pdewacht@gmail.com>
+ * Copyright (c) 2018 Peter Ganzhorn <peter.ganzhorn@gmail.com>
*/
/*
@@ -160,6 +171,17 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
return 0;
}
+static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ if (usage->hid == HID_GD_Z) {
+ hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL);
+ return 1;
+ }
+ return 0;
+}
+
static int lenovo_input_mapping(struct hid_device *hdev,
struct hid_input *hi, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max)
@@ -172,6 +194,14 @@ static int lenovo_input_mapping(struct hid_device *hdev,
case USB_DEVICE_ID_LENOVO_CBTKBD:
return lenovo_input_mapping_cptkbd(hdev, hi, field,
usage, bit, max);
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_III:
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_PRO:
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL:
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL:
+ case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO:
+ case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL:
+ return lenovo_input_mapping_scrollpoint(hdev, hi, field,
+ usage, bit, max);
default:
return 0;
}
@@ -883,6 +913,12 @@ static const struct hid_device_id lenovo_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_III) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) },
{ }
};
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index fbfcc8009432..b39844adea47 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
int ret = 0, len;
unsigned char report_number;
+ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ ret = -ENODEV;
+ goto out;
+ }
+
dev = hidraw_table[minor]->hid;
if (!dev->ll_driver->raw_request) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 97689e98e53f..cc33622253aa 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -47,6 +47,7 @@
/* quirks to control the device */
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
+#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2)
/* flags */
#define I2C_HID_STARTED 0
@@ -171,6 +172,10 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
+ { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
+ I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+ { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
+ I2C_HID_QUIRK_RESEND_REPORT_DESCR },
{ 0, 0 }
};
@@ -1220,6 +1225,16 @@ static int i2c_hid_resume(struct device *dev)
if (ret)
return ret;
+	/* The RAYDIUM device (2386:3118) needs to re-send the report
+	 * descriptor command after resume, after which it is back to normal;
+	 * otherwise it issues too many incomplete reports.
+ */
+ if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
+ ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
if (hid->driver && hid->driver->reset_resume) {
ret = hid->driver->reset_resume(hid);
return ret;
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 157b44aacdff..acc2536c8094 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -77,21 +77,21 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
int curr_hid_dev = client_data->cur_hid_dev;
- if (data_len < sizeof(struct hostif_msg_hdr)) {
- dev_err(&client_data->cl_device->dev,
- "[hid-ish]: error, received %u which is less than data header %u\n",
- (unsigned int)data_len,
- (unsigned int)sizeof(struct hostif_msg_hdr));
- ++client_data->bad_recv_cnt;
- ish_hw_reset(hid_ishtp_cl->dev);
- return;
- }
-
payload = recv_buf + sizeof(struct hostif_msg_hdr);
total_len = data_len;
cur_pos = 0;
do {
+ if (cur_pos + sizeof(struct hostif_msg) > total_len) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: error, received %u which is less than data header %u\n",
+ (unsigned int)data_len,
+ (unsigned int)sizeof(struct hostif_msg_hdr));
+ ++client_data->bad_recv_cnt;
+ ish_hw_reset(hid_ishtp_cl->dev);
+ break;
+ }
+
recv_msg = (struct hostif_msg *)(recv_buf + cur_pos);
payload_len = recv_msg->hdr.size;
@@ -412,9 +412,7 @@ void hid_ishtp_get_report(struct hid_device *hid, int report_id,
{
struct ishtp_hid_data *hid_data = hid->driver_data;
struct ishtp_cl_data *client_data = hid_data->client_data;
- static unsigned char buf[10];
- unsigned int len;
- struct hostif_msg_to_sensor *msg = (struct hostif_msg_to_sensor *)buf;
+ struct hostif_msg_to_sensor msg = {};
int rv;
int i;
@@ -426,14 +424,11 @@ void hid_ishtp_get_report(struct hid_device *hid, int report_id,
return;
}
- len = sizeof(struct hostif_msg_to_sensor);
-
- memset(msg, 0, sizeof(struct hostif_msg_to_sensor));
- msg->hdr.command = (report_type == HID_FEATURE_REPORT) ?
+ msg.hdr.command = (report_type == HID_FEATURE_REPORT) ?
HOSTIF_GET_FEATURE_REPORT : HOSTIF_GET_INPUT_REPORT;
for (i = 0; i < client_data->num_hid_devices; ++i) {
if (hid == client_data->hid_sensor_hubs[i]) {
- msg->hdr.device_id =
+ msg.hdr.device_id =
client_data->hid_devices[i].dev_id;
break;
}
@@ -442,8 +437,9 @@ void hid_ishtp_get_report(struct hid_device *hid, int report_id,
if (i == client_data->num_hid_devices)
return;
- msg->report_id = report_id;
- rv = ishtp_cl_send(client_data->hid_ishtp_cl, buf, len);
+ msg.report_id = report_id;
+ rv = ishtp_cl_send(client_data->hid_ishtp_cl, (uint8_t *)&msg,
+ sizeof(msg));
if (rv)
hid_ishtp_trace(client_data, "%s hid %p send failed\n",
__func__, hid);
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index f272cdd9bd55..2623a567ffba 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -418,7 +418,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
list_del(&device->device_link);
spin_unlock_irqrestore(&dev->device_list_lock, flags);
dev_err(dev->devc, "Failed to register ISHTP client device\n");
- kfree(device);
+ put_device(&device->dev);
return NULL;
}
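The one-line ishtp change above swaps kfree() for put_device(): once device_register() has run, the embedded kobject owns the allocation, so the error path must drop the reference and let the release callback free it. A minimal sketch of that rule; my_dev and its release hook are hypothetical:

#include <linux/device.h>
#include <linux/slab.h>

struct my_dev {
	struct device dev;
	/* driver-private fields ... */
};

static void my_dev_release(struct device *dev)
{
	kfree(container_of(dev, struct my_dev, dev));
}

static struct my_dev *my_dev_add(struct device *parent)
{
	struct my_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;

	d->dev.parent = parent;
	d->dev.release = my_dev_release;
	dev_set_name(&d->dev, "my_dev");

	if (device_register(&d->dev)) {
		/* device_register() initialized a reference even on failure;
		 * put_device() drops it and ends up in my_dev_release().
		 */
		put_device(&d->dev);
		return NULL;
	}

	return d;
}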
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index b54ef1ffcbec..ee7a37eb159a 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1213,8 +1213,10 @@ static int __wacom_devm_sysfs_create_group(struct wacom *wacom,
devres->root = root;
error = sysfs_create_group(devres->root, group);
- if (error)
+ if (error) {
+ devres_free(devres);
return error;
+ }
devres_add(&wacom->hdev->dev, devres);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 6da16a879c9f..5f947ec20dcb 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id)
return tool_type;
}
+static void wacom_exit_report(struct wacom_wac *wacom)
+{
+ struct input_dev *input = wacom->pen_input;
+ struct wacom_features *features = &wacom->features;
+ unsigned char *data = wacom->data;
+ int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
+
+ /*
+ * Reset all states otherwise we lose the initial states
+ * when in-prox next time
+ */
+ input_report_abs(input, ABS_X, 0);
+ input_report_abs(input, ABS_Y, 0);
+ input_report_abs(input, ABS_DISTANCE, 0);
+ input_report_abs(input, ABS_TILT_X, 0);
+ input_report_abs(input, ABS_TILT_Y, 0);
+ if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
+ input_report_key(input, BTN_LEFT, 0);
+ input_report_key(input, BTN_MIDDLE, 0);
+ input_report_key(input, BTN_RIGHT, 0);
+ input_report_key(input, BTN_SIDE, 0);
+ input_report_key(input, BTN_EXTRA, 0);
+ input_report_abs(input, ABS_THROTTLE, 0);
+ input_report_abs(input, ABS_RZ, 0);
+ } else {
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_report_key(input, BTN_STYLUS, 0);
+ input_report_key(input, BTN_STYLUS2, 0);
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_WHEEL, 0);
+ if (features->type >= INTUOS3S)
+ input_report_abs(input, ABS_Z, 0);
+ }
+ input_report_key(input, wacom->tool[idx], 0);
+ input_report_abs(input, ABS_MISC, 0); /* reset tool id */
+ input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
+ wacom->id[idx] = 0;
+}
+
static int wacom_intuos_inout(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
@@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
if (!wacom->id[idx])
return 1;
- /*
- * Reset all states otherwise we lose the initial states
- * when in-prox next time
- */
- input_report_abs(input, ABS_X, 0);
- input_report_abs(input, ABS_Y, 0);
- input_report_abs(input, ABS_DISTANCE, 0);
- input_report_abs(input, ABS_TILT_X, 0);
- input_report_abs(input, ABS_TILT_Y, 0);
- if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
- input_report_key(input, BTN_LEFT, 0);
- input_report_key(input, BTN_MIDDLE, 0);
- input_report_key(input, BTN_RIGHT, 0);
- input_report_key(input, BTN_SIDE, 0);
- input_report_key(input, BTN_EXTRA, 0);
- input_report_abs(input, ABS_THROTTLE, 0);
- input_report_abs(input, ABS_RZ, 0);
- } else {
- input_report_abs(input, ABS_PRESSURE, 0);
- input_report_key(input, BTN_STYLUS, 0);
- input_report_key(input, BTN_STYLUS2, 0);
- input_report_key(input, BTN_TOUCH, 0);
- input_report_abs(input, ABS_WHEEL, 0);
- if (features->type >= INTUOS3S)
- input_report_abs(input, ABS_Z, 0);
- }
- input_report_key(input, wacom->tool[idx], 0);
- input_report_abs(input, ABS_MISC, 0); /* reset tool id */
- input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
- wacom->id[idx] = 0;
+ wacom_exit_report(wacom);
return 2;
}
@@ -1235,6 +1245,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
if (!valid)
continue;
+ if (!prox) {
+ wacom->shared->stylus_in_proximity = false;
+ wacom_exit_report(wacom);
+ input_sync(pen_input);
+ return;
+ }
if (range) {
input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f249a4428458..6ec307c93ece 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -272,7 +272,7 @@ config SENSORS_K8TEMP
config SENSORS_K10TEMP
tristate "AMD Family 10h+ temperature sensor"
- depends on X86 && PCI
+ depends on X86 && PCI && AMD_NB
help
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 051a72eecb24..3b73dee6fdc6 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <asm/amd_nb.h>
#include <asm/processor.h>
MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
@@ -40,6 +41,10 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
#endif
+#ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
+#endif
+
/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK 0xf0000000
#define CPUID_PKGTYPE_F 0x00000000
@@ -59,10 +64,12 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#define NB_CAP_HTC 0x00000400
/*
- * For F15h M60h, functionality of REG_REPORTED_TEMPERATURE
- * has been moved to D0F0xBC_xD820_0CA4 [Reported Temperature
- * Control]
+ * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
+ * and REG_REPORTED_TEMPERATURE have been moved to
+ * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
+ * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
*/
+#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET 0xd8200c64
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
/* F17h M01h Access through SMN */
@@ -70,8 +77,10 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
struct k10temp_data {
struct pci_dev *pdev;
+ void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
int temp_offset;
+ u32 temp_adjust_mask;
};
struct tctl_offset {
@@ -84,6 +93,7 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen 5 1600X", 20000 },
{ 0x17, "AMD Ryzen 7 1700X", 20000 },
{ 0x17, "AMD Ryzen 7 1800X", 20000 },
+ { 0x17, "AMD Ryzen 7 2700X", 10000 },
{ 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
{ 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
{ 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
@@ -92,6 +102,11 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen Threadripper 1910", 10000 },
};
+static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
+{
+ pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
+}
+
static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
{
pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
@@ -108,6 +123,12 @@ static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
mutex_unlock(&nb_smu_ind_mutex);
}
+static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+{
+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
+ F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
+}
+
static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
@@ -116,8 +137,8 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
{
- amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
- F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
+ amd_smn_read(amd_pci_dev_to_node_id(pdev),
+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
static ssize_t temp1_input_show(struct device *dev,
@@ -129,6 +150,8 @@ static ssize_t temp1_input_show(struct device *dev,
data->read_tempreg(data->pdev, &regval);
temp = (regval >> 21) * 125;
+ if (regval & data->temp_adjust_mask)
+ temp -= 49000;
if (temp > data->temp_offset)
temp -= data->temp_offset;
else
@@ -152,8 +175,7 @@ static ssize_t show_temp_crit(struct device *dev,
u32 regval;
int value;
- pci_read_config_dword(data->pdev,
- REG_HARDWARE_THERMAL_CONTROL, &regval);
+ data->read_htcreg(data->pdev, &regval);
value = ((regval >> 16) & 0x7f) * 500 + 52000;
if (show_hyst)
value -= ((regval >> 24) & 0xf) * 500;
@@ -173,13 +195,18 @@ static umode_t k10temp_is_visible(struct kobject *kobj,
struct pci_dev *pdev = data->pdev;
if (index >= 2) {
- u32 reg_caps, reg_htc;
+ u32 reg;
+
+ if (!data->read_htcreg)
+ return 0;
pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
- &reg_caps);
- pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL,
- &reg_htc);
- if (!(reg_caps & NB_CAP_HTC) || !(reg_htc & HTC_ENABLE))
+ &reg);
+ if (!(reg & NB_CAP_HTC))
+ return 0;
+
+ data->read_htcreg(data->pdev, &reg);
+ if (!(reg & HTC_ENABLE))
return 0;
}
return attr->mode;
@@ -259,12 +286,16 @@ static int k10temp_probe(struct pci_dev *pdev,
data->pdev = pdev;
if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
- boot_cpu_data.x86_model == 0x70))
+ boot_cpu_data.x86_model == 0x70)) {
+ data->read_htcreg = read_htcreg_nb_f15;
data->read_tempreg = read_tempreg_nb_f15;
- else if (boot_cpu_data.x86 == 0x17)
+ } else if (boot_cpu_data.x86 == 0x17) {
+ data->temp_adjust_mask = 0x80000;
data->read_tempreg = read_tempreg_nb_f17;
- else
+ } else {
+ data->read_htcreg = read_htcreg_pci;
data->read_tempreg = read_tempreg_pci;
+ }
for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
const struct tctl_offset *entry = &tctl_offset_table[i];
@@ -292,6 +323,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
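The probe hunk above selects different register accessors per CPU family and adds a bit-19 adjustment for family 17h. A stripped-down sketch of how the chosen accessor and adjustment mask are consumed when computing the reading; the ops struct and function name are illustrative, mirroring but not copying the driver:

#include <linux/pci.h>

struct temp_ops {
	void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
	u32 temp_adjust_mask;	/* e.g. 0x80000 on family 17h, else 0 */
	int temp_offset;	/* per-model Tctl offset in millidegrees */
};

static int example_temp_mC(struct temp_ops *ops, struct pci_dev *pdev)
{
	u32 regval;
	int temp;

	ops->read_tempreg(pdev, &regval);
	temp = (regval >> 21) * 125;		/* raw field is in 1/8 degC */
	if (regval & ops->temp_adjust_mask)
		temp -= 49000;			/* offset-mode reading */
	if (temp > ops->temp_offset)
		temp -= ops->temp_offset;
	else
		temp = 0;

	return temp;				/* millidegrees Celsius */
}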
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 8b0bc4fc06e8..b0bc77bf2cd9 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -1380,8 +1380,8 @@ static int __init nct6683_find(int sioaddr, struct nct6683_sio_data *sio_data)
/* Activate logical device if needed */
val = superio_inb(sioaddr, SIO_REG_ENABLE);
if (!(val & 0x01)) {
- pr_err("EC is disabled\n");
- goto fail;
+ pr_warn("Forcibly enabling EC access. Data may be unusable.\n");
+ superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
}
superio_exit(sioaddr);
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 363bf56eb0f2..91976b6ca300 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -170,7 +170,10 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
scmi_chip_info.info = ptr_scmi_ci;
chip_info = &scmi_chip_info;
- for (type = 0; type < hwmon_max && nr_count[type]; type++) {
+ for (type = 0; type < hwmon_max; type++) {
+ if (!nr_count[type])
+ continue;
+
scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type],
type, hwmon_attributes[type]);
*ptr_scmi_ci++ = scmi_hwmon_chan++;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c4865b08d7fb..8d21b9825d71 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -707,7 +707,6 @@ config I2C_MPC
config I2C_MT65XX
tristate "MediaTek I2C adapter"
depends on ARCH_MEDIATEK || COMPILE_TEST
- depends on HAS_DMA
help
This selects the MediaTek(R) Integrated Inter Circuit bus driver
for MT65xx and MT81xx.
@@ -885,7 +884,6 @@ config I2C_SH7760
config I2C_SH_MOBILE
tristate "SuperH Mobile I2C Controller"
- depends on HAS_DMA
depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST
help
If you say yes to this option, support will be included for the
@@ -1098,7 +1096,6 @@ config I2C_XLP9XX
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
- depends on HAS_DMA
depends on ARCH_RENESAS || COMPILE_TEST
select I2C_SLAVE
help
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index fd36c39ddf4e..0cdba29ae0a9 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -209,7 +209,10 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
i2c_dw_disable_int(dev);
/* Enable the adapter */
- __i2c_dw_enable_and_wait(dev, true);
+ __i2c_dw_enable(dev, true);
+
+ /* Dummy read to avoid the register getting stuck on Bay Trail */
+ dw_readl(dev, DW_IC_ENABLE_STATUS);
/* Clear and enable interrupts */
dw_readl(dev, DW_IC_CLR_INTR);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 692b34125866..e0d59e9ff3c6 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -966,8 +966,6 @@ static void i801_enable_host_notify(struct i2c_adapter *adapter)
if (!(priv->features & FEATURE_HOST_NOTIFY))
return;
- priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
-
if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
SMBSLVCMD(priv));
@@ -1615,6 +1613,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
outb_p(inb_p(SMBAUXCTL(priv)) &
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+ /* Remember original Host Notify setting */
+ if (priv->features & FEATURE_HOST_NOTIFY)
+ priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
+
/* Default timeout in interrupt mode: 200 ms */
priv->adapter.timeout = HZ / 5;
@@ -1699,6 +1701,15 @@ static void i801_remove(struct pci_dev *dev)
*/
}
+static void i801_shutdown(struct pci_dev *dev)
+{
+ struct i801_priv *priv = pci_get_drvdata(dev);
+
+ /* Restore config registers to avoid hard hang on some systems */
+ i801_disable_host_notify(priv);
+ pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
+}
+
#ifdef CONFIG_PM
static int i801_suspend(struct device *dev)
{
@@ -1728,6 +1739,7 @@ static struct pci_driver i801_driver = {
.id_table = i801_ids,
.probe = i801_probe,
.remove = i801_remove,
+ .shutdown = i801_shutdown,
.driver = {
.pm = &i801_pm_ops,
},
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 2aa0e83174c5..dae8ac618a52 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -564,10 +564,10 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
* TODO: We could potentially loop and retry in the case
* of MSP_TWI_XFER_TIMEOUT.
*/
- return -1;
+ return -EIO;
}
- return 0;
+ return num;
}
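Both this hunk and the viperboard change further down fix the same contract: an adapter's .master_xfer must return the number of messages processed on success and a negative errno on failure, never 0 or a bare -1. A minimal sketch honouring that convention; the per-message helper is a hypothetical stand-in for the real hardware access:

#include <linux/errno.h>
#include <linux/i2c.h>

/* Hypothetical helper: 0 on success, negative errno otherwise. */
static int example_do_one_msg(struct i2c_adapter *adap, struct i2c_msg *msg)
{
	return 0;	/* stand-in for the real transfer */
}

static int example_master_xfer(struct i2c_adapter *adap,
			       struct i2c_msg *msgs, int num)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		ret = example_do_one_msg(adap, &msgs[i]);
		if (ret)
			return ret;	/* negative errno, e.g. -EIO */
	}

	return num;	/* success: number of messages transferred */
}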
static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 25fcc3c1e32b..4053259bccb8 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -86,6 +86,7 @@ struct sprd_i2c {
u32 count;
int irq;
int err;
+ bool is_suspended;
};
static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count)
@@ -283,6 +284,9 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
int im, ret;
+ if (i2c_dev->is_suspended)
+ return -EBUSY;
+
ret = pm_runtime_get_sync(i2c_dev->dev);
if (ret < 0)
return ret;
@@ -364,13 +368,12 @@ static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id)
struct sprd_i2c *i2c_dev = dev_id;
struct i2c_msg *msg = i2c_dev->msg;
bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
- u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
u32 i2c_tran;
if (msg->flags & I2C_M_RD)
i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
else
- i2c_tran = i2c_count;
+ i2c_tran = i2c_dev->count;
/*
* If we got one ACK from slave when writing data, and we did not
@@ -408,14 +411,13 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id)
{
struct sprd_i2c *i2c_dev = dev_id;
struct i2c_msg *msg = i2c_dev->msg;
- u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
u32 i2c_tran;
if (msg->flags & I2C_M_RD)
i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
else
- i2c_tran = i2c_count;
+ i2c_tran = i2c_dev->count;
/*
* If we did not get one ACK from slave when writing data, then we
@@ -586,11 +588,23 @@ static int sprd_i2c_remove(struct platform_device *pdev)
static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev)
{
+ struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
+
+ i2c_lock_adapter(&i2c_dev->adap);
+ i2c_dev->is_suspended = true;
+ i2c_unlock_adapter(&i2c_dev->adap);
+
return pm_runtime_force_suspend(pdev);
}
static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev)
{
+ struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
+
+ i2c_lock_adapter(&i2c_dev->adap);
+ i2c_dev->is_suspended = false;
+ i2c_unlock_adapter(&i2c_dev->adap);
+
return pm_runtime_force_resume(pdev);
}
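The sprd changes add an is_suspended flag that is toggled under the adapter lock and checked at the top of the transfer path, so no new transfer can start once suspend has begun. A compact sketch of that pattern; the struct and function names are illustrative:

#include <linux/errno.h>
#include <linux/i2c.h>

struct example_i2c {
	struct i2c_adapter adap;
	bool is_suspended;
};

/* Reject transfers while suspended; -EBUSY tells callers to retry later. */
static int example_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct example_i2c *i2c = i2c_get_adapdata(adap);

	if (i2c->is_suspended)
		return -EBUSY;

	/* ... start the hardware transfer ... */
	return num;
}

/* Taking the adapter lock guarantees no transfer is in flight while the
 * flag flips, so the check above cannot race with suspend/resume.
 */
static void example_set_suspended(struct example_i2c *i2c, bool suspended)
{
	i2c_lock_adapter(&i2c->adap);
	i2c->is_suspended = suspended;
	i2c_unlock_adapter(&i2c->adap);
}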
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index e4be86b3de9a..7235c7302bb7 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -337,7 +337,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
}
mutex_unlock(&vb->lock);
}
- return 0;
+ return num;
error:
mutex_unlock(&vb->lock);
return error;
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index a9126b3cda61..7c3b4740b94b 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -445,10 +445,17 @@ static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
msgs[1].buf = buffer;
ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret < 0)
- dev_err(&client->adapter->dev, "i2c read failed\n");
- else
+ if (ret < 0) {
+		/* Getting a NACK is unfortunately normal with some DSDTs */
+ if (ret == -EREMOTEIO)
+ dev_dbg(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
+ data_len, client->addr, cmd, ret);
+ else
+ dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
+ data_len, client->addr, cmd, ret);
+ } else {
memcpy(data, buffer, data_len);
+ }
kfree(buffer);
return ret;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 1adeebaa81b0..1ba40bb2b966 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1845,6 +1845,9 @@ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
unsigned long orig_jiffies;
int ret, try;
+ if (WARN_ON(!msgs || num < 1))
+ return -EINVAL;
+
if (adap->quirks && i2c_check_for_quirks(adap, msgs, num))
return -EOPNOTSUPP;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 036a03f0d0a6..1667b6e7674f 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -280,7 +280,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
*/
if (msgs[i].flags & I2C_M_RECV_LEN) {
if (!(msgs[i].flags & I2C_M_RD) ||
- msgs[i].buf[0] < 1 ||
+ msgs[i].len < 1 || msgs[i].buf[0] < 1 ||
msgs[i].len < msgs[i].buf[0] +
I2C_SMBUS_BLOCK_MAX) {
res = -EINVAL;
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index ee270e065ba9..2a972ed6851b 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -61,9 +61,12 @@ config INFINIBAND_ON_DEMAND_PAGING
pages on demand instead.
config INFINIBAND_ADDR_TRANS
- bool
+ bool "RDMA/CM"
depends on INFINIBAND
default y
+ ---help---
+ Support for RDMA communication manager (CM).
+ This allows for a generic connection abstraction over RDMA.
config INFINIBAND_ADDR_TRANS_CONFIGFS
bool
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index e337b08de2ff..fb2d347f760f 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -291,14 +291,18 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
* so lookup free slot only if requested.
*/
if (pempty && empty < 0) {
- if (data->props & GID_TABLE_ENTRY_INVALID) {
- /* Found an invalid (free) entry; allocate it */
- if (data->props & GID_TABLE_ENTRY_DEFAULT) {
- if (default_gid)
- empty = curr_index;
- } else {
- empty = curr_index;
- }
+ if (data->props & GID_TABLE_ENTRY_INVALID &&
+ (default_gid ==
+ !!(data->props & GID_TABLE_ENTRY_DEFAULT))) {
+ /*
+ * Found an invalid (free) entry; allocate it.
+ * If default GID is requested, then our
+ * found slot must be one of the DEFAULT
+ * reserved slots or we fail.
+ * This ensures that only DEFAULT reserved
+ * slots are used for default property GIDs.
+ */
+ empty = curr_index;
}
}
@@ -420,8 +424,10 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
return ret;
}
-int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
- union ib_gid *gid, struct ib_gid_attr *attr)
+static int
+_ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+ union ib_gid *gid, struct ib_gid_attr *attr,
+ unsigned long mask, bool default_gid)
{
struct ib_gid_table *table;
int ret = 0;
@@ -431,11 +437,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
mutex_lock(&table->lock);
- ix = find_gid(table, gid, attr, false,
- GID_ATTR_FIND_MASK_GID |
- GID_ATTR_FIND_MASK_GID_TYPE |
- GID_ATTR_FIND_MASK_NETDEV,
- NULL);
+ ix = find_gid(table, gid, attr, default_gid, mask, NULL);
if (ix < 0) {
ret = -EINVAL;
goto out_unlock;
@@ -452,6 +454,17 @@ out_unlock:
return ret;
}
+int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+ union ib_gid *gid, struct ib_gid_attr *attr)
+{
+ unsigned long mask = GID_ATTR_FIND_MASK_GID |
+ GID_ATTR_FIND_MASK_GID_TYPE |
+ GID_ATTR_FIND_MASK_DEFAULT |
+ GID_ATTR_FIND_MASK_NETDEV;
+
+ return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
+}
+
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
struct net_device *ndev)
{
@@ -728,7 +741,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
unsigned long gid_type_mask,
enum ib_cache_gid_default_mode mode)
{
- union ib_gid gid;
+ union ib_gid gid = { };
struct ib_gid_attr gid_attr;
struct ib_gid_table *table;
unsigned int gid_type;
@@ -736,7 +749,9 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
- make_default_gid(ndev, &gid);
+ mask = GID_ATTR_FIND_MASK_GID_TYPE |
+ GID_ATTR_FIND_MASK_DEFAULT |
+ GID_ATTR_FIND_MASK_NETDEV;
memset(&gid_attr, 0, sizeof(gid_attr));
gid_attr.ndev = ndev;
@@ -747,12 +762,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
gid_attr.gid_type = gid_type;
if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
- mask = GID_ATTR_FIND_MASK_GID_TYPE |
- GID_ATTR_FIND_MASK_DEFAULT;
+ make_default_gid(ndev, &gid);
__ib_cache_gid_add(ib_dev, port, &gid,
&gid_attr, mask, true);
} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
- ib_cache_gid_del(ib_dev, port, &gid, &gid_attr);
+ _ib_cache_gid_del(ib_dev, port, &gid,
+ &gid_attr, mask, true);
}
}
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 51a641002e10..a693fcd4c513 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -382,6 +382,8 @@ struct cma_hdr {
#define CMA_VERSION 0x00
struct cma_req_info {
+ struct sockaddr_storage listen_addr_storage;
+ struct sockaddr_storage src_addr_storage;
struct ib_device *device;
int port;
union ib_gid local_gid;
@@ -866,7 +868,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
{
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
- union ib_gid sgid;
mutex_lock(&id_priv->qp_mutex);
if (!id_priv->id.qp) {
@@ -889,12 +890,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
if (ret)
goto out;
- ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
- rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index,
- &sgid, NULL);
- if (ret)
- goto out;
-
BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
if (conn_param)
@@ -1340,11 +1335,11 @@ static bool validate_net_dev(struct net_device *net_dev,
}
static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
- const struct cma_req_info *req)
+ struct cma_req_info *req)
{
- struct sockaddr_storage listen_addr_storage, src_addr_storage;
- struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
- *src_addr = (struct sockaddr *)&src_addr_storage;
+ struct sockaddr *listen_addr =
+ (struct sockaddr *)&req->listen_addr_storage;
+ struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
struct net_device *net_dev;
const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
int err;
@@ -1359,11 +1354,6 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
if (!net_dev)
return ERR_PTR(-ENODEV);
- if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
- dev_put(net_dev);
- return ERR_PTR(-EHOSTUNREACH);
- }
-
return net_dev;
}
@@ -1490,15 +1480,51 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
}
}
+ /*
+	 * The net namespace might be getting deleted while the route lookup
+	 * and cm_id lookup are in progress. Therefore, perform the netdevice
+	 * validation and the cm_id lookup under the RCU lock.
+	 * The RCU lock, together with the netdevice state check, synchronizes
+	 * against the netdevice migrating to a different net namespace and
+	 * also avoids the case where the net namespace gets deleted while the
+	 * lookup is in progress.
+	 * If the device state is not IFF_UP, its properties such as ifindex
+	 * and nd_net cannot be trusted to remain valid without the RCU lock.
+	 * dev_change_net_namespace() in net/core/dev.c synchronizes with
+	 * ongoing operations on the net device after the device is closed,
+	 * using synchronize_net().
+ */
+ rcu_read_lock();
+ if (*net_dev) {
+ /*
+		 * If the netdevice is down, it is likely administratively down
+		 * or it might be migrating to a different namespace.
+ * In that case avoid further processing, as the net namespace
+ * or ifindex may change.
+ */
+ if (((*net_dev)->flags & IFF_UP) == 0) {
+ id_priv = ERR_PTR(-EHOSTUNREACH);
+ goto err;
+ }
+
+ if (!validate_net_dev(*net_dev,
+ (struct sockaddr *)&req.listen_addr_storage,
+ (struct sockaddr *)&req.src_addr_storage)) {
+ id_priv = ERR_PTR(-EHOSTUNREACH);
+ goto err;
+ }
+ }
+
bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
rdma_ps_from_service_id(req.service_id),
cma_port_from_service_id(req.service_id));
id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
+err:
+ rcu_read_unlock();
if (IS_ERR(id_priv) && *net_dev) {
dev_put(*net_dev);
*net_dev = NULL;
}
-
return id_priv;
}
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 9821ae900f6d..da12da1c36f6 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -114,7 +114,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
struct sockaddr_storage *mapped_sockaddr,
u8 nl_client)
{
- struct hlist_head *hash_bucket_head;
+ struct hlist_head *hash_bucket_head = NULL;
struct iwpm_mapping_info *map_info;
unsigned long flags;
int ret = -EINVAL;
@@ -142,6 +142,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
}
}
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
+
+ if (!hash_bucket_head)
+ kfree(map_info);
return ret;
}
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index c50596f7f98a..b28452a55a08 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -59,7 +59,7 @@ module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
static struct list_head ib_mad_port_list;
-static u32 ib_mad_client_id = 0;
+static atomic_t ib_mad_client_id = ATOMIC_INIT(0);
/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
@@ -377,7 +377,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
}
spin_lock_irqsave(&port_priv->reg_lock, flags);
- mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
+ mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);
/*
* Make sure MAD registration (if supplied)
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index cc2966380c0c..c0e4fd55e2cc 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -255,6 +255,7 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
struct net_device *rdma_ndev)
{
struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
+ unsigned long gid_type_mask;
if (!rdma_ndev)
return;
@@ -264,21 +265,22 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
rcu_read_lock();
- if (rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) &&
- is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
- BONDING_SLAVE_STATE_INACTIVE) {
- unsigned long gid_type_mask;
-
+ if (((rdma_ndev != event_ndev &&
+ !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
+	     is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
+	     BONDING_SLAVE_STATE_INACTIVE)) {
rcu_read_unlock();
+ return;
+ }
- gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+ rcu_read_unlock();
- ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
- gid_type_mask,
- IB_CACHE_GID_DEFAULT_MODE_DELETE);
- } else {
- rcu_read_unlock();
- }
+ gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+
+ ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
+ gid_type_mask,
+ IB_CACHE_GID_DEFAULT_MODE_DELETE);
}
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 74329483af6d..eab43b17e9cf 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -159,6 +159,23 @@ static void ucma_put_ctx(struct ucma_context *ctx)
complete(&ctx->comp);
}
+/*
+ * Same as ucma_get_ctx but requires that ->cm_id->device is valid, e.g. that
+ * the CM_ID is bound.
+ */
+static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
+{
+ struct ucma_context *ctx = ucma_get_ctx(file, id);
+
+ if (IS_ERR(ctx))
+ return ctx;
+ if (!ctx->cm_id->device) {
+ ucma_put_ctx(ctx);
+ return ERR_PTR(-EINVAL);
+ }
+ return ctx;
+}
+
static void ucma_close_event_id(struct work_struct *work)
{
struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);
@@ -683,7 +700,7 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- if (!rdma_addr_size_in6(&cmd.src_addr) ||
+ if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
!rdma_addr_size_in6(&cmd.dst_addr))
return -EINVAL;
@@ -734,7 +751,7 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- ctx = ucma_get_ctx(file, cmd.id);
+ ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1050,7 +1067,7 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
if (!cmd.conn_param.valid)
return -EINVAL;
- ctx = ucma_get_ctx(file, cmd.id);
+ ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1092,7 +1109,7 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- ctx = ucma_get_ctx(file, cmd.id);
+ ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1120,7 +1137,7 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- ctx = ucma_get_ctx(file, cmd.id);
+ ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1139,7 +1156,7 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- ctx = ucma_get_ctx(file, cmd.id);
+ ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1167,15 +1184,10 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
if (cmd.qp_state > IB_QPS_ERR)
return -EINVAL;
- ctx = ucma_get_ctx(file, cmd.id);
+ ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- if (!ctx->cm_id->device) {
- ret = -EINVAL;
- goto out;
- }
-
resp.qp_attr_mask = 0;
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.qp_state = cmd.qp_state;
@@ -1316,13 +1328,13 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
- return -EINVAL;
-
optval = memdup_user(u64_to_user_ptr(cmd.optval),
cmd.optlen);
if (IS_ERR(optval)) {
@@ -1384,7 +1396,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
else
return -EINVAL;
- ctx = ucma_get_ctx(file, cmd->id);
+ ctx = ucma_get_ctx_dev(file, cmd->id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 13cb5e4deb86..21a887c9523b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -691,6 +691,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
mr->device = pd->device;
mr->pd = pd;
+ mr->dm = NULL;
mr->uobject = uobj;
atomic_inc(&pd->usecnt);
mr->res.type = RDMA_RESTRACK_MR;
@@ -765,6 +766,11 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
mr = uobj->object;
+ if (mr->dm) {
+ ret = -EINVAL;
+ goto put_uobjs;
+ }
+
if (cmd.flags & IB_MR_REREG_ACCESS) {
ret = ib_check_mr_access(cmd.access_flags);
if (ret)
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 8c93970dc8f1..8d32c4ae368c 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -234,6 +234,15 @@ static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *met
return -EINVAL;
}
+ for (; i < method_spec->num_buckets; i++) {
+ struct uverbs_attr_spec_hash *attr_spec_bucket =
+ method_spec->attr_buckets[i];
+
+ if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask,
+ attr_spec_bucket->num_attrs))
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index cbcec3da12f6..b4f016dfa23d 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -363,28 +363,28 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(struct ib_device
static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
[IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = {
- .ptr = {
+ { .ptr = {
.type = UVERBS_ATTR_TYPE_PTR_IN,
UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_keymat_aes_gcm),
.flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
- },
+ } },
},
};
static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = {
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = {
- .ptr = {
+ { .ptr = {
.type = UVERBS_ATTR_TYPE_PTR_IN,
/* No need to specify any data */
.len = 0,
- }
+ } }
},
[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = {
- .ptr = {
+ { .ptr = {
.type = UVERBS_ATTR_TYPE_PTR_IN,
UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp, size),
.flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
- }
+ } }
},
};
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 7eff3aeffe01..6ddfb1fade79 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1656,6 +1656,7 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
if (!IS_ERR(mr)) {
mr->device = pd->device;
mr->pd = pd;
+ mr->dm = NULL;
mr->uobject = NULL;
atomic_inc(&pd->usecnt);
mr->need_inval = false;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 6f2b26126c64..2be2e1ac1b5f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -315,7 +315,7 @@ static void advance_oldest_read(struct t4_wq *wq)
* Deal with out-of-order and/or completions that complete
* prior unsignalled WRs.
*/
-void c4iw_flush_hw_cq(struct c4iw_cq *chp)
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
{
struct t4_cqe *hw_cqe, *swcqe, read_cqe;
struct c4iw_qp *qhp;
@@ -339,6 +339,13 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
if (qhp == NULL)
goto next_cqe;
+ if (flush_qhp != qhp) {
+ spin_lock(&qhp->lock);
+
+ if (qhp->wq.flushed == 1)
+ goto next_cqe;
+ }
+
if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
goto next_cqe;
@@ -390,6 +397,8 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
next_cqe:
t4_hwcq_consume(&chp->cq);
ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
+ if (qhp && flush_qhp != qhp)
+ spin_unlock(&qhp->lock);
}
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index feeb8ee6f4a2..44161ca4d2a8 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -875,6 +875,11 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->status_page->db_off = 0;
+ init_completion(&rdev->rqt_compl);
+ init_completion(&rdev->pbl_compl);
+ kref_init(&rdev->rqt_kref);
+ kref_init(&rdev->pbl_kref);
+
return 0;
err_free_status_page_and_wr_log:
if (c4iw_wr_log && rdev->wr_log)
@@ -893,13 +898,15 @@ destroy_resource:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
- destroy_workqueue(rdev->free_workq);
kfree(rdev->wr_log);
c4iw_release_dev_ucontext(rdev, &rdev->uctx);
free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
+ wait_for_completion(&rdev->pbl_compl);
+ wait_for_completion(&rdev->rqt_compl);
c4iw_ocqp_pool_destroy(rdev);
+ destroy_workqueue(rdev->free_workq);
c4iw_destroy_resource(&rdev->resource);
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index cc929002c05e..831027717121 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -185,6 +185,10 @@ struct c4iw_rdev {
struct wr_log_entry *wr_log;
int wr_log_size;
struct workqueue_struct *free_workq;
+ struct completion rqt_compl;
+ struct completion pbl_compl;
+ struct kref rqt_kref;
+ struct kref pbl_kref;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -1049,7 +1053,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
-void c4iw_flush_hw_cq(struct c4iw_cq *chp);
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index de77b6027d69..ae167b686608 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1343,12 +1343,12 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
qhp->wq.flushed = 1;
t4_set_wq_in_error(&qhp->wq);
- c4iw_flush_hw_cq(rchp);
+ c4iw_flush_hw_cq(rchp, qhp);
c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
if (schp != rchp)
- c4iw_flush_hw_cq(schp);
+ c4iw_flush_hw_cq(schp, qhp);
sq_flushed = c4iw_flush_sq(qhp);
spin_unlock(&qhp->lock);
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 3cf25997ed2b..0ef25ae05e6f 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -260,12 +260,22 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
rdev->stats.pbl.max = rdev->stats.pbl.cur;
+ kref_get(&rdev->pbl_kref);
} else
rdev->stats.pbl.fail++;
mutex_unlock(&rdev->stats.lock);
return (u32)addr;
}
+static void destroy_pblpool(struct kref *kref)
+{
+ struct c4iw_rdev *rdev;
+
+ rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
+ gen_pool_destroy(rdev->pbl_pool);
+ complete(&rdev->pbl_compl);
+}
+
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
pr_debug("addr 0x%x size %d\n", addr, size);
@@ -273,6 +283,7 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+ kref_put(&rdev->pbl_kref, destroy_pblpool);
}
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
@@ -310,7 +321,7 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
- gen_pool_destroy(rdev->pbl_pool);
+ kref_put(&rdev->pbl_kref, destroy_pblpool);
}
/*
@@ -331,12 +342,22 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
rdev->stats.rqt.max = rdev->stats.rqt.cur;
+ kref_get(&rdev->rqt_kref);
} else
rdev->stats.rqt.fail++;
mutex_unlock(&rdev->stats.lock);
return (u32)addr;
}
+static void destroy_rqtpool(struct kref *kref)
+{
+ struct c4iw_rdev *rdev;
+
+ rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
+ gen_pool_destroy(rdev->rqt_pool);
+ complete(&rdev->rqt_compl);
+}
+
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
pr_debug("addr 0x%x size %d\n", addr, size << 6);
@@ -344,6 +365,7 @@ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
mutex_unlock(&rdev->stats.lock);
gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+ kref_put(&rdev->rqt_kref, destroy_rqtpool);
}
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
@@ -380,7 +402,7 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
- gen_pool_destroy(rdev->rqt_pool);
+ kref_put(&rdev->rqt_kref, destroy_rqtpool);
}
/*
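
The resource.c hunks above convert both the PBL and RQT pools to a reference-counted teardown: every successful allocation takes a kref, every free drops one, and gen_pool_destroy() only runs when the last reference goes away. A minimal sketch of the same pattern, with illustrative names (my_dev, my_pool, and so on) rather than the driver's:

#include <linux/completion.h>
#include <linux/genalloc.h>
#include <linux/kref.h>

struct my_dev {
        struct gen_pool *my_pool;
        struct kref my_pool_kref;       /* one ref per outstanding allocation + 1 */
        struct completion my_pool_compl;
};

static void my_pool_release(struct kref *kref)
{
        struct my_dev *d = container_of(kref, struct my_dev, my_pool_kref);

        gen_pool_destroy(d->my_pool);   /* safe: no allocations remain */
        complete(&d->my_pool_compl);    /* unblock the teardown path */
}

static unsigned long my_pool_alloc(struct my_dev *d, size_t size)
{
        unsigned long addr = gen_pool_alloc(d->my_pool, size);

        if (addr)
                kref_get(&d->my_pool_kref);
        return addr;
}

static void my_pool_free(struct my_dev *d, unsigned long addr, size_t size)
{
        gen_pool_free(d->my_pool, addr, size);
        kref_put(&d->my_pool_kref, my_pool_release);
}

static void my_pool_destroy(struct my_dev *d)
{
        /* drop the initial reference taken by kref_init() at open time; the
         * pool is actually destroyed once the last allocation is freed */
        kref_put(&d->my_pool_kref, my_pool_release);
}

The device.c hunks earlier pair with this: the close path calls the destroy helpers and then wait_for_completion() on the two completions, so teardown blocks until every outstanding allocation has been returned, and only after that destroys the workqueue.
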
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index a97055dd4fbd..b5fab55cc275 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -412,7 +412,6 @@ static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
static int get_irq_affinity(struct hfi1_devdata *dd,
struct hfi1_msix_entry *msix)
{
- int ret;
cpumask_var_t diff;
struct hfi1_affinity_node *entry;
struct cpu_mask_set *set = NULL;
@@ -424,10 +423,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
extra[0] = '\0';
cpumask_clear(&msix->mask);
- ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
entry = node_affinity_lookup(dd->node);
switch (msix->type) {
@@ -458,6 +453,9 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
* finds its CPU here.
*/
if (cpu == -1 && set) {
+ if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
+ return -ENOMEM;
+
if (cpumask_equal(&set->mask, &set->used)) {
/*
* We've used up all the CPUs, bump up the generation
@@ -469,6 +467,8 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
cpumask_andnot(diff, &set->mask, &set->used);
cpu = cpumask_first(diff);
cpumask_set_cpu(cpu, &set->used);
+
+ free_cpumask_var(diff);
}
cpumask_set_cpu(cpu, &msix->mask);
@@ -482,7 +482,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
hfi1_setup_sdma_notifier(msix);
}
- free_cpumask_var(diff);
return 0;
}
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 46d1475b2154..bd837a048bf4 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -433,31 +433,43 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
bool do_cnp)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct ib_other_headers *ohdr = pkt->ohdr;
struct ib_grh *grh = pkt->grh;
u32 rqpn = 0, bth1;
- u16 pkey, rlid, dlid = ib_get_dlid(pkt->hdr);
+ u16 pkey;
+ u32 rlid, slid, dlid = 0;
u8 hdr_type, sc, svc_type;
bool is_mcast = false;
+ /* can be called from prescan */
if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
is_mcast = hfi1_is_16B_mcast(dlid);
pkey = hfi1_16B_get_pkey(pkt->hdr);
sc = hfi1_16B_get_sc(pkt->hdr);
+ dlid = hfi1_16B_get_dlid(pkt->hdr);
+ slid = hfi1_16B_get_slid(pkt->hdr);
hdr_type = HFI1_PKT_TYPE_16B;
} else {
is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
(dlid != be16_to_cpu(IB_LID_PERMISSIVE));
pkey = ib_bth_get_pkey(ohdr);
sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
+ dlid = ib_get_dlid(pkt->hdr);
+ slid = ib_get_slid(pkt->hdr);
hdr_type = HFI1_PKT_TYPE_9B;
}
switch (qp->ibqp.qp_type) {
+ case IB_QPT_UD:
+ dlid = ppd->lid;
+ rlid = slid;
+ rqpn = ib_get_sqpn(pkt->ohdr);
+ svc_type = IB_CC_SVCTYPE_UD;
+ break;
case IB_QPT_SMI:
case IB_QPT_GSI:
- case IB_QPT_UD:
- rlid = ib_get_slid(pkt->hdr);
+ rlid = slid;
rqpn = ib_get_sqpn(pkt->ohdr);
svc_type = IB_CC_SVCTYPE_UD;
break;
@@ -482,7 +494,6 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
dlid, rlid, sc, grh);
if (!is_mcast && (bth1 & IB_BECN_SMASK)) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 lqpn = bth1 & RVT_QPN_MASK;
u8 sl = ibp->sc_to_sl[sc];
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 32c48265405e..cac2c62bc42d 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1537,13 +1537,13 @@ void set_link_ipg(struct hfi1_pportdata *ppd);
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
u32 rqpn, u8 svc_type);
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
- u32 pkey, u32 slid, u32 dlid, u8 sc5,
+ u16 pkey, u32 slid, u32 dlid, u8 sc5,
const struct ib_grh *old_grh);
void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
- u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
+ u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
u8 sc5, const struct ib_grh *old_grh);
typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
- u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
+ u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
u8 sc5, const struct ib_grh *old_grh);
#define PKEY_CHECK_INVALID -1
@@ -2437,7 +2437,7 @@ static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr,
((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT);
lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) |
((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT);
- lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | (pkey << OPA_16B_PKEY_SHIFT);
+ lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | ((u32)pkey << OPA_16B_PKEY_SHIFT);
lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4;
hdr->lrh[0] = lrh0;
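
One reason the explicit (u32) widening above matters: pkey is now a u16, which promotes to a signed int before the shift, and a pkey with its top bit set shifted toward the high half of lrh2 would overflow that signed int, which C leaves undefined. A minimal illustration, where 16 stands in for OPA_16B_PKEY_SHIFT and is assumed to place the pkey in the upper half of the word:

#include <linux/types.h>

/* Illustration only; 16 stands in for OPA_16B_PKEY_SHIFT. */
static u32 pkey_shift_example(u16 pkey)         /* e.g. pkey = 0xFFFF */
{
        /* without the cast, pkey promotes to signed int and shifting a set
         * top bit up by 16 overflows int (undefined behaviour) */
        return (u32)pkey << 16;                 /* unsigned shift, well defined */
}
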
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 33eba2356742..6309edf811df 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -88,9 +88,9 @@
* pio buffers per ctxt, etc.) Zero means use one user context per CPU.
*/
int num_user_contexts = -1;
-module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
+module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
- num_user_contexts, "Set max number of user contexts to use");
+ num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");
uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
@@ -1209,30 +1209,49 @@ static void finalize_asic_data(struct hfi1_devdata *dd,
kfree(ad);
}
-static void __hfi1_free_devdata(struct kobject *kobj)
+/**
+ * hfi1_clean_devdata - cleans up per-unit data structure
+ * @dd: pointer to a valid devdata structure
+ *
+ * It cleans up all data structures set up by
+ * hfi1_alloc_devdata().
+ */
+static void hfi1_clean_devdata(struct hfi1_devdata *dd)
{
- struct hfi1_devdata *dd =
- container_of(kobj, struct hfi1_devdata, kobj);
struct hfi1_asic_data *ad;
unsigned long flags;
spin_lock_irqsave(&hfi1_devs_lock, flags);
- idr_remove(&hfi1_unit_table, dd->unit);
- list_del(&dd->list);
+ if (!list_empty(&dd->list)) {
+ idr_remove(&hfi1_unit_table, dd->unit);
+ list_del_init(&dd->list);
+ }
ad = release_asic_data(dd);
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
- if (ad)
- finalize_asic_data(dd, ad);
+
+ finalize_asic_data(dd, ad);
free_platform_config(dd);
rcu_barrier(); /* wait for rcu callbacks to complete */
free_percpu(dd->int_counter);
free_percpu(dd->rcv_limit);
free_percpu(dd->send_schedule);
free_percpu(dd->tx_opstats);
+ dd->int_counter = NULL;
+ dd->rcv_limit = NULL;
+ dd->send_schedule = NULL;
+ dd->tx_opstats = NULL;
sdma_clean(dd, dd->num_sdma);
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
+static void __hfi1_free_devdata(struct kobject *kobj)
+{
+ struct hfi1_devdata *dd =
+ container_of(kobj, struct hfi1_devdata, kobj);
+
+ hfi1_clean_devdata(dd);
+}
+
static struct kobj_type hfi1_devdata_type = {
.release = __hfi1_free_devdata,
};
@@ -1265,6 +1284,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
return ERR_PTR(-ENOMEM);
dd->num_pports = nports;
dd->pport = (struct hfi1_pportdata *)(dd + 1);
+ dd->pcidev = pdev;
+ pci_set_drvdata(pdev, dd);
INIT_LIST_HEAD(&dd->list);
idr_preload(GFP_KERNEL);
@@ -1331,9 +1352,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
return dd;
bail:
- if (!list_empty(&dd->list))
- list_del_init(&dd->list);
- rvt_dealloc_device(&dd->verbs_dev.rdi);
+ hfi1_clean_devdata(dd);
return ERR_PTR(ret);
}
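
The init.c rework above makes one cleanup function, hfi1_clean_devdata(), safe to call from both the hfi1_alloc_devdata() error path and the final kobject release: freed pointers are NULLed and the unit/list removal is guarded by list_empty(), so a second invocation is a no-op. A minimal sketch of that idempotent-cleanup idiom, with illustrative names:

#include <linux/idr.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_IDR(my_unit_table);               /* illustrative */

struct my_dev {
        struct list_head list;
        int unit;
        u64 __percpu *counters;
};

static void my_clean(struct my_dev *d)
{
        if (!list_empty(&d->list)) {            /* only unlink if still linked */
                idr_remove(&my_unit_table, d->unit);
                list_del_init(&d->list);        /* re-initialised for any later call */
        }

        free_percpu(d->counters);               /* free_percpu(NULL) is a no-op */
        d->counters = NULL;                     /* so a second call does nothing */
}
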
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 83d66e862207..c1c982908b4b 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -163,9 +163,6 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
resource_size_t addr;
int ret = 0;
- dd->pcidev = pdev;
- pci_set_drvdata(pdev, dd);
-
addr = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index d486355880cb..cbf7faa5038c 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -199,6 +199,7 @@ void free_platform_config(struct hfi1_devdata *dd)
{
/* Release memory allocated for eprom or fallback file read. */
kfree(dd->platform_config.data);
+ dd->platform_config.data = NULL;
}
void get_port_type(struct hfi1_pportdata *ppd)
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 1869f639c3ae..b5966991d647 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -204,6 +204,8 @@ static void clean_i2c_bus(struct hfi1_i2c_bus *bus)
void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
{
+ if (!ad)
+ return;
clean_i2c_bus(ad->i2c_bus0);
ad->i2c_bus0 = NULL;
clean_i2c_bus(ad->i2c_bus1);
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 3daa94bdae3a..c0071ca4147a 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -733,6 +733,20 @@ static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
ohdr->bth[2] = cpu_to_be32(bth2);
}
+/**
+ * hfi1_make_ruc_header_16B - build a 16B header
+ * @qp: the queue pair
+ * @ohdr: a pointer to the destination header memory
+ * @bth0: bth0 passed in from the RC/UC builder
+ * @bth2: bth2 passed in from the RC/UC builder
+ * @middle: non-zero indicates ahg "could" be used
+ * @ps: the current packet state
+ *
+ * This routine may disarm ahg under these situations:
+ * - packet needs a GRH
+ * - BECN needed
+ * - migration state not IB_MIG_MIGRATED
+ */
static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
struct ib_other_headers *ohdr,
u32 bth0, u32 bth2, int middle,
@@ -777,6 +791,12 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
else
middle = 0;
+ if (qp->s_flags & RVT_S_ECN) {
+ qp->s_flags &= ~RVT_S_ECN;
+ /* we recently received a FECN, so return a BECN */
+ becn = true;
+ middle = 0;
+ }
if (middle)
build_ahg(qp, bth2);
else
@@ -784,11 +804,6 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
bth0 |= pkey;
bth0 |= extra_bytes << 20;
- if (qp->s_flags & RVT_S_ECN) {
- qp->s_flags &= ~RVT_S_ECN;
- /* we recently received a FECN, so return a BECN */
- becn = true;
- }
hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
if (!ppd->lid)
@@ -806,6 +821,20 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
pkey, becn, 0, l4, priv->s_sc);
}
+/**
+ * hfi1_make_ruc_header_9B - build a 9B header
+ * @qp: the queue pair
+ * @ohdr: a pointer to the destination header memory
+ * @bth0: bth0 passed in from the RC/UC builder
+ * @bth2: bth2 passed in from the RC/UC builder
+ * @middle: non-zero indicates ahg "could" be used
+ * @ps: the current packet state
+ *
+ * This routine may disarm ahg under these situations:
+ * - packet needs a GRH
+ * - BECN needed
+ * - migration state not IB_MIG_MIGRATED
+ */
static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
struct ib_other_headers *ohdr,
u32 bth0, u32 bth2, int middle,
@@ -839,6 +868,12 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
else
middle = 0;
+ if (qp->s_flags & RVT_S_ECN) {
+ qp->s_flags &= ~RVT_S_ECN;
+ /* we recently received a FECN, so return a BECN */
+ bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
+ middle = 0;
+ }
if (middle)
build_ahg(qp, bth2);
else
@@ -846,11 +881,6 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
bth0 |= pkey;
bth0 |= extra_bytes << 20;
- if (qp->s_flags & RVT_S_ECN) {
- qp->s_flags &= ~RVT_S_ECN;
- /* we recently received a FECN, so return a BECN */
- bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
- }
hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
lrh0,
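
Both ruc.c builders above move the BECN check ahead of the ahg decision and clear 'middle' when a BECN has to be returned: returning a BECN changes the header contents, so the cached adaptive header can no longer be reused. A condensed view of the intended ordering (not the full header builder; RVT_S_ECN and RVT_S_AHG_VALID are the real flag names):

static void becn_before_ahg(struct rvt_qp *qp, u32 bth2, int middle)
{
        bool becn = false;

        if (qp->s_flags & RVT_S_ECN) {
                qp->s_flags &= ~RVT_S_ECN;      /* consume the received FECN */
                becn = true;
                middle = 0;                     /* header content changes, so the
                                                   cached ahg header can't be reused */
        }

        if (middle)
                build_ahg(qp, bth2);            /* fast path: reuse cached header */
        else
                qp->s_flags &= ~RVT_S_AHG_VALID;

        (void)becn;                             /* carried into the BTH/LRH later */
}
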
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index bcf3b0bebac8..69c17a5ef038 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -628,7 +628,7 @@ int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
}
void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
- u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
+ u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
u8 sc5, const struct ib_grh *old_grh)
{
u64 pbc, pbc_flags = 0;
@@ -687,7 +687,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
}
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
- u32 pkey, u32 slid, u32 dlid, u8 sc5,
+ u16 pkey, u32 slid, u32 dlid, u8 sc5,
const struct ib_grh *old_grh)
{
u64 pbc, pbc_flags = 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 0eeabfbee192..63b5b3edabcb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -912,7 +912,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
obj_per_chunk = buf_chunk_size / obj_size;
num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
bt_chunk_num = bt_chunk_size / 8;
- if (table->type >= HEM_TYPE_MTT)
+ if (type >= HEM_TYPE_MTT)
num_bt_l0 = bt_chunk_num;
table->hem = kcalloc(num_hem, sizeof(*table->hem),
@@ -920,7 +920,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
if (!table->hem)
goto err_kcalloc_hem_buf;
- if (check_whether_bt_num_3(table->type, hop_num)) {
+ if (check_whether_bt_num_3(type, hop_num)) {
unsigned long num_bt_l1;
num_bt_l1 = (num_hem + bt_chunk_num - 1) /
@@ -939,8 +939,8 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
goto err_kcalloc_l1_dma;
}
- if (check_whether_bt_num_2(table->type, hop_num) ||
- check_whether_bt_num_3(table->type, hop_num)) {
+ if (check_whether_bt_num_2(type, hop_num) ||
+ check_whether_bt_num_3(type, hop_num)) {
table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
GFP_KERNEL);
if (!table->bt_l0)
@@ -1039,14 +1039,14 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
- hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
if (hr_dev->caps.trrl_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.trrl_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
- hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->mr_table.mtt_cqe_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 8b84ab7800d8..25916e8522ed 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -71,6 +71,11 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
return -EINVAL;
}
+ if (wr->opcode == IB_WR_RDMA_READ) {
+ dev_err(hr_dev->dev, "Not support inline data!\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < wr->num_sge; i++) {
memcpy(wqe, ((void *)wr->sg_list[i].addr),
wr->sg_list[i].length);
@@ -148,7 +153,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_UD)) {
dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
- *bad_wr = NULL;
+ *bad_wr = wr;
return -EOPNOTSUPP;
}
@@ -182,7 +187,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
wr->wr_id;
- owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1;
+ owner_bit =
+ ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
/* Corresponding to the QP type, wqe process separately */
if (ibqp->qp_type == IB_QPT_GSI) {
@@ -456,6 +462,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
} else {
dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
spin_unlock_irqrestore(&qp->sq.lock, flags);
+ *bad_wr = wr;
return -EOPNOTSUPP;
}
}
@@ -2592,10 +2599,12 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, 0);
- roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
- V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
- roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
- V2_QPC_BYTE_56_DQPN_S, 0);
+ if (attr_mask & IB_QP_DEST_QPN) {
+ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
+ roce_set_field(qpc_mask->byte_56_dqpn_err,
+ V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
+ }
roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
@@ -2650,8 +2659,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
return -EINVAL;
}
- if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
- (attr_mask & IB_QP_PKEY_INDEX) || (attr_mask & IB_QP_QKEY)) {
+ if (attr_mask & IB_QP_ALT_PATH) {
dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
return -EINVAL;
}
@@ -2800,10 +2808,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_140_RR_MAX_S, 0);
}
- roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
- V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
- roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
- V2_QPC_BYTE_56_DQPN_S, 0);
+ if (attr_mask & IB_QP_DEST_QPN) {
+ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
+ roce_set_field(qpc_mask->byte_56_dqpn_err,
+ V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
+ }
/* Configure GID index */
port_num = rdma_ah_get_port_num(&attr->ah_attr);
@@ -2845,7 +2855,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
- else
+ else if (attr_mask & IB_QP_PATH_MTU)
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
@@ -2922,11 +2932,9 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
return -EINVAL;
}
- /* If exist optional param, return error */
- if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
- (attr_mask & IB_QP_QKEY) || (attr_mask & IB_QP_PATH_MIG_STATE) ||
- (attr_mask & IB_QP_CUR_STATE) ||
- (attr_mask & IB_QP_MIN_RNR_TIMER)) {
+ /* Not support alternate path and path migration */
+ if ((attr_mask & IB_QP_ALT_PATH) ||
+ (attr_mask & IB_QP_PATH_MIG_STATE)) {
dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
return -EINVAL;
}
@@ -3161,7 +3169,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
(cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR)) {
+ (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
/* Nothing */
;
} else {
@@ -4478,7 +4487,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
- dev_err(dev, "[mailbox cmd] creat eqc failed.\n");
+ dev_err(dev, "[mailbox cmd] create eqc failed.\n");
goto err_cmd_mbox;
}
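
The owner-bit hunk above fixes multi-WR posts: sq.head is only advanced after the posting loop, so the pass parity has to be derived from the slot each WQE actually occupies, i.e. head + nreq. A worked example under assumed values:

/* Assume wqe_cnt = 8 (ilog2(wqe_cnt) = 3) and sq.head = 6 when three WRs
 * arrive in one post_send call:
 *
 *   nreq                 0   1   2
 *   slot = head + nreq   6   7   8   (8 wraps to ring index 0)
 *   pass = slot >> 3     0   0   1
 *
 * The third WQE belongs to the next pass of the ring, so its owner bit must
 * be the inverse of the first two.  Deriving the pass from sq.head alone, as
 * the old code did, gave all three WQEs pass 0 because sq.head is not
 * advanced until the loop finishes, so the wrapped WQE carried the wrong
 * owner bit.
 */
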
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index e289a924e789..d4aad34c21e2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -620,7 +620,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
to_hr_ucontext(ib_pd->uobject->context),
ucmd.db_addr, &hr_qp->rdb);
if (ret) {
- dev_err(dev, "rp record doorbell map failed!\n");
+ dev_err(dev, "rq record doorbell map failed!\n");
goto err_mtt;
}
}
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 17f4f151a97f..61d8b06375bb 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -346,7 +346,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
/* Add to the first block the misalignment that it suffers from. */
total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
last_block_end = current_block_start + current_block_len;
- last_block_aligned_end = round_up(last_block_end, 1 << block_shift);
+ last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
total_len += (last_block_aligned_end - last_block_end);
if (total_len & ((1ULL << block_shift) - 1ULL))
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 50af8915e7ec..199648adac74 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -673,7 +673,8 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
MLX4_IB_RX_HASH_SRC_PORT_TCP |
MLX4_IB_RX_HASH_DST_PORT_TCP |
MLX4_IB_RX_HASH_SRC_PORT_UDP |
- MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+ MLX4_IB_RX_HASH_DST_PORT_UDP |
+ MLX4_IB_RX_HASH_INNER)) {
pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
ucmd->rx_hash_fields_mask);
return (-EOPNOTSUPP);
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index bce263b92821..fb4d77be019b 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,6 +1,7 @@
config MLX5_INFINIBAND
tristate "Mellanox Connect-IB HCA support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n
---help---
This driver provides low-level InfiniBand support for
Mellanox Connect-IB PCI Express host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 77d257ec899b..6d52ea03574e 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -849,7 +849,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
return 0;
err_cqb:
- kfree(*cqb);
+ kvfree(*cqb);
err_db:
mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index daa919e5a442..b4d8ff8ab807 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -52,7 +52,6 @@
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
-#include <linux/mlx5/fs_helpers.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
@@ -180,7 +179,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
if (rep_ndev == ndev)
roce->netdev = (event == NETDEV_UNREGISTER) ?
NULL : ndev;
- } else if (ndev->dev.parent == &ibdev->mdev->pdev->dev) {
+ } else if (ndev->dev.parent == &mdev->pdev->dev) {
roce->netdev = (event == NETDEV_UNREGISTER) ?
NULL : ndev;
}
@@ -4757,7 +4756,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- return mlx5_get_vector_affinity(dev->mdev, comp_vector);
+ return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
}
/* The mlx5_ib_multiport_mutex should be held when calling this function */
@@ -5427,9 +5426,7 @@ static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
{
dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
- if (!dev->mdev->priv.uar)
- return -ENOMEM;
- return 0;
+ return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
}
static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1520a2f20f98..90a9c461cedc 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -866,25 +866,28 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
int *order)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct ib_umem *u;
int err;
- *umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags, 0);
- err = PTR_ERR_OR_ZERO(*umem);
+ *umem = NULL;
+
+ u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
+ err = PTR_ERR_OR_ZERO(u);
if (err) {
- *umem = NULL;
- mlx5_ib_err(dev, "umem get failed (%d)\n", err);
+ mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
return err;
}
- mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+ mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
page_shift, ncont, order);
if (!*npages) {
mlx5_ib_warn(dev, "avoid zero region\n");
- ib_umem_release(*umem);
+ ib_umem_release(u);
return -EINVAL;
}
+ *umem = u;
+
mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
*npages, *ncont, *order, *page_shift);
@@ -1458,13 +1461,12 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
int access_flags = flags & IB_MR_REREG_ACCESS ?
new_access_flags :
mr->access_flags;
- u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
- u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
int page_shift = 0;
int upd_flags = 0;
int npages = 0;
int ncont = 0;
int order = 0;
+ u64 addr, len;
int err;
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
@@ -1472,6 +1474,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
+ if (!mr->umem)
+ return -EINVAL;
+
+ if (flags & IB_MR_REREG_TRANS) {
+ addr = virt_addr;
+ len = length;
+ } else {
+ addr = mr->umem->address;
+ len = mr->umem->length;
+ }
+
if (flags != IB_MR_REREG_PD) {
/*
* Replace umem. This needs to be done whether or not UMR is
@@ -1479,6 +1492,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
*/
flags |= IB_MR_REREG_TRANS;
ib_umem_release(mr->umem);
+ mr->umem = NULL;
err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
&npages, &page_shift, &ncont, &order);
if (err)
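
The mr_umem_get() change above is the usual "publish through the out-pointer only on success" pattern: the function works on a local, and *umem stays NULL on every failure path so callers can never release a stale or error-valued pointer. A minimal sketch, with hypothetical helpers:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

struct thing;                                   /* opaque, illustrative */
struct thing *acquire_thing(void);              /* hypothetical */
bool validate_thing(struct thing *t);           /* hypothetical */
void release_thing(struct thing *t);            /* hypothetical */

static int get_thing(struct thing **out)
{
        struct thing *t;

        *out = NULL;                            /* never leave a stale pointer */
        t = acquire_thing();
        if (IS_ERR(t))
                return PTR_ERR(t);

        if (!validate_thing(t)) {
                release_thing(t);
                return -EINVAL;                 /* *out is still NULL here */
        }

        *out = t;                               /* publish only on full success */
        return 0;
}
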
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7ed4b70f6447..87b7c1be2a11 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -259,7 +259,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+ if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
+ return -EINVAL;
qp->rq.wqe_shift = ucmd->rq_wqe_shift;
+ if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+ return -EINVAL;
qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
} else {
@@ -2451,18 +2455,18 @@ enum {
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
- if (rate == IB_RATE_PORT_CURRENT) {
+ if (rate == IB_RATE_PORT_CURRENT)
return 0;
- } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
+
+ if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
return -EINVAL;
- } else {
- while (rate != IB_RATE_2_5_GBPS &&
- !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
- MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
- --rate;
- }
- return rate + MLX5_STAT_RATE_OFFSET;
+ while (rate != IB_RATE_PORT_CURRENT &&
+ !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
+ MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
+ --rate;
+
+ return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
}
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 0a75164cedea..007d5e8a0121 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -461,7 +461,7 @@ static bool nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
/**
* nes_netdev_start_xmit
*/
-static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index 61927c165b59..4cf11063e0b5 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -390,7 +390,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
.name = "IB_OPCODE_RC_SEND_ONLY_INV",
.mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
| RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_END_MASK,
+ | RXE_END_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
.offset = {
[RXE_BTH] = 0,
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 7bdaf71b8221..785199990457 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -728,7 +728,6 @@ next_wqe:
rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
if (ret == -EAGAIN) {
- kfree_skb(skb);
rxe_run_task(&qp->req.task, 1);
goto exit;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index a65c9969f7fc..955ff3b6da9c 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -742,7 +742,6 @@ static enum resp_states read_reply(struct rxe_qp *qp,
err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
if (err) {
pr_err("Failed sending RDMA reply.\n");
- kfree_skb(skb);
return RESPST_ERR_RNR;
}
@@ -954,10 +953,8 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
}
err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
- if (err) {
+ if (err)
pr_err_ratelimited("Failed sending ack\n");
- kfree_skb(skb);
- }
err1:
return err;
@@ -1141,7 +1138,6 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
if (rc) {
pr_err("Failed resending result. This flow is not handled - skb ignored\n");
rxe_drop_ref(qp);
- kfree_skb(skb_copy);
rc = RESPST_CLEANUP;
goto out;
}
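
The rxe hunks above drop the kfree_skb() calls after failed transmits; with the related rxe changes the transmit path owns the skb on every return, so freeing it again in these callers risked a double free. A sketch of that ownership convention, with a hypothetical callee:

#include <linux/printk.h>
#include <linux/skbuff.h>

int xmit_consumes_skb(struct sk_buff *skb);     /* hypothetical callee that
                                                   frees the skb itself on
                                                   every return path */

static int send_one(struct sk_buff *skb)
{
        int err = xmit_consumes_skb(skb);

        if (err)
                pr_err_ratelimited("send failed\n");
        /* no kfree_skb(skb) here: the callee already owns or freed it */
        return err;
}
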
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 161ba8c76285..cf291f90b58f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1094,7 +1094,7 @@ drop_and_unlock:
spin_unlock_irqrestore(&priv->lock, flags);
}
-static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
index 4be3aef40bd2..267da8215e08 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -443,17 +443,16 @@ static u8 opa_vnic_get_rc(struct __opa_veswport_info *info,
}
/* opa_vnic_calc_entropy - calculate the packet entropy */
-u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
+u8 opa_vnic_calc_entropy(struct sk_buff *skb)
{
- u16 hash16;
-
- /*
- * Get flow based 16-bit hash and then XOR the upper and lower bytes
- * to get the entropy.
- * __skb_tx_hash limits qcount to 16 bits. Hence, get 15-bit hash.
- */
- hash16 = __skb_tx_hash(adapter->netdev, skb, BIT(15));
- return (u8)((hash16 >> 8) ^ (hash16 & 0xff));
+ u32 hash = skb_get_hash(skb);
+
+ /* store XOR of all bytes in lower 8 bits */
+ hash ^= hash >> 8;
+ hash ^= hash >> 16;
+
+ /* return lower 8 bits as entropy */
+ return (u8)(hash & 0xFF);
}
/* opa_vnic_get_def_port - get default port based on entropy */
@@ -490,7 +489,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
hdr = skb_push(skb, OPA_VNIC_HDR_LEN);
- entropy = opa_vnic_calc_entropy(adapter, skb);
+ entropy = opa_vnic_calc_entropy(skb);
def_port = opa_vnic_get_def_port(adapter, entropy);
len = opa_vnic_wire_length(skb);
dlid = opa_vnic_get_dlid(adapter, skb, def_port);
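
The new opa_vnic_calc_entropy() above folds the 32-bit flow hash from skb_get_hash() down to 8 bits by XOR-ing its bytes together, instead of deriving entropy from the tx queue hash. A worked example under an assumed hash value:

#include <linux/types.h>

/* Worked example of the byte-fold, with an assumed return from skb_get_hash(). */
static u8 fold_hash_example(void)
{
        u32 hash = 0xA1B2C3D4;          /* assumed flow hash */

        hash ^= hash >> 8;              /* 0xA1B2C3D4 ^ 0x00A1B2C3 = 0xA1137117 */
        hash ^= hash >> 16;             /* 0xA1137117 ^ 0x0000A113 = 0xA113D004 */

        /* low byte 0x04 == 0xA1 ^ 0xB2 ^ 0xC3 ^ 0xD4: every byte of the
         * original flow hash contributes to the 8-bit entropy value */
        return (u8)(hash & 0xFF);
}
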
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index afd95f432262..43ac61ffef4a 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -299,7 +299,7 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter);
void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
-u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
+u8 opa_vnic_calc_entropy(struct sk_buff *skb);
void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter);
void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter);
void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter,
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index ce57e0f10289..0c8aec62a425 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -104,7 +104,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
/* pass entropy and vl as metadata in skb */
mdata = skb_push(skb, sizeof(*mdata));
- mdata->entropy = opa_vnic_calc_entropy(adapter, skb);
+ mdata->entropy = opa_vnic_calc_entropy(skb);
mdata->vl = opa_vnic_get_vl(adapter, skb);
rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
accel_priv, fallback);
diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig
index c74ee9633041..99db8fe5173a 100644
--- a/drivers/infiniband/ulp/srp/Kconfig
+++ b/drivers/infiniband/ulp/srp/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_SRP
tristate "InfiniBand SCSI RDMA Protocol"
- depends on SCSI
+ depends on SCSI && INFINIBAND_ADDR_TRANS
select SCSI_SRP_ATTRS
---help---
Support for the SCSI RDMA Protocol over InfiniBand. This
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
index 31ee83d528d9..fb8b7182f05e 100644
--- a/drivers/infiniband/ulp/srpt/Kconfig
+++ b/drivers/infiniband/ulp/srpt/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_SRPT
tristate "InfiniBand SCSI RDMA Protocol target support"
- depends on INFINIBAND && TARGET_CORE
+ depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
---help---
Support for the SCSI RDMA Protocol (SRP) Target driver. The
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 46115a392098..c81c79d01d93 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -31,6 +31,7 @@
enum evdev_clock_type {
EV_CLK_REAL = 0,
EV_CLK_MONO,
+ EV_CLK_BOOT,
EV_CLK_MAX
};
@@ -197,10 +198,12 @@ static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
case CLOCK_REALTIME:
clk_type = EV_CLK_REAL;
break;
- case CLOCK_BOOTTIME:
case CLOCK_MONOTONIC:
clk_type = EV_CLK_MONO;
break;
+ case CLOCK_BOOTTIME:
+ clk_type = EV_CLK_BOOT;
+ break;
default:
return -EINVAL;
}
@@ -311,6 +314,8 @@ static void evdev_events(struct input_handle *handle,
ev_time[EV_CLK_MONO] = ktime_get();
ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
+ ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
+ TK_OFFS_BOOT);
rcu_read_lock();
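
With the evdev hunks above, CLOCK_BOOTTIME gets its own timestamp base instead of being silently aliased to CLOCK_MONOTONIC, so event timestamps keep advancing across suspend. A hypothetical userspace usage sketch; the ioctl and clock ids are the standard ones, the device path is an assumption:

#include <fcntl.h>
#include <linux/input.h>
#include <sys/ioctl.h>
#include <time.h>

int main(void)
{
        int clk = CLOCK_BOOTTIME;
        int fd = open("/dev/input/event0", O_RDONLY);   /* assumed path */

        if (fd < 0)
                return 1;
        /* After this change the event timestamps reflect boot time (suspend
         * included); before it they were plain monotonic time. */
        return ioctl(fd, EVIOCSCLOCKID, &clk) ? 1 : 0;
}
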
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 766bf2660116..5f04b2d94635 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -88,6 +88,7 @@ static int input_leds_connect(struct input_handler *handler,
const struct input_device_id *id)
{
struct input_leds *leds;
+ struct input_led *led;
unsigned int num_leds;
unsigned int led_code;
int led_no;
@@ -119,14 +120,13 @@ static int input_leds_connect(struct input_handler *handler,
led_no = 0;
for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
- struct input_led *led = &leds->leds[led_no];
+ if (!input_led_info[led_code].name)
+ continue;
+ led = &leds->leds[led_no];
led->handle = &leds->handle;
led->code = led_code;
- if (!input_led_info[led_code].name)
- continue;
-
led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
dev_name(&dev->dev),
input_led_info[led_code].name);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 0a67f235ba88..38f9501acdf0 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -583,7 +583,7 @@ static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f));
y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f));
- z = packet[4] & 0x7c;
+ z = packet[4] & 0x7f;
/*
* The x and y values tend to be quite large, and when used
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
index 76edbf2c1bce..082defc329a8 100644
--- a/drivers/input/rmi4/rmi_spi.c
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -147,8 +147,11 @@ static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
if (len > RMI_SPI_XFER_SIZE_LIMIT)
return -EINVAL;
- if (rmi_spi->xfer_buf_size < len)
- rmi_spi_manage_pools(rmi_spi, len);
+ if (rmi_spi->xfer_buf_size < len) {
+ ret = rmi_spi_manage_pools(rmi_spi, len);
+ if (ret < 0)
+ return ret;
+ }
if (addr == 0)
/*
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 4f15496fec8b..3e613afa10b4 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -362,7 +362,7 @@ config TOUCHSCREEN_HIDEEP
If unsure, say N.
- To compile this driver as a moudle, choose M here : the
+ To compile this driver as a module, choose M here : the
module will be called hideep_ts.
config TOUCHSCREEN_ILI210X
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 7659bc48f1db..09194721aed2 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -23,12 +23,13 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
-#include <linux/platform_data/atmel_mxt_ts.h>
#include <linux/input/mt.h>
#include <linux/interrupt.h>
#include <linux/of.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
+#include <linux/property.h>
#include <asm/unaligned.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -268,14 +269,19 @@ static const struct v4l2_file_operations mxt_video_fops = {
.poll = vb2_fop_poll,
};
+enum mxt_suspend_mode {
+ MXT_SUSPEND_DEEP_SLEEP = 0,
+ MXT_SUSPEND_T9_CTRL = 1,
+};
+
/* Each client has this additional data */
struct mxt_data {
struct i2c_client *client;
struct input_dev *input_dev;
char phys[64]; /* device physical location */
- const struct mxt_platform_data *pdata;
struct mxt_object *object_table;
- struct mxt_info info;
+ struct mxt_info *info;
+ void *raw_info_block;
unsigned int irq;
unsigned int max_x;
unsigned int max_y;
@@ -324,6 +330,11 @@ struct mxt_data {
/* for config update handling */
struct completion crc_completion;
+
+ u32 *t19_keymap;
+ unsigned int t19_num_keys;
+
+ enum mxt_suspend_mode suspend_mode;
};
struct mxt_vb2_buffer {
@@ -450,12 +461,13 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry)
{
u8 appmode = data->client->addr;
u8 bootloader;
+ u8 family_id = data->info ? data->info->family_id : 0;
switch (appmode) {
case 0x4a:
case 0x4b:
/* Chips after 1664S use different scheme */
- if (retry || data->info.family_id >= 0xa2) {
+ if (retry || family_id >= 0xa2) {
bootloader = appmode - 0x24;
break;
}
@@ -682,7 +694,7 @@ mxt_get_object(struct mxt_data *data, u8 type)
struct mxt_object *object;
int i;
- for (i = 0; i < data->info.object_num; i++) {
+ for (i = 0; i < data->info->object_num; i++) {
object = data->object_table + i;
if (object->type == type)
return object;
@@ -742,15 +754,14 @@ static int mxt_write_object(struct mxt_data *data,
static void mxt_input_button(struct mxt_data *data, u8 *message)
{
struct input_dev *input = data->input_dev;
- const struct mxt_platform_data *pdata = data->pdata;
int i;
- for (i = 0; i < pdata->t19_num_keys; i++) {
- if (pdata->t19_keymap[i] == KEY_RESERVED)
+ for (i = 0; i < data->t19_num_keys; i++) {
+ if (data->t19_keymap[i] == KEY_RESERVED)
continue;
/* Active-low switch */
- input_report_key(input, pdata->t19_keymap[i],
+ input_report_key(input, data->t19_keymap[i],
!(message[1] & BIT(i)));
}
}
@@ -758,7 +769,7 @@ static void mxt_input_button(struct mxt_data *data, u8 *message)
static void mxt_input_sync(struct mxt_data *data)
{
input_mt_report_pointer_emulation(data->input_dev,
- data->pdata->t19_num_keys);
+ data->t19_num_keys);
input_sync(data->input_dev);
}
@@ -1453,12 +1464,12 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
data_pos += offset;
}
- if (cfg_info.family_id != data->info.family_id) {
+ if (cfg_info.family_id != data->info->family_id) {
dev_err(dev, "Family ID mismatch!\n");
return -EINVAL;
}
- if (cfg_info.variant_id != data->info.variant_id) {
+ if (cfg_info.variant_id != data->info->variant_id) {
dev_err(dev, "Variant ID mismatch!\n");
return -EINVAL;
}
@@ -1503,7 +1514,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
/* Malloc memory to store configuration */
cfg_start_ofs = MXT_OBJECT_START +
- data->info.object_num * sizeof(struct mxt_object) +
+ data->info->object_num * sizeof(struct mxt_object) +
MXT_INFO_CHECKSUM_SIZE;
config_mem_size = data->mem_size - cfg_start_ofs;
config_mem = kzalloc(config_mem_size, GFP_KERNEL);
@@ -1554,20 +1565,6 @@ release_mem:
return ret;
}
-static int mxt_get_info(struct mxt_data *data)
-{
- struct i2c_client *client = data->client;
- struct mxt_info *info = &data->info;
- int error;
-
- /* Read 7-byte info block starting at address 0 */
- error = __mxt_read_reg(client, 0, sizeof(*info), info);
- if (error)
- return error;
-
- return 0;
-}
-
static void mxt_free_input_device(struct mxt_data *data)
{
if (data->input_dev) {
@@ -1582,9 +1579,10 @@ static void mxt_free_object_table(struct mxt_data *data)
video_unregister_device(&data->dbg.vdev);
v4l2_device_unregister(&data->dbg.v4l2);
#endif
-
- kfree(data->object_table);
data->object_table = NULL;
+ data->info = NULL;
+ kfree(data->raw_info_block);
+ data->raw_info_block = NULL;
kfree(data->msg_buf);
data->msg_buf = NULL;
data->T5_address = 0;
@@ -1600,34 +1598,18 @@ static void mxt_free_object_table(struct mxt_data *data)
data->max_reportid = 0;
}
-static int mxt_get_object_table(struct mxt_data *data)
+static int mxt_parse_object_table(struct mxt_data *data,
+ struct mxt_object *object_table)
{
struct i2c_client *client = data->client;
- size_t table_size;
- struct mxt_object *object_table;
- int error;
int i;
u8 reportid;
u16 end_address;
- table_size = data->info.object_num * sizeof(struct mxt_object);
- object_table = kzalloc(table_size, GFP_KERNEL);
- if (!object_table) {
- dev_err(&data->client->dev, "Failed to allocate memory\n");
- return -ENOMEM;
- }
-
- error = __mxt_read_reg(client, MXT_OBJECT_START, table_size,
- object_table);
- if (error) {
- kfree(object_table);
- return error;
- }
-
/* Valid Report IDs start counting from 1 */
reportid = 1;
data->mem_size = 0;
- for (i = 0; i < data->info.object_num; i++) {
+ for (i = 0; i < data->info->object_num; i++) {
struct mxt_object *object = object_table + i;
u8 min_id, max_id;
@@ -1651,8 +1633,8 @@ static int mxt_get_object_table(struct mxt_data *data)
switch (object->type) {
case MXT_GEN_MESSAGE_T5:
- if (data->info.family_id == 0x80 &&
- data->info.version < 0x20) {
+ if (data->info->family_id == 0x80 &&
+ data->info->version < 0x20) {
/*
* On mXT224 firmware versions prior to V2.0
* read and discard unused CRC byte otherwise
@@ -1707,24 +1689,102 @@ static int mxt_get_object_table(struct mxt_data *data)
/* If T44 exists, T5 position has to be directly after */
if (data->T44_address && (data->T5_address != data->T44_address + 1)) {
dev_err(&client->dev, "Invalid T44 position\n");
- error = -EINVAL;
- goto free_object_table;
+ return -EINVAL;
}
data->msg_buf = kcalloc(data->max_reportid,
data->T5_msg_size, GFP_KERNEL);
- if (!data->msg_buf) {
- dev_err(&client->dev, "Failed to allocate message buffer\n");
+ if (!data->msg_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int mxt_read_info_block(struct mxt_data *data)
+{
+ struct i2c_client *client = data->client;
+ int error;
+ size_t size;
+ void *id_buf, *buf;
+ uint8_t num_objects;
+ u32 calculated_crc;
+ u8 *crc_ptr;
+
+ /* If info block already allocated, free it */
+ if (data->raw_info_block)
+ mxt_free_object_table(data);
+
+ /* Read 7-byte ID information block starting at address 0 */
+ size = sizeof(struct mxt_info);
+ id_buf = kzalloc(size, GFP_KERNEL);
+ if (!id_buf)
+ return -ENOMEM;
+
+ error = __mxt_read_reg(client, 0, size, id_buf);
+ if (error)
+ goto err_free_mem;
+
+ /* Resize buffer to give space for rest of info block */
+ num_objects = ((struct mxt_info *)id_buf)->object_num;
+ size += (num_objects * sizeof(struct mxt_object))
+ + MXT_INFO_CHECKSUM_SIZE;
+
+ buf = krealloc(id_buf, size, GFP_KERNEL);
+ if (!buf) {
error = -ENOMEM;
- goto free_object_table;
+ goto err_free_mem;
+ }
+ id_buf = buf;
+
+ /* Read rest of info block */
+ error = __mxt_read_reg(client, MXT_OBJECT_START,
+ size - MXT_OBJECT_START,
+ id_buf + MXT_OBJECT_START);
+ if (error)
+ goto err_free_mem;
+
+ /* Extract & calculate checksum */
+ crc_ptr = id_buf + size - MXT_INFO_CHECKSUM_SIZE;
+ data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16);
+
+ calculated_crc = mxt_calculate_crc(id_buf, 0,
+ size - MXT_INFO_CHECKSUM_SIZE);
+
+ /*
+ * A CRC mismatch can be caused by data corruption due to an I2C comms
+ * issue, or by a device that does not use the Object Based Protocol (e.g. i2c-hid)
+ */
+ if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) {
+ dev_err(&client->dev,
+ "Info Block CRC error calculated=0x%06X read=0x%06X\n",
+ calculated_crc, data->info_crc);
+ error = -EIO;
+ goto err_free_mem;
}
- data->object_table = object_table;
+ data->raw_info_block = id_buf;
+ data->info = (struct mxt_info *)id_buf;
+
+ dev_info(&client->dev,
+ "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
+ data->info->family_id, data->info->variant_id,
+ data->info->version >> 4, data->info->version & 0xf,
+ data->info->build, data->info->object_num);
+
+ /* Parse object table information */
+ error = mxt_parse_object_table(data, id_buf + MXT_OBJECT_START);
+ if (error) {
+ dev_err(&client->dev, "Error %d parsing object table\n", error);
+ mxt_free_object_table(data);
+ goto err_free_mem;
+ }
+
+ data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
return 0;
-free_object_table:
- mxt_free_object_table(data);
+err_free_mem:
+ kfree(id_buf);
return error;
}
@@ -1858,7 +1918,6 @@ static void mxt_input_close(struct input_dev *dev);
static void mxt_set_up_as_touchpad(struct input_dev *input_dev,
struct mxt_data *data)
{
- const struct mxt_platform_data *pdata = data->pdata;
int i;
input_dev->name = "Atmel maXTouch Touchpad";
@@ -1872,15 +1931,14 @@ static void mxt_set_up_as_touchpad(struct input_dev *input_dev,
input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
MXT_PIXELS_PER_MM);
- for (i = 0; i < pdata->t19_num_keys; i++)
- if (pdata->t19_keymap[i] != KEY_RESERVED)
+ for (i = 0; i < data->t19_num_keys; i++)
+ if (data->t19_keymap[i] != KEY_RESERVED)
input_set_capability(input_dev, EV_KEY,
- pdata->t19_keymap[i]);
+ data->t19_keymap[i]);
}
static int mxt_initialize_input_device(struct mxt_data *data)
{
- const struct mxt_platform_data *pdata = data->pdata;
struct device *dev = &data->client->dev;
struct input_dev *input_dev;
int error;
@@ -1946,7 +2004,7 @@ static int mxt_initialize_input_device(struct mxt_data *data)
}
/* If device has buttons we assume it is a touchpad */
- if (pdata->t19_num_keys) {
+ if (data->t19_num_keys) {
mxt_set_up_as_touchpad(input_dev, data);
mt_flags |= INPUT_MT_POINTER;
} else {
@@ -2039,7 +2097,7 @@ static int mxt_initialize(struct mxt_data *data)
int error;
while (1) {
- error = mxt_get_info(data);
+ error = mxt_read_info_block(data);
if (!error)
break;
@@ -2070,16 +2128,9 @@ static int mxt_initialize(struct mxt_data *data)
msleep(MXT_FW_RESET_TIME);
}
- /* Get object table information */
- error = mxt_get_object_table(data);
- if (error) {
- dev_err(&client->dev, "Error %d reading object table\n", error);
- return error;
- }
-
error = mxt_acquire_irq(data);
if (error)
- goto err_free_object_table;
+ return error;
error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
&client->dev, GFP_KERNEL, data,
@@ -2087,14 +2138,10 @@ static int mxt_initialize(struct mxt_data *data)
if (error) {
dev_err(&client->dev, "Failed to invoke firmware loader: %d\n",
error);
- goto err_free_object_table;
+ return error;
}
return 0;
-
-err_free_object_table:
- mxt_free_object_table(data);
- return error;
}
static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
@@ -2155,7 +2202,7 @@ recheck:
static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x,
unsigned int y)
{
- struct mxt_info *info = &data->info;
+ struct mxt_info *info = data->info;
struct mxt_dbg *dbg = &data->dbg;
unsigned int ofs, page;
unsigned int col = 0;
@@ -2483,7 +2530,7 @@ static const struct video_device mxt_video_device = {
static void mxt_debug_init(struct mxt_data *data)
{
- struct mxt_info *info = &data->info;
+ struct mxt_info *info = data->info;
struct mxt_dbg *dbg = &data->dbg;
struct mxt_object *object;
int error;
@@ -2569,7 +2616,6 @@ static int mxt_configure_objects(struct mxt_data *data,
const struct firmware *cfg)
{
struct device *dev = &data->client->dev;
- struct mxt_info *info = &data->info;
int error;
error = mxt_init_t7_power_cfg(data);
@@ -2594,11 +2640,6 @@ static int mxt_configure_objects(struct mxt_data *data,
mxt_debug_init(data);
- dev_info(dev,
- "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
- info->family_id, info->variant_id, info->version >> 4,
- info->version & 0xf, info->build, info->object_num);
-
return 0;
}
@@ -2607,7 +2648,7 @@ static ssize_t mxt_fw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mxt_data *data = dev_get_drvdata(dev);
- struct mxt_info *info = &data->info;
+ struct mxt_info *info = data->info;
return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n",
info->version >> 4, info->version & 0xf, info->build);
}
@@ -2617,7 +2658,7 @@ static ssize_t mxt_hw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mxt_data *data = dev_get_drvdata(dev);
- struct mxt_info *info = &data->info;
+ struct mxt_info *info = data->info;
return scnprintf(buf, PAGE_SIZE, "%u.%u\n",
info->family_id, info->variant_id);
}
@@ -2656,7 +2697,7 @@ static ssize_t mxt_object_show(struct device *dev,
return -ENOMEM;
error = 0;
- for (i = 0; i < data->info.object_num; i++) {
+ for (i = 0; i < data->info->object_num; i++) {
object = data->object_table + i;
if (!mxt_object_readable(object->type))
@@ -2868,7 +2909,7 @@ static const struct attribute_group mxt_attr_group = {
static void mxt_start(struct mxt_data *data)
{
- switch (data->pdata->suspend_mode) {
+ switch (data->suspend_mode) {
case MXT_SUSPEND_T9_CTRL:
mxt_soft_reset(data);
@@ -2886,12 +2927,11 @@ static void mxt_start(struct mxt_data *data)
mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
break;
}
-
}
static void mxt_stop(struct mxt_data *data)
{
- switch (data->pdata->suspend_mode) {
+ switch (data->suspend_mode) {
case MXT_SUSPEND_T9_CTRL:
/* Touch disable */
mxt_write_object(data,
@@ -2921,55 +2961,49 @@ static void mxt_input_close(struct input_dev *dev)
mxt_stop(data);
}
-#ifdef CONFIG_OF
-static const struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
+static int mxt_parse_device_properties(struct mxt_data *data)
{
- struct mxt_platform_data *pdata;
- struct device_node *np = client->dev.of_node;
+ static const char keymap_property[] = "linux,gpio-keymap";
+ struct device *dev = &data->client->dev;
u32 *keymap;
- int proplen, ret;
-
- if (!np)
- return ERR_PTR(-ENOENT);
-
- pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
+ int n_keys;
+ int error;
- if (of_find_property(np, "linux,gpio-keymap", &proplen)) {
- pdata->t19_num_keys = proplen / sizeof(u32);
+ if (device_property_present(dev, keymap_property)) {
+ n_keys = device_property_read_u32_array(dev, keymap_property,
+ NULL, 0);
+ if (n_keys <= 0) {
+ error = n_keys < 0 ? n_keys : -EINVAL;
+ dev_err(dev, "invalid/malformed '%s' property: %d\n",
+ keymap_property, error);
+ return error;
+ }
- keymap = devm_kzalloc(&client->dev,
- pdata->t19_num_keys * sizeof(keymap[0]),
- GFP_KERNEL);
+ keymap = devm_kmalloc_array(dev, n_keys, sizeof(*keymap),
+ GFP_KERNEL);
if (!keymap)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- ret = of_property_read_u32_array(np, "linux,gpio-keymap",
- keymap, pdata->t19_num_keys);
- if (ret)
- dev_warn(&client->dev,
- "Couldn't read linux,gpio-keymap: %d\n", ret);
+ error = device_property_read_u32_array(dev, keymap_property,
+ keymap, n_keys);
+ if (error) {
+ dev_err(dev, "failed to parse '%s' property: %d\n",
+ keymap_property, error);
+ return error;
+ }
- pdata->t19_keymap = keymap;
+ data->t19_keymap = keymap;
+ data->t19_num_keys = n_keys;
}
- pdata->suspend_mode = MXT_SUSPEND_DEEP_SLEEP;
-
- return pdata;
-}
-#else
-static const struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
-{
- return ERR_PTR(-ENOENT);
+ return 0;
}
-#endif
#ifdef CONFIG_ACPI
struct mxt_acpi_platform_data {
const char *hid;
- struct mxt_platform_data pdata;
+ const struct property_entry *props;
};
static unsigned int samus_touchpad_buttons[] = {
@@ -2979,14 +3013,16 @@ static unsigned int samus_touchpad_buttons[] = {
BTN_LEFT
};
+static const struct property_entry samus_touchpad_props[] = {
+ PROPERTY_ENTRY_U32_ARRAY("linux,gpio-keymap", samus_touchpad_buttons),
+ { }
+};
+
static struct mxt_acpi_platform_data samus_platform_data[] = {
{
/* Touchpad */
.hid = "ATML0000",
- .pdata = {
- .t19_num_keys = ARRAY_SIZE(samus_touchpad_buttons),
- .t19_keymap = samus_touchpad_buttons,
- },
+ .props = samus_touchpad_props,
},
{
/* Touchscreen */
@@ -3004,14 +3040,16 @@ static unsigned int chromebook_tp_buttons[] = {
BTN_LEFT
};
+static const struct property_entry chromebook_tp_props[] = {
+ PROPERTY_ENTRY_U32_ARRAY("linux,gpio-keymap", chromebook_tp_buttons),
+ { }
+};
+
static struct mxt_acpi_platform_data chromebook_platform_data[] = {
{
/* Touchpad */
.hid = "ATML0000",
- .pdata = {
- .t19_num_keys = ARRAY_SIZE(chromebook_tp_buttons),
- .t19_keymap = chromebook_tp_buttons,
- },
+ .props = chromebook_tp_props,
},
{
/* Touchscreen */
@@ -3031,6 +3069,15 @@ static const struct dmi_system_id mxt_dmi_table[] = {
.driver_data = samus_platform_data,
},
{
+ /* Samsung Chromebook Pro */
+ .ident = "Samsung Chromebook Pro",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
+ },
+ .driver_data = samus_platform_data,
+ },
+ {
/* Other Google Chromebooks */
.ident = "Chromebook",
.matches = {
@@ -3041,83 +3088,85 @@ static const struct dmi_system_id mxt_dmi_table[] = {
{ }
};
-static const struct mxt_platform_data *mxt_parse_acpi(struct i2c_client *client)
+static int mxt_prepare_acpi_properties(struct i2c_client *client)
{
struct acpi_device *adev;
const struct dmi_system_id *system_id;
const struct mxt_acpi_platform_data *acpi_pdata;
- /*
- * Ignore ACPI devices representing bootloader mode.
- *
- * This is a bit of a hack: Google Chromebook BIOS creates ACPI
- * devices for both application and bootloader modes, but we are
- * interested in application mode only (if device is in bootloader
- * mode we'll end up switching into application anyway). So far
- * application mode addresses were all above 0x40, so we'll use it
- * as a threshold.
- */
- if (client->addr < 0x40)
- return ERR_PTR(-ENXIO);
-
adev = ACPI_COMPANION(&client->dev);
if (!adev)
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
system_id = dmi_first_match(mxt_dmi_table);
if (!system_id)
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
acpi_pdata = system_id->driver_data;
if (!acpi_pdata)
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
while (acpi_pdata->hid) {
- if (!strcmp(acpi_device_hid(adev), acpi_pdata->hid))
- return &acpi_pdata->pdata;
+ if (!strcmp(acpi_device_hid(adev), acpi_pdata->hid)) {
+ /*
+ * Remove previously installed properties in case
+ * we are not probing this device for the very
+ * first time.
+ */
+ device_remove_properties(&client->dev);
+
+ /*
+ * Now install the platform-specific properties
+ * that are missing from ACPI.
+ */
+ device_add_properties(&client->dev, acpi_pdata->props);
+ break;
+ }
acpi_pdata++;
}
- return ERR_PTR(-ENOENT);
+ return 0;
}
#else
-static const struct mxt_platform_data *mxt_parse_acpi(struct i2c_client *client)
+static int mxt_prepare_acpi_properties(struct i2c_client *client)
{
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
}
#endif
-static const struct mxt_platform_data *
-mxt_get_platform_data(struct i2c_client *client)
-{
- const struct mxt_platform_data *pdata;
-
- pdata = dev_get_platdata(&client->dev);
- if (pdata)
- return pdata;
-
- pdata = mxt_parse_dt(client);
- if (!IS_ERR(pdata) || PTR_ERR(pdata) != -ENOENT)
- return pdata;
-
- pdata = mxt_parse_acpi(client);
- if (!IS_ERR(pdata) || PTR_ERR(pdata) != -ENOENT)
- return pdata;
-
- dev_err(&client->dev, "No platform data specified\n");
- return ERR_PTR(-EINVAL);
-}
+static const struct dmi_system_id chromebook_T9_suspend_dmi[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"),
+ },
+ },
+ { }
+};
static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct mxt_data *data;
- const struct mxt_platform_data *pdata;
int error;
- pdata = mxt_get_platform_data(client);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
+ /*
+ * Ignore ACPI devices representing bootloader mode.
+ *
+ * This is a bit of a hack: Google Chromebook BIOS creates ACPI
+ * devices for both application and bootloader modes, but we are
+ * interested in application mode only (if device is in bootloader
+ * mode we'll end up switching into application anyway). So far
+ * application mode addresses were all above 0x40, so we'll use it
+ * as a threshold.
+ */
+ if (ACPI_COMPANION(&client->dev) && client->addr < 0x40)
+ return -ENXIO;
data = devm_kzalloc(&client->dev, sizeof(struct mxt_data), GFP_KERNEL);
if (!data)
@@ -3127,7 +3176,6 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
client->adapter->nr, client->addr);
data->client = client;
- data->pdata = pdata;
data->irq = client->irq;
i2c_set_clientdata(client, data);
@@ -3135,6 +3183,17 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
init_completion(&data->reset_completion);
init_completion(&data->crc_completion);
+ data->suspend_mode = dmi_check_system(chromebook_T9_suspend_dmi) ?
+ MXT_SUSPEND_T9_CTRL : MXT_SUSPEND_DEEP_SLEEP;
+
+ error = mxt_prepare_acpi_properties(client);
+ if (error && error != -ENOENT)
+ return error;
+
+ error = mxt_parse_device_properties(data);
+ if (error)
+ return error;
+
data->reset_gpio = devm_gpiod_get_optional(&client->dev,
"reset", GPIOD_OUT_LOW);
if (IS_ERR(data->reset_gpio)) {
@@ -3144,8 +3203,7 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, mxt_interrupt,
- pdata->irqflags | IRQF_ONESHOT,
+ NULL, mxt_interrupt, IRQF_ONESHOT,
client->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
@@ -3239,6 +3297,11 @@ static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume);
static const struct of_device_id mxt_of_match[] = {
{ .compatible = "atmel,maxtouch", },
+ /* Compatibles listed below are deprecated */
+ { .compatible = "atmel,qt602240_ts", },
+ { .compatible = "atmel,atmel_mxt_ts", },
+ { .compatible = "atmel,atmel_mxt_tp", },
+ { .compatible = "atmel,mXT224", },
{},
};
MODULE_DEVICE_TABLE(of, mxt_of_match);
@@ -3265,7 +3328,7 @@ MODULE_DEVICE_TABLE(i2c, mxt_id);
static struct i2c_driver mxt_driver = {
.driver = {
.name = "atmel_mxt_ts",
- .of_match_table = of_match_ptr(mxt_of_match),
+ .of_match_table = mxt_of_match,
.acpi_match_table = ACPI_PTR(mxt_acpi_id),
.pm = &mxt_pm_ops,
},
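
Aside: the atmel_mxt_ts conversion above replaces the OF-only platform-data parser with the generic device property API, so the same code path serves both devicetree and ACPI/static properties. A minimal sketch of the count-then-read pattern used by mxt_parse_device_properties() follows (the helper name is hypothetical and error handling is abbreviated):

/* Sketch only: count-then-read of a u32 array device property. */
#include <linux/device.h>
#include <linux/property.h>
#include <linux/slab.h>

static int read_u32_array_prop(struct device *dev, const char *prop,
			       u32 **out, int *n_out)
{
	u32 *vals;
	int n, error;

	if (!device_property_present(dev, prop))
		return -ENOENT;

	/* A NULL buffer with length 0 asks for the element count. */
	n = device_property_read_u32_array(dev, prop, NULL, 0);
	if (n <= 0)
		return n < 0 ? n : -EINVAL;

	vals = devm_kmalloc_array(dev, n, sizeof(*vals), GFP_KERNEL);
	if (!vals)
		return -ENOMEM;

	error = device_property_read_u32_array(dev, prop, vals, n);
	if (error)
		return error;

	*out = vals;
	*n_out = n;
	return 0;
}
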
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2a99f0f14795..8fb8c737fffe 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -83,7 +83,6 @@
static DEFINE_SPINLOCK(amd_iommu_devtable_lock);
static DEFINE_SPINLOCK(pd_bitmap_lock);
-static DEFINE_SPINLOCK(iommu_table_lock);
/* List of all available dev_data structures */
static LLIST_HEAD(dev_data_list);
@@ -3562,6 +3561,7 @@ EXPORT_SYMBOL(amd_iommu_device_info);
*****************************************************************************/
static struct irq_chip amd_ir_chip;
+static DEFINE_SPINLOCK(iommu_table_lock);
static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
{
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f05f3cf90756..ddcbbdb5d658 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -167,40 +167,16 @@ EXPORT_SYMBOL(iommu_put_dma_cookie);
* @list: Reserved region list from iommu_get_resv_regions()
*
* IOMMU drivers can use this to implement their .get_resv_regions callback
- * for general non-IOMMU-specific reservations. Currently, this covers host
- * bridge windows for PCI devices and GICv3 ITS region reservation on ACPI
- * based ARM platforms that may require HW MSI reservation.
+ * for general non-IOMMU-specific reservations. Currently, this covers GICv3
+ * ITS region reservation on ACPI based ARM platforms that may require HW MSI
+ * reservation.
*/
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
- struct pci_host_bridge *bridge;
- struct resource_entry *window;
-
- if (!is_of_node(dev->iommu_fwspec->iommu_fwnode) &&
- iort_iommu_msi_get_resv_regions(dev, list) < 0)
- return;
-
- if (!dev_is_pci(dev))
- return;
-
- bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
- resource_list_for_each_entry(window, &bridge->windows) {
- struct iommu_resv_region *region;
- phys_addr_t start;
- size_t length;
-
- if (resource_type(window->res) != IORESOURCE_MEM)
- continue;
- start = window->res->start - window->offset;
- length = window->res->end - window->res->start + 1;
- region = iommu_alloc_resv_region(start, length, 0,
- IOMMU_RESV_RESERVED);
- if (!region)
- return;
+ if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
+ iort_iommu_msi_get_resv_regions(dev, list);
- list_add_tail(&region->list, list);
- }
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
@@ -229,6 +205,23 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
return 0;
}
+static void iova_reserve_pci_windows(struct pci_dev *dev,
+ struct iova_domain *iovad)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+ struct resource_entry *window;
+ unsigned long lo, hi;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ if (resource_type(window->res) != IORESOURCE_MEM)
+ continue;
+
+ lo = iova_pfn(iovad, window->res->start - window->offset);
+ hi = iova_pfn(iovad, window->res->end - window->offset);
+ reserve_iova(iovad, lo, hi);
+ }
+}
+
static int iova_reserve_iommu_regions(struct device *dev,
struct iommu_domain *domain)
{
@@ -238,6 +231,9 @@ static int iova_reserve_iommu_regions(struct device *dev,
LIST_HEAD(resv_regions);
int ret = 0;
+ if (dev_is_pci(dev))
+ iova_reserve_pci_windows(to_pci_dev(dev), iovad);
+
iommu_get_resv_regions(dev, &resv_regions);
list_for_each_entry(region, &resv_regions, list) {
unsigned long lo, hi;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index accf58388bdb..460bed4fc5b1 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1345,7 +1345,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
struct qi_desc desc;
if (mask) {
- BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+ WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
} else
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 66f69af2c219..3062a154a9fb 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1136,7 +1136,7 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
irte->dest_id = IRTE_DEST(cfg->dest_apicid);
/* Update the hardware only if the interrupt is in remapped mode. */
- if (!force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
+ if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
modify_irte(&ir_data->irq_2_iommu, irte);
}
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 5fc8656c60f9..0468acfa131f 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1098,7 +1098,7 @@ static int rk_iommu_of_xlate(struct device *dev,
data->iommu = platform_get_drvdata(iommu_dev);
dev->archdata.iommu = data;
- of_dev_put(iommu_dev);
+ platform_device_put(iommu_dev);
return 0;
}
@@ -1175,8 +1175,15 @@ static int rk_iommu_probe(struct platform_device *pdev)
for (i = 0; i < iommu->num_clocks; ++i)
iommu->clocks[i].id = rk_iommu_clocks[i];
+ /*
+ * iommu clocks should be present for all new devices and devicetrees
+ * but there are older devicetrees without clocks out in the wild.
+ * So treat clocks as optional for the time being.
+ */
err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
- if (err)
+ if (err == -ENOENT)
+ iommu->num_clocks = 0;
+ else if (err)
return err;
err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
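
Aside: the rockchip-iommu hunk above makes the clocks optional by treating -ENOENT from devm_clk_bulk_get() as "no clocks" rather than a fatal error, so older devicetrees without clock properties still probe. A minimal sketch of that shape (the helper name is hypothetical):

/* Sketch only: tolerate missing clocks at probe time. */
#include <linux/clk.h>
#include <linux/device.h>

static int my_get_optional_clocks(struct device *dev, int *num_clocks,
				  struct clk_bulk_data *clocks)
{
	int err;

	err = devm_clk_bulk_get(dev, *num_clocks, clocks);
	if (err == -ENOENT)
		*num_clocks = 0;	/* old DT without clocks: carry on */
	else if (err)
		return err;		/* real error, e.g. -EPROBE_DEFER */

	return clk_bulk_prepare(*num_clocks, clocks);
}
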
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index f31265937439..7f0c0be322e0 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -68,7 +68,7 @@ static void combiner_handle_irq(struct irq_desc *desc)
bit = readl_relaxed(combiner->regs[reg].addr);
status = bit & combiner->regs[reg].enabled;
- if (!status)
+ if (bit && !status)
pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n",
smp_processor_id(), bit,
combiner->regs[reg].enabled,
diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c
index a6e87076acc2..5336bbdbfdc5 100644
--- a/drivers/isdn/mISDN/dsp_hwec.c
+++ b/drivers/isdn/mISDN/dsp_hwec.c
@@ -68,12 +68,12 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
goto _do;
{
- char _dup[len + 1];
char *dup, *tok, *name, *val;
int tmp;
- strcpy(_dup, arg);
- dup = _dup;
+ dup = kstrdup(arg, GFP_ATOMIC);
+ if (!dup)
+ return;
while ((tok = strsep(&dup, ","))) {
if (!strlen(tok))
@@ -89,6 +89,8 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
deftaps = tmp;
}
}
+
+ kfree(dup);
}
_do:
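
Aside: the dsp_hwec hunk above removes an on-stack variable-length array by duplicating the option string with kstrdup() before tokenizing it with strsep(). Since strsep() advances the pointer it is handed, the usual pattern keeps the original allocation in a separate variable so kfree() releases the whole duplicate; a minimal sketch of that pattern (the function and the option handling are hypothetical):

/* Sketch only: tokenize a comma-separated option string without a VLA. */
#include <linux/slab.h>
#include <linux/string.h>

static void parse_opts(const char *arg)
{
	char *dup, *cur, *tok;

	dup = kstrdup(arg, GFP_ATOMIC);
	if (!dup)
		return;

	cur = dup;			/* strsep() advances 'cur'... */
	while ((tok = strsep(&cur, ","))) {
		if (!strlen(tok))
			continue;
		/* handle "name=value" tokens here */
	}

	kfree(dup);			/* ...so free the original pointer */
}
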
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 21d50e4cc5e1..b05022f94f18 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -279,7 +279,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
u16 timebase, u8 *buf, int len)
{
u8 *p;
- u8 frame[len + 32];
+ u8 frame[MAX_DFRAME_LEN_L1 + 32];
struct socket *socket = NULL;
if (debug & DEBUG_L1OIP_MSG)
@@ -902,7 +902,11 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
p = skb->data;
l = skb->len;
while (l) {
- ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME;
+ /*
+ * This is technically bounded by L1OIP_MAX_PERFRAME but
+ * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME
+ */
+ ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1;
l1oip_socket_send(hc, 0, dch->slot, 0,
hc->chan[dch->slot].tx_counter++, p, ll);
p += ll;
@@ -1140,7 +1144,11 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
p = skb->data;
l = skb->len;
while (l) {
- ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME;
+ /*
+ * This is technically bounded by L1OIP_MAX_PERFRAME but
+ * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME
+ */
+ ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1;
l1oip_socket_send(hc, hc->codec, bch->slot, 0,
hc->chan[bch->slot].tx_counter, p, ll);
hc->chan[bch->slot].tx_counter += ll;
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 004cc3cc6123..7fa2631b422c 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -290,7 +290,7 @@ do { \
if (kthread_should_stop() || \
test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) { \
set_current_state(TASK_RUNNING); \
- return 0; \
+ goto out; \
} \
\
schedule(); \
@@ -378,6 +378,9 @@ retry_invalidate:
bch_prio_write(ca);
}
}
+out:
+ wait_for_kthread_stop();
+ return 0;
}
/* Allocation */
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d338b7086013..3a0cfb237af9 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -392,6 +392,8 @@ struct cached_dev {
#define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
atomic_t io_errors;
unsigned error_limit;
+
+ char backing_dev_name[BDEVNAME_SIZE];
};
enum alloc_reserve {
@@ -464,6 +466,8 @@ struct cache {
atomic_long_t meta_sectors_written;
atomic_long_t btree_sectors_written;
atomic_long_t sectors_written;
+
+ char cache_dev_name[BDEVNAME_SIZE];
};
struct gc_stat {
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 028f7b386e01..d030ce3025a6 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -106,7 +106,6 @@ void bch_btree_verify(struct btree *b)
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
- char name[BDEVNAME_SIZE];
struct bio *check;
struct bio_vec bv, cbv;
struct bvec_iter iter, citer = { 0 };
@@ -134,7 +133,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
bv.bv_len),
dc->disk.c,
"verify failed at dev %s sector %llu",
- bdevname(dc->bdev, name),
+ dc->backing_dev_name,
(uint64_t) bio->bi_iter.bi_sector);
kunmap_atomic(p1);
@@ -251,7 +250,9 @@ void bch_debug_exit(void)
int __init bch_debug_init(struct kobject *kobj)
{
- bcache_debug = debugfs_create_dir("bcache", NULL);
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+ bcache_debug = debugfs_create_dir("bcache", NULL);
return IS_ERR_OR_NULL(bcache_debug);
}
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 7fac97ae036e..2ddf8515e6a5 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -52,7 +52,6 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
/* IO errors */
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
- char buf[BDEVNAME_SIZE];
unsigned errors;
WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
@@ -60,7 +59,7 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
errors = atomic_add_return(1, &dc->io_errors);
if (errors < dc->error_limit)
pr_err("%s: IO error on backing device, unrecoverable",
- bio_devname(bio, buf));
+ dc->backing_dev_name);
else
bch_cached_dev_error(dc);
}
@@ -105,19 +104,18 @@ void bch_count_io_errors(struct cache *ca,
}
if (error) {
- char buf[BDEVNAME_SIZE];
unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
&ca->io_errors);
errors >>= IO_ERROR_SHIFT;
if (errors < ca->set->error_limit)
pr_err("%s: IO error on %s%s",
- bdevname(ca->bdev, buf), m,
+ ca->cache_dev_name, m,
is_read ? ", recovering." : ".");
else
bch_cache_set_error(ca->set,
"%s: too many IO errors %s",
- bdevname(ca->bdev, buf), m);
+ ca->cache_dev_name, m);
}
}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a65e3365eeb9..8e3e8655ed63 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -649,11 +649,8 @@ static void backing_request_endio(struct bio *bio)
*/
if (unlikely(s->iop.writeback &&
bio->bi_opf & REQ_PREFLUSH)) {
- char buf[BDEVNAME_SIZE];
-
- bio_devname(bio, buf);
pr_err("Can't flush %s: returned bi_status %i",
- buf, bio->bi_status);
+ dc->backing_dev_name, bio->bi_status);
} else {
/* set to orig_bio->bi_status in bio_complete() */
s->iop.status = bio->bi_status;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index d90d9e59ca00..3dea06b41d43 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -936,7 +936,6 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
static void cached_dev_detach_finish(struct work_struct *w)
{
struct cached_dev *dc = container_of(w, struct cached_dev, detach);
- char buf[BDEVNAME_SIZE];
struct closure cl;
closure_init_stack(&cl);
@@ -967,7 +966,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_unlock(&bch_register_lock);
- pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
+ pr_info("Caching disabled for %s", dc->backing_dev_name);
/* Drop ref we took in cached_dev_detach() */
closure_put(&dc->disk.cl);
@@ -999,29 +998,28 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
{
uint32_t rtime = cpu_to_le32(get_seconds());
struct uuid_entry *u;
- char buf[BDEVNAME_SIZE];
struct cached_dev *exist_dc, *t;
- bdevname(dc->bdev, buf);
-
if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
(!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
return -ENOENT;
if (dc->disk.c) {
- pr_err("Can't attach %s: already attached", buf);
+ pr_err("Can't attach %s: already attached",
+ dc->backing_dev_name);
return -EINVAL;
}
if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
- pr_err("Can't attach %s: shutting down", buf);
+ pr_err("Can't attach %s: shutting down",
+ dc->backing_dev_name);
return -EINVAL;
}
if (dc->sb.block_size < c->sb.block_size) {
/* Will die */
pr_err("Couldn't attach %s: block size less than set's block size",
- buf);
+ dc->backing_dev_name);
return -EINVAL;
}
@@ -1029,7 +1027,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
pr_err("Tried to attach %s but duplicate UUID already attached",
- buf);
+ dc->backing_dev_name);
return -EINVAL;
}
@@ -1047,13 +1045,15 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
if (!u) {
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- pr_err("Couldn't find uuid for %s in set", buf);
+ pr_err("Couldn't find uuid for %s in set",
+ dc->backing_dev_name);
return -ENOENT;
}
u = uuid_find_empty(c);
if (!u) {
- pr_err("Not caching %s, no room for UUID", buf);
+ pr_err("Not caching %s, no room for UUID",
+ dc->backing_dev_name);
return -EINVAL;
}
}
@@ -1112,7 +1112,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
up_write(&dc->writeback_lock);
pr_info("Caching %s as %s on set %pU",
- bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
+ dc->backing_dev_name,
+ dc->disk.disk->disk_name,
dc->disk.c->sb.set_uuid);
return 0;
}
@@ -1225,10 +1226,10 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev,
struct cached_dev *dc)
{
- char name[BDEVNAME_SIZE];
const char *err = "cannot allocate memory";
struct cache_set *c;
+ bdevname(bdev, dc->backing_dev_name);
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
@@ -1237,6 +1238,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
get_page(sb_page);
+
if (cached_dev_init(dc, sb->block_size << 9))
goto err;
@@ -1247,7 +1249,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
- pr_info("registered backing device %s", bdevname(bdev, name));
+ pr_info("registered backing device %s", dc->backing_dev_name);
list_add(&dc->list, &uncached_devices);
list_for_each_entry(c, &bch_cache_sets, list)
@@ -1259,7 +1261,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
return;
err:
- pr_notice("error %s: %s", bdevname(bdev, name), err);
+ pr_notice("error %s: %s", dc->backing_dev_name, err);
bcache_device_stop(&dc->disk);
}
@@ -1367,7 +1369,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
bool bch_cached_dev_error(struct cached_dev *dc)
{
- char name[BDEVNAME_SIZE];
+ struct cache_set *c;
if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
return false;
@@ -1377,7 +1379,22 @@ bool bch_cached_dev_error(struct cached_dev *dc)
smp_mb();
pr_err("stop %s: too many IO errors on backing device %s\n",
- dc->disk.disk->disk_name, bdevname(dc->bdev, name));
+ dc->disk.disk->disk_name, dc->backing_dev_name);
+
+ /*
+ * If the cached device is still attached to a cache set,
+ * even if dc->io_disable is true and no more I/O requests
+ * are accepted, internal cache-device I/O (writeback scan or
+ * garbage collection) may still prevent the bcache device
+ * from being stopped. So here CACHE_SET_IO_DISABLE should
+ * also be set in c->flags, so that internal I/O to the cache
+ * device is rejected and stopped immediately.
+ * If c is NULL, the bcache device is not attached to any
+ * cache set, so there is no CACHE_SET_IO_DISABLE bit to set.
+ */
+ c = dc->disk.c;
+ if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
+ pr_info("CACHE_SET_IO_DISABLE already set");
bcache_device_stop(&dc->disk);
return true;
@@ -1395,7 +1412,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
return false;
if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
- pr_warn("CACHE_SET_IO_DISABLE already set");
+ pr_info("CACHE_SET_IO_DISABLE already set");
/* XXX: we can be called from atomic context
acquire_console_sem();
@@ -1539,6 +1556,20 @@ static void conditional_stop_bcache_device(struct cache_set *c,
*/
pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
d->disk->disk_name);
+ /*
+ * There might be a small time gap during which the cache
+ * set is released but the bcache device is not. Inside this
+ * gap, regular I/O requests go directly to the backing
+ * device because no cache set is attached. In writeback
+ * mode with a dirty cache, this behavior may also introduce
+ * potentially inconsistent data.
+ * Therefore before calling bcache_device_stop() due
+ * to a broken cache device, dc->io_disable should be
+ * explicitly set to true.
+ */
+ dc->io_disable = true;
+ /* make others know io_disable is true earlier */
+ smp_mb();
bcache_device_stop(d);
} else {
/*
@@ -2003,12 +2034,10 @@ static int cache_alloc(struct cache *ca)
static int register_cache(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev, struct cache *ca)
{
- char name[BDEVNAME_SIZE];
const char *err = NULL; /* must be set for any error case */
int ret = 0;
- bdevname(bdev, name);
-
+ bdevname(bdev, ca->cache_dev_name);
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
@@ -2045,14 +2074,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
goto out;
}
- pr_info("registered cache device %s", name);
+ pr_info("registered cache device %s", ca->cache_dev_name);
out:
kobject_put(&ca->kobj);
err:
if (err)
- pr_notice("error %s: %s", name, err);
+ pr_notice("error %s: %s", ca->cache_dev_name, err);
return ret;
}
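
Aside: the bcache changes above cache the result of bdevname() once at registration time (backing_dev_name/cache_dev_name), so error and status messages no longer need a local BDEVNAME_SIZE buffer on the stack. A minimal sketch of the pattern (the structure and function names are hypothetical; BDEVNAME_SIZE and bdevname() are the kernel's own):

/* Sketch only: cache the block device name once and reuse it. */
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/kernel.h>

struct my_cached_dev {
	struct block_device	*bdev;
	char			backing_dev_name[BDEVNAME_SIZE];
};

static void my_register_bdev(struct my_cached_dev *dc,
			     struct block_device *bdev)
{
	dc->bdev = bdev;
	bdevname(bdev, dc->backing_dev_name);	/* fill the cached copy */
	pr_info("registered backing device %s", dc->backing_dev_name);
}
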
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 4a9547cdcdc5..ad45ebe1a74b 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -244,8 +244,10 @@ static void dirty_endio(struct bio *bio)
struct keybuf_key *w = bio->bi_private;
struct dirty_io *io = w->private;
- if (bio->bi_status)
+ if (bio->bi_status) {
SET_KEY_DIRTY(&w->key, false);
+ bch_count_backing_io_errors(io->dc, bio);
+ }
closure_put(&io->cl);
}
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 12aa9ca21d8c..dc385b70e4c3 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1681,8 +1681,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (block_size <= KMALLOC_MAX_SIZE &&
(block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
- snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", c->block_size);
- c->slab_cache = kmem_cache_create(slab_name, c->block_size, ARCH_KMALLOC_MINALIGN,
+ unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
+ snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
+ c->slab_cache = kmem_cache_create(slab_name, block_size, align,
SLAB_RECLAIM_ACCOUNT, NULL);
if (!c->slab_cache) {
r = -ENOMEM;
diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
index 1d0af0a21fc7..84814e819e4c 100644
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -166,7 +166,7 @@ static bool max_work_reached(struct background_tracker *b)
atomic_read(&b->pending_demotes) >= b->max_work;
}
-struct bt_work *alloc_work(struct background_tracker *b)
+static struct bt_work *alloc_work(struct background_tracker *b)
{
if (max_work_reached(b))
return NULL;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 77d9fe58dae2..514fb4aec5d1 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2440,7 +2440,7 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str
unsigned i;
for (i = 0; i < ic->journal_sections; i++)
kvfree(sl[i]);
- kfree(sl);
+ kvfree(sl);
}
static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 580c49cc8079..5903e492bb34 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -23,6 +23,8 @@
#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
+#define MAX_NR_MIRRORS (DM_KCOPYD_MAX_REGIONS + 1)
+
#define DM_RAID1_HANDLE_ERRORS 0x01
#define DM_RAID1_KEEP_LOG 0x02
#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -255,7 +257,7 @@ static int mirror_flush(struct dm_target *ti)
unsigned long error_bits;
unsigned int i;
- struct dm_io_region io[ms->nr_mirrors];
+ struct dm_io_region io[MAX_NR_MIRRORS];
struct mirror *m;
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
@@ -651,7 +653,7 @@ static void write_callback(unsigned long error, void *context)
static void do_write(struct mirror_set *ms, struct bio *bio)
{
unsigned int i;
- struct dm_io_region io[ms->nr_mirrors], *dest = io;
+ struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
struct mirror *m;
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
@@ -1083,7 +1085,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argc -= args_used;
if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
- nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
+ nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
ti->error = "Invalid number of mirrors";
dm_dirty_log_destroy(dl);
return -EINVAL;
@@ -1404,7 +1406,7 @@ static void mirror_status(struct dm_target *ti, status_type_t type,
int num_feature_args = 0;
struct mirror_set *ms = (struct mirror_set *) ti->private;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
- char buffer[ms->nr_mirrors + 1];
+ char buffer[MAX_NR_MIRRORS + 1];
switch (type) {
case STATUSTYPE_INFO:
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4ea404dbcf0b..0a7b0107ca78 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1020,7 +1020,8 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
- sector_t sector, int *srcu_idx)
+ sector_t sector, int *srcu_idx)
+ __acquires(md->io_barrier)
{
struct dm_table *map;
struct dm_target *ti;
@@ -1037,7 +1038,7 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
}
static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
@@ -1065,7 +1066,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
}
static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+ void *addr, size_t bytes, struct iov_iter *i)
{
struct mapped_device *md = dax_get_private(dax_dev);
sector_t sector = pgoff * PAGE_SECTORS;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3bea45e8ccff..c208c01f63a5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9256,8 +9256,10 @@ void md_reload_sb(struct mddev *mddev, int nr)
check_sb_changes(mddev, rdev);
/* Read all rdev's to update recovery_offset */
- rdev_for_each_rcu(rdev, mddev)
- read_rdev(mddev, rdev);
+ rdev_for_each_rcu(rdev, mddev) {
+ if (!test_bit(Faulty, &rdev->flags))
+ read_rdev(mddev, rdev);
+ }
}
EXPORT_SYMBOL(md_reload_sb);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e2943fb74056..e9e3308cb0a7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -854,7 +854,7 @@ static void flush_pending_writes(struct r1conf *conf)
* there is no normal IO happening. It must arrange to call
* lower_barrier when the particular background IO completes.
*/
-static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
+static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
int idx = sector_to_idx(sector_nr);
@@ -885,13 +885,23 @@ static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
* max resync count which allowed on current I/O barrier bucket.
*/
wait_event_lock_irq(conf->wait_barrier,
- !conf->array_frozen &&
+ (!conf->array_frozen &&
!atomic_read(&conf->nr_pending[idx]) &&
- atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH,
+ atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
+ test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
conf->resync_lock);
+ if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
+ atomic_dec(&conf->barrier[idx]);
+ spin_unlock_irq(&conf->resync_lock);
+ wake_up(&conf->wait_barrier);
+ return -EINTR;
+ }
+
atomic_inc(&conf->nr_sync_pending);
spin_unlock_irq(&conf->resync_lock);
+
+ return 0;
}
static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
@@ -1092,6 +1102,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
goto skip_copy;
}
+ behind_bio->bi_write_hint = bio->bi_write_hint;
+
while (i < vcnt && size) {
struct page *page;
int len = min_t(int, PAGE_SIZE, size);
@@ -2662,9 +2674,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bitmap_cond_end_sync(mddev->bitmap, sector_nr,
mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
- r1_bio = raid1_alloc_init_r1buf(conf);
- raise_barrier(conf, sector_nr);
+
+ if (raise_barrier(conf, sector_nr))
+ return 0;
+
+ r1_bio = raid1_alloc_init_r1buf(conf);
rcu_read_lock();
/*
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index e216cd768409..b07114b5efb2 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -20,7 +20,7 @@
//
// VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@xs4all.nl>
//
-// Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org>
// SAA7111, SAA7113 and SAA7118 support
#include "saa711x_regs.h"
diff --git a/drivers/media/i2c/saa711x_regs.h b/drivers/media/i2c/saa711x_regs.h
index a50d480e101a..44fabe08234d 100644
--- a/drivers/media/i2c/saa711x_regs.h
+++ b/drivers/media/i2c/saa711x_regs.h
@@ -2,7 +2,7 @@
* SPDX-License-Identifier: GPL-2.0+
* saa711x - Philips SAA711x video decoder register specifications
*
- * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
*/
#define R_00_CHIP_VERSION 0x00
diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
index 1c5c61d829d6..9b4f21237810 100644
--- a/drivers/media/i2c/tda7432.c
+++ b/drivers/media/i2c/tda7432.c
@@ -8,7 +8,7 @@
* Muting and tone control by Jonathan Isom <jisom@ematic.com>
*
* Copyright (c) 2000 Eric Sandeen <eric_sandeen@bigfoot.com>
- * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
* This code is placed under the terms of the GNU General Public License
* Based on tda9855.c by Steve VanDeBogart (vandebo@uclink.berkeley.edu)
* Which was based on tda8425.c by Greg Alexander (c) 1998
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 2476d812f669..1734ed4ede33 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -2,7 +2,7 @@
//
// tvp5150 - Texas Instruments TVP5150A/AM1 and TVP5151 video decoder driver
//
-// Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@kernel.org>
#include <dt-bindings/media/tvp5150.h>
#include <linux/i2c.h>
diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h
index c43b7b844021..d3a764cae1a0 100644
--- a/drivers/media/i2c/tvp5150_reg.h
+++ b/drivers/media/i2c/tvp5150_reg.h
@@ -3,7 +3,7 @@
*
* tvp5150 - Texas Instruments TVP5150A/AM1 video decoder registers
*
- * Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@kernel.org>
*/
#define TVP5150_VD_IN_SRC_SEL_1 0x00 /* Video input source selection #1 */
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index a26c1a3f7183..4599b7e28a8d 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -5,7 +5,7 @@
* Author: Santiago Nunez-Corrales <santiago.nunez@ridgerun.com>
*
* This code is partially based upon the TVP5150 driver
- * written by Mauro Carvalho Chehab (mchehab@infradead.org),
+ * written by Mauro Carvalho Chehab <mchehab@kernel.org>,
* the TVP514x driver written by Vaibhav Hiremath <hvaibhav@ti.com>
* and the TVP7002 driver in the TI LSP 2.10.00.14. Revisions by
* Muralidharan Karicheri and Snehaprabha Narnakaje (TI).
diff --git a/drivers/media/i2c/tvp7002_reg.h b/drivers/media/i2c/tvp7002_reg.h
index 3c8c8b0a6a4c..7f56ba689dfe 100644
--- a/drivers/media/i2c/tvp7002_reg.h
+++ b/drivers/media/i2c/tvp7002_reg.h
@@ -5,7 +5,7 @@
* Author: Santiago Nunez-Corrales <santiago.nunez@ridgerun.com>
*
* This code is partially based upon the TVP5150 driver
- * written by Mauro Carvalho Chehab (mchehab@infradead.org),
+ * written by Mauro Carvalho Chehab <mchehab@kernel.org>,
* the TVP514x driver written by Vaibhav Hiremath <hvaibhav@ti.com>
* and the TVP7002 driver in the TI LSP 2.10.00.14
*
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index 67ac51eff15c..6b87a721dc49 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -4,7 +4,7 @@
* Copyright (C) 2010 Nokia Corporation
*
* Based on drivers/media/video/v4l2_dev.c code authored by
- * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
+ * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2)
* Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
*
* Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.c b/drivers/media/pci/bt8xx/bttv-audio-hook.c
index 9f1f9169fb5b..346fc7f58839 100644
--- a/drivers/media/pci/bt8xx/bttv-audio-hook.c
+++ b/drivers/media/pci/bt8xx/bttv-audio-hook.c
@@ -1,7 +1,7 @@
/*
* Handlers for board audio hooks, split from bttv-cards
*
- * Copyright (c) 2006 Mauro Carvalho Chehab (mchehab@infradead.org)
+ * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
* This code is placed under the terms of the GNU General Public License
*/
diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.h b/drivers/media/pci/bt8xx/bttv-audio-hook.h
index 159d07adeff8..be16a537a03a 100644
--- a/drivers/media/pci/bt8xx/bttv-audio-hook.h
+++ b/drivers/media/pci/bt8xx/bttv-audio-hook.h
@@ -1,7 +1,7 @@
/*
* Handlers for board audio hooks, split from bttv-cards
*
- * Copyright (c) 2006 Mauro Carvalho Chehab (mchehab@infradead.org)
+ * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
* This code is placed under the terms of the GNU General Public License
*/
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index 1902732f90e1..2616243b2c49 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -2447,7 +2447,7 @@ struct tvcard bttv_tvcards[] = {
},
/* ---- card 0x88---------------------------------- */
[BTTV_BOARD_ACORP_Y878F] = {
- /* Mauro Carvalho Chehab <mchehab@infradead.org> */
+ /* Mauro Carvalho Chehab <mchehab@kernel.org> */
.name = "Acorp Y878F",
.video_inputs = 3,
/* .audio_inputs= 1, */
@@ -2688,7 +2688,7 @@ struct tvcard bttv_tvcards[] = {
},
[BTTV_BOARD_ENLTV_FM_2] = {
/* Encore TV Tuner Pro ENL TV-FM-2
- Mauro Carvalho Chehab <mchehab@infradead.org */
+ Mauro Carvalho Chehab <mchehab@kernel.org> */
.name = "Encore ENL TV-FM-2",
.video_inputs = 3,
/* .audio_inputs= 1, */
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 707f57a9f940..de3f44b8dec6 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -13,7 +13,7 @@
(c) 2005-2006 Nickolay V. Shmyrev <nshmyrev@yandex.ru>
Fixes to be fully V4L2 compliant by
- (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
Cropping and overscan support
Copyright (C) 2005, 2006 Michael H. Schimek <mschimek@gmx.at>
diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c
index eccd1e3d717a..c76823eb399d 100644
--- a/drivers/media/pci/bt8xx/bttv-i2c.c
+++ b/drivers/media/pci/bt8xx/bttv-i2c.c
@@ -8,7 +8,7 @@
& Marcus Metzler (mocm@thp.uni-koeln.de)
(c) 1999-2003 Gerd Knorr <kraxel@bytesex.org>
- (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
+ (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org>
- Multituner support and i2c address binding
This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index be49589a61d2..395ff9bba759 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -13,7 +13,7 @@
* Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
* Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
* Markus Rechberger <mrechberger@gmail.com>
- * Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Mauro Carvalho Chehab <mchehab@kernel.org>
* Sascha Sommer <saschasommer@freenet.de>
* Copyright (C) 2004, 2005 Chris Pascoe
* Copyright (C) 2003, 2004 Gerd Knorr
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index ab09bb55cf45..8a28fda703a2 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -4,7 +4,7 @@
*
* (c) 2007 Trent Piepho <xyzzy@speakeasy.org>
* (c) 2005,2006 Ricardo Cerqueira <v4l@cerqueira.org>
- * (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org>
* Based on a dummy cx88 module by Gerd Knorr <kraxel@bytesex.org>
* Based on dummy.c by Jaroslav Kysela <perex@perex.cz>
*
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(index, "Index value for cx88x capture interface(s).");
MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards");
MODULE_AUTHOR("Ricardo Cerqueira");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX88_VERSION);
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 0e0952e60795..7a4876cf9f08 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -5,7 +5,7 @@
* (c) 2004 Jelle Foks <jelle@foks.us>
* (c) 2004 Gerd Knorr <kraxel@bytesex.org>
*
- * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org>
* - video_ioctl2 conversion
*
* Includes parts from the ivtv driver <http://sourceforge.net/projects/ivtv/>
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index 8bfa5b7ed91b..60988e95b637 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -4,7 +4,7 @@
*
* (c) 2003 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
*
- * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org>
* - Multituner support
* - video_ioctl2 conversion
* - PAL/M fixes
diff --git a/drivers/media/pci/cx88/cx88-i2c.c b/drivers/media/pci/cx88/cx88-i2c.c
index f7692775fb5a..99f88a05a7c9 100644
--- a/drivers/media/pci/cx88/cx88-i2c.c
+++ b/drivers/media/pci/cx88/cx88-i2c.c
@@ -8,7 +8,7 @@
* (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
* (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org>
*
- * (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org>
* - Multituner support and i2c address binding
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 9be682cdb644..7b113bad70d2 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -5,7 +5,7 @@
*
* (c) 2003-04 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
*
- * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org>
* - Multituner support
* - video_ioctl2 conversion
* - PAL/M fixes
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 5ef635e72e10..4c52ac6d8bc5 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -4,7 +4,7 @@
* Copyright 1997 M. Kirkwood
*
* Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com>
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
* Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk>
* Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
*
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 9e12c6027359..840b7d60462b 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -2,7 +2,7 @@
* radio-aztech.c - Aztech radio card driver
*
* Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@xs4all.nl>
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
* Adapted to support the Video for Linux API by
* Russell Kroll <rkroll@exploits.org>. Based on original tuner code by:
*
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 3ff4c4e1435f..f051f8694ab9 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -15,7 +15,7 @@
* Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
*
* Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com>
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*
* Note: this card seems to swap the left and right audio channels!
*
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 95f06f3b35dc..e4e758739246 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -27,7 +27,7 @@
* BUGS:
* - card unmutes if you change frequency
*
- * (c) 2006, 2007 by Mauro Carvalho Chehab <mchehab@infradead.org>:
+ * (c) 2006, 2007 by Mauro Carvalho Chehab <mchehab@kernel.org>:
* - Conversion to V4L2 API
* - Uses video_ioctl2 for parsing and to add debug support
*/
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index abeaedd8d437..5a1470eb753e 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -7,7 +7,7 @@
* Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
*
* Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com>
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*
* Fully tested with actual hardware and the v4l2-compliance tool.
*/
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index fc4e63d36e4c..4f9b97edd9eb 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -13,7 +13,7 @@
* No volume control - only mute/unmute - you have to use line volume
* control on SB-part of SF16-FMI/SF16-FMP/SF16-FMD
*
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*/
#include <linux/kernel.h> /* __setup */
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index 4f116ea294fb..1af8f29cc7d1 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -17,7 +17,7 @@
* Volume Control is done digitally
*
* Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com>
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*/
#include <linux/module.h> /* Modules */
diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c
index 26a8c6002121..a4bad322ffff 100644
--- a/drivers/media/radio/radio-trust.c
+++ b/drivers/media/radio/radio-trust.c
@@ -12,7 +12,7 @@
* Scott McGrath (smcgrath@twilight.vtc.vsc.edu)
* William McGrath (wmcgrath@twilight.vtc.vsc.edu)
*
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*/
#include <stdarg.h>
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index eb72a4d13758..d0d67ad85b8f 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -25,7 +25,7 @@
* The frequency change is necessary since the card never seems to be
* completely silent.
*
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*/
#include <linux/module.h> /* Modules */
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index 026e88eef29c..6007cd09b328 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -27,7 +27,7 @@
* 2002-07-15 - Fix Stereo typo
*
* 2006-07-24 - Converted to V4L2 API
- * by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * by Mauro Carvalho Chehab <mchehab@kernel.org>
*
* Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com>
*
diff --git a/drivers/media/rc/keymaps/rc-avermedia-m135a.c b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
index f6977df1a75b..d275d98d066a 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-m135a.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
@@ -12,7 +12,7 @@
*
* On Avermedia M135A with IR model RM-JX, the same codes exist on both
* Positivo (BR) and original IR, initial version and remote control codes
- * added by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * added by Mauro Carvalho Chehab <mchehab@kernel.org>
*
* Positivo also ships Avermedia M135A with model RM-K6, extra control
* codes added by Herton Ronaldo Krzesinski <herton@mandriva.com.br>
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
index e4e78c1f4123..057c13b765ef 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
@@ -9,7 +9,7 @@
#include <linux/module.h>
/* Encore ENLTV-FM v5.3
- Mauro Carvalho Chehab <mchehab@infradead.org>
+ Mauro Carvalho Chehab <mchehab@kernel.org>
*/
static struct rc_map_table encore_enltv_fm53[] = {
diff --git a/drivers/media/rc/keymaps/rc-encore-enltv2.c b/drivers/media/rc/keymaps/rc-encore-enltv2.c
index c3d4437a6fda..cd0555924456 100644
--- a/drivers/media/rc/keymaps/rc-encore-enltv2.c
+++ b/drivers/media/rc/keymaps/rc-encore-enltv2.c
@@ -9,7 +9,7 @@
#include <linux/module.h>
/* Encore ENLTV2-FM - silver plastic - "Wand Media" written at the bottom
- Mauro Carvalho Chehab <mchehab@infradead.org> */
+ Mauro Carvalho Chehab <mchehab@kernel.org> */
static struct rc_map_table encore_enltv2[] = {
{ 0x4c, KEY_POWER2 },
diff --git a/drivers/media/rc/keymaps/rc-kaiomy.c b/drivers/media/rc/keymaps/rc-kaiomy.c
index f0f88df18606..a00051339842 100644
--- a/drivers/media/rc/keymaps/rc-kaiomy.c
+++ b/drivers/media/rc/keymaps/rc-kaiomy.c
@@ -9,7 +9,7 @@
#include <linux/module.h>
/* Kaiomy TVnPC U2
- Mauro Carvalho Chehab <mchehab@infradead.org>
+ Mauro Carvalho Chehab <mchehab@kernel.org>
*/
static struct rc_map_table kaiomy[] = {
diff --git a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
index 453e04377de7..db5edde3eeb1 100644
--- a/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
+++ b/drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
@@ -9,7 +9,7 @@
#include <linux/module.h>
/* Kworld Plus TV Analog Lite PCI IR
- Mauro Carvalho Chehab <mchehab@infradead.org>
+ Mauro Carvalho Chehab <mchehab@kernel.org>
*/
static struct rc_map_table kworld_plus_tv_analog[] = {
diff --git a/drivers/media/rc/keymaps/rc-pixelview-new.c b/drivers/media/rc/keymaps/rc-pixelview-new.c
index 791130f108ff..e4e34f2ccf74 100644
--- a/drivers/media/rc/keymaps/rc-pixelview-new.c
+++ b/drivers/media/rc/keymaps/rc-pixelview-new.c
@@ -9,7 +9,7 @@
#include <linux/module.h>
/*
- Mauro Carvalho Chehab <mchehab@infradead.org>
+ Mauro Carvalho Chehab <mchehab@kernel.org>
present on PV MPEG 8000GT
*/
diff --git a/drivers/media/tuners/tea5761.c b/drivers/media/tuners/tea5761.c
index 88b3e80c38ad..d78a2bdb3e36 100644
--- a/drivers/media/tuners/tea5761.c
+++ b/drivers/media/tuners/tea5761.c
@@ -2,7 +2,7 @@
// For Philips TEA5761 FM Chip
// I2C address is always 0x20 (0x10 at 7-bit mode).
//
-// Copyright (c) 2005-2007 Mauro Carvalho Chehab (mchehab@infradead.org)
+// Copyright (c) 2005-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
#include <linux/i2c.h>
#include <linux/slab.h>
@@ -337,5 +337,5 @@ EXPORT_SYMBOL_GPL(tea5761_attach);
EXPORT_SYMBOL_GPL(tea5761_autodetection);
MODULE_DESCRIPTION("Philips TEA5761 FM tuner driver");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/tuners/tea5767.c b/drivers/media/tuners/tea5767.c
index 2b2c064d7dc3..016d0d5ec50b 100644
--- a/drivers/media/tuners/tea5767.c
+++ b/drivers/media/tuners/tea5767.c
@@ -2,7 +2,7 @@
// For Philips TEA5767 FM Chip used on some TV Cards like Prolink Pixelview
// I2C address is always 0xC0.
//
-// Copyright (c) 2005 Mauro Carvalho Chehab (mchehab@infradead.org)
+// Copyright (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org>
//
// tea5767 autodetection thanks to Torsten Seeboth and Atsushi Nakagawa
// from their contributions on DScaler.
@@ -469,5 +469,5 @@ EXPORT_SYMBOL_GPL(tea5767_attach);
EXPORT_SYMBOL_GPL(tea5767_autodetection);
MODULE_DESCRIPTION("Philips TEA5767 FM tuner driver");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h
index bb0437c36c03..50d017a4822a 100644
--- a/drivers/media/tuners/tuner-xc2028-types.h
+++ b/drivers/media/tuners/tuner-xc2028-types.h
@@ -5,7 +5,7 @@
* This file includes internal types to be used inside tuner-xc2028.
* Shouldn't be included outside tuner-xc2028
*
- * Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org)
+ * Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
*/
/* xc3028 firmware types */
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index fca85e08ebd7..84744e138982 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// tuner-xc2028
//
-// Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org)
+// Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
//
// Copyright (c) 2007 Michel Ludwig (michel.ludwig@gmail.com)
// - frontend interface
@@ -1518,7 +1518,7 @@ EXPORT_SYMBOL(xc2028_attach);
MODULE_DESCRIPTION("Xceive xc2028/xc3028 tuner driver");
MODULE_AUTHOR("Michel Ludwig <michel.ludwig@gmail.com>");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(XC2028_DEFAULT_FIRMWARE);
MODULE_FIRMWARE(XC3028L_DEFAULT_FIRMWARE);
diff --git a/drivers/media/tuners/tuner-xc2028.h b/drivers/media/tuners/tuner-xc2028.h
index 03fd6d4233a4..7b58bc06e35c 100644
--- a/drivers/media/tuners/tuner-xc2028.h
+++ b/drivers/media/tuners/tuner-xc2028.h
@@ -2,7 +2,7 @@
* SPDX-License-Identifier: GPL-2.0
* tuner-xc2028
*
- * Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org)
+ * Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
*/
#ifndef __TUNER_XC2028_H__
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index 3c2694a16ed1..d1e66b503f4d 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -2,7 +2,7 @@
//
// em28xx-camera.c - driver for Empia EM25xx/27xx/28xx USB video capture devices
//
-// Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@kernel.org>
// Copyright (C) 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
//
// This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 6e0e67d23876..7c3203d7044b 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -5,7 +5,7 @@
//
// Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
// Markus Rechberger <mrechberger@gmail.com>
-// Mauro Carvalho Chehab <mchehab@infradead.org>
+// Mauro Carvalho Chehab <mchehab@kernel.org>
// Sascha Sommer <saschasommer@freenet.de>
// Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
//
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 36d341fb65dd..f28995383090 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -4,7 +4,7 @@
//
// Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
// Markus Rechberger <mrechberger@gmail.com>
-// Mauro Carvalho Chehab <mchehab@infradead.org>
+// Mauro Carvalho Chehab <mchehab@kernel.org>
// Sascha Sommer <saschasommer@freenet.de>
// Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
//
@@ -32,7 +32,7 @@
#define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \
"Markus Rechberger <mrechberger@gmail.com>, " \
- "Mauro Carvalho Chehab <mchehab@infradead.org>, " \
+ "Mauro Carvalho Chehab <mchehab@kernel.org>, " \
"Sascha Sommer <saschasommer@freenet.de>"
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index a54cb8dc52c9..3f493e0b0716 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -2,7 +2,7 @@
//
// DVB device driver for em28xx
//
-// (c) 2008-2011 Mauro Carvalho Chehab <mchehab@infradead.org>
+// (c) 2008-2011 Mauro Carvalho Chehab <mchehab@kernel.org>
//
// (c) 2008 Devin Heitmueller <devin.heitmueller@gmail.com>
// - Fixes for the driver to properly work with HVR-950
@@ -63,7 +63,7 @@
#include "tc90522.h"
#include "qm1d1c0042.h"
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(DRIVER_DESC " - digital TV interface");
MODULE_VERSION(EM28XX_VERSION);
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 9151bccd859a..6458682bc6e2 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -4,7 +4,7 @@
//
// Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
// Markus Rechberger <mrechberger@gmail.com>
-// Mauro Carvalho Chehab <mchehab@infradead.org>
+// Mauro Carvalho Chehab <mchehab@kernel.org>
// Sascha Sommer <saschasommer@freenet.de>
// Copyright (C) 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
//
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 2dc1be00b8b8..f84a1208d5d3 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -4,7 +4,7 @@
//
// Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
// Markus Rechberger <mrechberger@gmail.com>
-// Mauro Carvalho Chehab <mchehab@infradead.org>
+// Mauro Carvalho Chehab <mchehab@kernel.org>
// Sascha Sommer <saschasommer@freenet.de>
//
// This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index d70ee13cc52e..68571bf36d28 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -5,7 +5,7 @@
//
// Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
// Markus Rechberger <mrechberger@gmail.com>
-// Mauro Carvalho Chehab <mchehab@infradead.org>
+// Mauro Carvalho Chehab <mchehab@kernel.org>
// Sascha Sommer <saschasommer@freenet.de>
// Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
//
@@ -44,7 +44,7 @@
#define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \
"Markus Rechberger <mrechberger@gmail.com>, " \
- "Mauro Carvalho Chehab <mchehab@infradead.org>, " \
+ "Mauro Carvalho Chehab <mchehab@kernel.org>, " \
"Sascha Sommer <saschasommer@freenet.de>"
static unsigned int isoc_debug;
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 63c7c6124707..b0378e77ddff 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -4,7 +4,7 @@
*
* Copyright (C) 2005 Markus Rechberger <mrechberger@gmail.com>
* Ludovico Cavedon <cavedon@sssup.it>
- * Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Mauro Carvalho Chehab <mchehab@kernel.org>
* Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
*
* Based on the em2800 driver from Sascha Sommer <saschasommer@freenet.de>
diff --git a/drivers/media/usb/gspca/zc3xx-reg.h b/drivers/media/usb/gspca/zc3xx-reg.h
index a1bd94e8ce52..71fda38e85e0 100644
--- a/drivers/media/usb/gspca/zc3xx-reg.h
+++ b/drivers/media/usb/gspca/zc3xx-reg.h
@@ -1,7 +1,7 @@
/*
* zc030x registers
*
- * Copyright (c) 2008 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2008 Mauro Carvalho Chehab <mchehab@kernel.org>
*
* The register aliases used here came from this driver:
* http://zc0302.sourceforge.net/zc0302.php
diff --git a/drivers/media/usb/tm6000/tm6000-cards.c b/drivers/media/usb/tm6000/tm6000-cards.c
index 70939e96b856..23df50aa0a4a 100644
--- a/drivers/media/usb/tm6000/tm6000-cards.c
+++ b/drivers/media/usb/tm6000/tm6000-cards.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// tm6000-cards.c - driver for TM5600/TM6000/TM6010 USB video capture devices
//
-// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/media/usb/tm6000/tm6000-core.c
index 23a1332d98e6..d3229aa45fcb 100644
--- a/drivers/media/usb/tm6000/tm6000-core.c
+++ b/drivers/media/usb/tm6000/tm6000-core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// tm6000-core.c - driver for TM5600/TM6000/TM6010 USB video capture devices
//
-// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
//
// Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com>
// - DVB-T support
diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/media/usb/tm6000/tm6000-i2c.c
index c9a62bbff27a..659b63febf85 100644
--- a/drivers/media/usb/tm6000/tm6000-i2c.c
+++ b/drivers/media/usb/tm6000/tm6000-i2c.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// tm6000-i2c.c - driver for TM5600/TM6000/TM6010 USB video capture devices
//
-// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
//
// Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com>
// - Fix SMBus Read Byte command
diff --git a/drivers/media/usb/tm6000/tm6000-regs.h b/drivers/media/usb/tm6000/tm6000-regs.h
index 21587fcf11e3..d10424673db9 100644
--- a/drivers/media/usb/tm6000/tm6000-regs.h
+++ b/drivers/media/usb/tm6000/tm6000-regs.h
@@ -2,7 +2,7 @@
* SPDX-License-Identifier: GPL-2.0
* tm6000-regs.h - driver for TM5600/TM6000/TM6010 USB video capture devices
*
- * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
*/
/*
diff --git a/drivers/media/usb/tm6000/tm6000-usb-isoc.h b/drivers/media/usb/tm6000/tm6000-usb-isoc.h
index 5c615b0a7a46..b275dbce3a1b 100644
--- a/drivers/media/usb/tm6000/tm6000-usb-isoc.h
+++ b/drivers/media/usb/tm6000/tm6000-usb-isoc.h
@@ -2,7 +2,7 @@
* SPDX-License-Identifier: GPL-2.0
* tm6000-buf.c - driver for TM5600/TM6000/TM6010 USB video capture devices
*
- * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
*/
#include <linux/videodev2.h>
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index b2399d4266da..aa85fe31c835 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// tm6000-video.c - driver for TM5600/TM6000/TM6010 USB video capture devices
//
-// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
//
// Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com>
// - Fixed module load/unload
diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/media/usb/tm6000/tm6000.h
index e1e45770e28d..0864ed7314eb 100644
--- a/drivers/media/usb/tm6000/tm6000.h
+++ b/drivers/media/usb/tm6000/tm6000.h
@@ -2,7 +2,7 @@
* SPDX-License-Identifier: GPL-2.0
* tm6000.h - driver for TM5600/TM6000/TM6010 USB video capture devices
*
- * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
*
* Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com>
* - DVB-T support
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 1d0b2208e8fb..c080dcc75393 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
- * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
+ * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2)
*
* Fixes: 20000516 Claudio Matsuoka <claudio@conectiva.com>
* - Added procfs support
@@ -1072,7 +1072,7 @@ static void __exit videodev_exit(void)
subsys_initcall(videodev_init);
module_exit(videodev_exit)
-MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_DESCRIPTION("Device registrar for Video4Linux drivers v2");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index f48c505550e0..de5d96dbe69e 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*
* Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
- * Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
+ * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2)
*/
#include <linux/mm.h>
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 2b3981842b4b..7491b337002c 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -1,11 +1,11 @@
/*
* generic helper functions for handling video4linux capture buffers
*
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
*
* Highly based on video-buf written originally by:
* (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
* (c) 2006 Ted Walther and John Sokol
*
* This program is free software; you can redistribute it and/or modify
@@ -38,7 +38,7 @@ static int debug;
module_param(debug, int, 0644);
MODULE_DESCRIPTION("helper module to manage video4linux buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL");
#define dprintk(level, fmt, arg...) \
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index e02353e340dd..f46132504d88 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -7,7 +7,7 @@
* Copyright (c) 2008 Magnus Damm
*
* Based on videobuf-vmalloc.c,
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index add2edb23eac..7770034aae28 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -6,11 +6,11 @@
* into PAGE_SIZE chunks). They also assume the driver does not need
* to touch the video data.
*
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
*
* Highly based on video-buf written originally by:
* (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
* (c) 2006 Ted Walther and John Sokol
*
* This program is free software; you can redistribute it and/or modify
@@ -48,7 +48,7 @@ static int debug;
module_param(debug, int, 0644);
MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL");
#define dprintk(level, fmt, arg...) \
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index 2ff7fcc77b11..45fe781aeeec 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -6,7 +6,7 @@
* into PAGE_SIZE chunks). They also assume the driver does not need
* to touch the video data.
*
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -41,7 +41,7 @@ static int debug;
module_param(debug, int, 0644);
MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL");
#define dprintk(level, fmt, arg...) \
diff --git a/drivers/memory/emif-asm-offsets.c b/drivers/memory/emif-asm-offsets.c
index 71a89d5d3efd..db8043019ec6 100644
--- a/drivers/memory/emif-asm-offsets.c
+++ b/drivers/memory/emif-asm-offsets.c
@@ -16,77 +16,7 @@
int main(void)
{
- DEFINE(EMIF_SDCFG_VAL_OFFSET,
- offsetof(struct emif_regs_amx3, emif_sdcfg_val));
- DEFINE(EMIF_TIMING1_VAL_OFFSET,
- offsetof(struct emif_regs_amx3, emif_timing1_val));
- DEFINE(EMIF_TIMING2_VAL_OFFSET,
- offsetof(struct emif_regs_amx3, emif_timing2_val));
- DEFINE(EMIF_TIMING3_VAL_OFFSET,
- offsetof(struct emif_regs_amx3, emif_timing3_val));
- DEFINE(EMIF_REF_CTRL_VAL_OFFSET,
- offsetof(struct emif_regs_amx3, emif_ref_ctrl_val));
- DEFINE(EMIF_ZQCFG_VAL_OFFSET,
- offsetof(struct emif_regs_amx3, emif_zqcfg_val));
- DEFINE(EMIF_PMCR_VAL_OFFSET,
- offsetof(struct emif_regs_amx3, emif_pmcr_val));
- DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET,
- offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val));
- DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET,
- offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl));
- DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET,
- offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh));
- DEFINE(EMIF_COS_CONFIG_OFFSET,
- offsetof(struct emif_regs_amx3, emif_cos_config));
- DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET,
- offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping));
- DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET,
- offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map));
- DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET,
- offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map));
- DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET,
- offsetof(struct emif_regs_amx3, emif_ocp_config_val));
- DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET,
- offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim));
- DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET,
- offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw));
- DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET,
- offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val));
- DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET,
- offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw));
- DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET,
- offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1));
- DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET,
- offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals));
- DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3));
-
- BLANK();
-
- DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET,
- offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt));
- DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET,
- offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys));
- DEFINE(EMIF_PM_CONFIG_OFFSET,
- offsetof(struct ti_emif_pm_data, ti_emif_sram_config));
- DEFINE(EMIF_PM_REGS_VIRT_OFFSET,
- offsetof(struct ti_emif_pm_data, regs_virt));
- DEFINE(EMIF_PM_REGS_PHYS_OFFSET,
- offsetof(struct ti_emif_pm_data, regs_phys));
- DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data));
-
- BLANK();
-
- DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET,
- offsetof(struct ti_emif_pm_functions, save_context));
- DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET,
- offsetof(struct ti_emif_pm_functions, restore_context));
- DEFINE(EMIF_PM_ENTER_SR_OFFSET,
- offsetof(struct ti_emif_pm_functions, enter_sr));
- DEFINE(EMIF_PM_EXIT_SR_OFFSET,
- offsetof(struct ti_emif_pm_functions, exit_sr));
- DEFINE(EMIF_PM_ABORT_SR_OFFSET,
- offsetof(struct ti_emif_pm_functions, abort_sr));
- DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions));
+ ti_emif_asm_offsets();
return 0;
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 231f3a1e27bf..86503f60468f 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
+ .no_write_same = 1,
};
static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index e4fafdd96e5e..eafd06f62a3a 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -305,8 +305,8 @@ static void cros_ec_sensors_register(struct cros_ec_dev *ec)
resp = (struct ec_response_motion_sense *)msg->data;
sensor_num = resp->dump.sensor_count;
- /* Allocate 2 extra sensors in case lid angle or FIFO are needed */
- sensor_cells = kzalloc(sizeof(struct mfd_cell) * (sensor_num + 2),
+ /* Allocate 1 extra sensor in case the FIFO is needed */
+ sensor_cells = kzalloc(sizeof(struct mfd_cell) * (sensor_num + 1),
GFP_KERNEL);
if (sensor_cells == NULL)
goto error;
@@ -362,16 +362,10 @@ static void cros_ec_sensors_register(struct cros_ec_dev *ec)
sensor_type[resp->info.type]++;
id++;
}
- if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2) {
- sensor_platforms[id].sensor_num = sensor_num;
- sensor_cells[id].name = "cros-ec-angle";
- sensor_cells[id].id = 0;
- sensor_cells[id].platform_data = &sensor_platforms[id];
- sensor_cells[id].pdata_size =
- sizeof(struct cros_ec_sensor_platform);
- id++;
- }
+ if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2)
+ ec->has_kb_wake_angle = true;
+
if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
sensor_cells[id].name = "cros-ec-ring";
id++;
@@ -424,6 +418,14 @@ static int ec_device_probe(struct platform_device *pdev)
goto failed;
}
+ /* check whether this EC is a sensor hub. */
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
+ cros_ec_sensors_register(ec);
+
+ /* Take control of the lightbar from the EC. */
+ lb_manual_suspend_ctrl(ec, 1);
+
+ /* We can now add the sysfs class; we know which parameter to show */
retval = cdev_device_add(&ec->cdev, &ec->class_dev);
if (retval) {
dev_err(dev, "cdev_device_add failed => %d\n", retval);
@@ -433,13 +435,6 @@ static int ec_device_probe(struct platform_device *pdev)
if (cros_ec_debugfs_init(ec))
dev_warn(dev, "failed to create debugfs directory\n");
- /* check whether this EC is a sensor hub. */
- if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
- cros_ec_sensors_register(ec);
-
- /* Take control of the lightbar from the EC. */
- lb_manual_suspend_ctrl(ec, 1);
-
return 0;
failed:
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a4c9c8297a6d..918d4fb742d1 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -717,6 +717,7 @@ struct cxl {
bool perst_select_user;
bool perst_same_image;
bool psl_timebase_synced;
+ bool tunneled_ops_supported;
/*
* number of contexts mapped on to this card. Possible values are:
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 83f1d08058fc..4d6736f9d463 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1742,6 +1742,15 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
/* Required for devices using CAPP DMA mode, harmless for others */
pci_set_master(dev);
+ adapter->tunneled_ops_supported = false;
+
+ if (cxl_is_power9()) {
+ if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1))
+ dev_info(&dev->dev, "Tunneled operations unsupported\n");
+ else
+ adapter->tunneled_ops_supported = true;
+ }
+
if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
goto err;
@@ -1768,6 +1777,9 @@ static void cxl_deconfigure_adapter(struct cxl *adapter)
{
struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
+ if (cxl_is_power9())
+ pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0);
+
cxl_native_release_psl_err_irq(adapter);
cxl_unmap_adapter_regs(adapter);
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 95285b7f636f..4b5a4c5d3c01 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -78,6 +78,15 @@ static ssize_t psl_timebase_synced_show(struct device *device,
return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
}
+static ssize_t tunneled_ops_supported_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl *adapter = to_cxl_adapter(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported);
+}
+
static ssize_t reset_adapter_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -183,6 +192,7 @@ static struct device_attribute adapter_attrs[] = {
__ATTR_RO(base_image),
__ATTR_RO(image_loaded),
__ATTR_RO(psl_timebase_synced),
+ __ATTR_RO(tunneled_ops_supported),
__ATTR_RW(load_image_on_perst),
__ATTR_RW(perst_reloads_same_image),
__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 0c125f207aea..33053b0d1fdf 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -518,7 +518,7 @@ static int at24_get_pdata(struct device *dev, struct at24_platform_data *pdata)
if (of_node && of_match_device(at24_of_match, dev))
cdata = of_device_get_match_data(dev);
else if (id)
- cdata = (void *)&id->driver_data;
+ cdata = (void *)id->driver_data;
else
cdata = acpi_device_get_match_data(dev);
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 8e0acd197c43..6af946d16d24 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io-64-nonatomic-hi-lo.h>
@@ -62,6 +63,17 @@
* need a custom accessor.
*/
+static unsigned long global_flags;
+/*
+ * Workaround to avoid using the RX DMAC from multiple channels.
+ * On R-Car H3 ES1.* and M3-W ES1.0, when multiple SDHI channels use
+ * the RX DMAC simultaneously, sometimes hundreds of bytes of data are
+ * not stored into system memory even though the DMAC interrupt happened.
+ * So this driver uses only one RX DMAC channel.
+ */
+#define SDHI_INTERNAL_DMAC_ONE_RX_ONLY 0
+#define SDHI_INTERNAL_DMAC_RX_IN_USE 1
+
/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
{
@@ -126,6 +138,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
RST_RESERVED_BITS | val);
+ if (host->data && host->data->flags & MMC_DATA_READ)
+ clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
+
renesas_sdhi_internal_dmac_enable_dma(host, true);
}
@@ -155,6 +170,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
if (data->flags & MMC_DATA_READ) {
dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
dir = DMA_FROM_DEVICE;
+ if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
+ test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
+ goto force_pio;
} else {
dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
dir = DMA_TO_DEVICE;
@@ -208,6 +226,9 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
renesas_sdhi_internal_dmac_enable_dma(host, false);
dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir);
+ if (dir == DMA_FROM_DEVICE)
+ clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
+
tmio_mmc_do_data_irq(host);
out:
spin_unlock_irq(&host->lock);
@@ -251,18 +272,24 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
* implementation as others may use a different implementation.
*/
static const struct soc_device_attribute gen3_soc_whitelist[] = {
- { .soc_id = "r8a7795", .revision = "ES1.*" },
- { .soc_id = "r8a7795", .revision = "ES2.0" },
- { .soc_id = "r8a7796", .revision = "ES1.0" },
- { .soc_id = "r8a77995", .revision = "ES1.0" },
- { /* sentinel */ }
+ { .soc_id = "r8a7795", .revision = "ES1.*",
+ .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
+ { .soc_id = "r8a7795", .revision = "ES2.0" },
+ { .soc_id = "r8a7796", .revision = "ES1.0",
+ .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
+ { .soc_id = "r8a77995", .revision = "ES1.0" },
+ { /* sentinel */ }
};
static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
{
- if (!soc_device_match(gen3_soc_whitelist))
+ const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist);
+
+ if (!soc)
return -ENODEV;
+ global_flags |= (unsigned long)soc->data;
+
return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops);
}
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 787434e5589d..78c25ad35fd2 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1312,7 +1312,7 @@ static void amd_enable_manual_tuning(struct pci_dev *pdev)
pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
}
-static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
+static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct pci_dev *pdev = slot->chip->pdev;
@@ -1351,6 +1351,27 @@ static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
return 0;
}
+static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ /* AMD requires custom HS200 tuning */
+ if (host->timing == MMC_TIMING_MMC_HS200)
+ return amd_execute_tuning_hs200(host, opcode);
+
+ /* Otherwise perform standard SDHCI tuning */
+ return sdhci_execute_tuning(mmc, opcode);
+}
+
+static int amd_probe_slot(struct sdhci_pci_slot *slot)
+{
+ struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
+
+ ops->execute_tuning = amd_execute_tuning;
+
+ return 0;
+}
+
static int amd_probe(struct sdhci_pci_chip *chip)
{
struct pci_dev *smbus_dev;
@@ -1385,12 +1406,12 @@ static const struct sdhci_ops amd_sdhci_pci_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
- .platform_execute_tuning = amd_execute_tuning,
};
static const struct sdhci_pci_fixes sdhci_amd = {
.probe = amd_probe,
.ops = &amd_sdhci_pci_ops,
+ .probe_slot = amd_probe_slot,
};
static const struct pci_device_id pci_ids[] = {
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index d4c07b85f18e..f5695be14499 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define PF38F4476 0x881c
+#define M28F00AP30 0x8963
/* STMicroelectronics chips */
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
extp->MinorVersion = '1';
}
+static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
+{
+ /*
+ * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy w.r.t.
+ * Erase Suspend for their small Erase Blocks (0x8000)
+ */
+ if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
+ return 1;
+ return 0;
+}
+
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
@@ -831,21 +843,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
(mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
goto sleep;
+ /* Do not allow suspend if reading/writing the erase block in progress */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
+
+ /* do not suspend small EBs on buggy Micron chips */
+ if (cfi_is_micron_28F00AP30(cfi, chip) &&
+ (chip->in_progress_block_mask == ~(0x8000-1)))
+ goto sleep;
/* Erase suspend */
- map_write(map, CMD(0xB0), adr);
+ map_write(map, CMD(0xB0), chip->in_progress_block_addr);
/* If the flash has finished erasing, then 'erase suspend'
* appears to make some (28F320) flash devices switch to
* 'read' mode. Make sure that we switch to 'read status'
* mode so we get the right data. --rmk
*/
- map_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1;
for (;;) {
- status = map_read(map, adr);
+ status = map_read(map, chip->in_progress_block_addr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
@@ -1041,8 +1062,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
sending the 0x70 (Read Status) command to an erasing
chip and expecting it to be ignored, that's what we
do. */
- map_write(map, CMD(0xd0), adr);
- map_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0xd0), chip->in_progress_block_addr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
@@ -1933,6 +1954,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
+ chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, len,
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 668e2cbc155b..692902df2598 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -816,9 +816,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;
- /* We could check to see if we're trying to access the sector
- * that is currently being erased. However, no user will try
- * anything like that so we just wait for the timeout. */
+ /* Do not allow suspend if reading/writing the erase block in progress */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
/* Erase suspend */
/* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2267,6 +2268,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(map->size - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map->size,
@@ -2356,6 +2358,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, len,
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
index d0cd6f8635d7..9c9f8936b63b 100644
--- a/drivers/mtd/nand/core.c
+++ b/drivers/mtd/nand/core.c
@@ -162,7 +162,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
ret = nanddev_erase(nand, &pos);
if (ret) {
einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
- einfo->state = MTD_ERASE_FAILED;
return ret;
}
@@ -170,8 +169,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
nanddev_pos_next_eraseblock(nand, &pos);
}
- einfo->state = MTD_ERASE_DONE;
-
return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/omap2.c
index 9c159f0dd9a6..321137158ff3 100644
--- a/drivers/mtd/nand/onenand/omap2.c
+++ b/drivers/mtd/nand/onenand/omap2.c
@@ -375,56 +375,42 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
- dma_addr_t dma_src, dma_dst;
- int bram_offset;
+ struct device *dev = &c->pdev->dev;
void *buf = (void *)buffer;
+ dma_addr_t dma_src, dma_dst;
+ int bram_offset, err;
size_t xtra;
- int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
- if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
- goto out_copy;
-
- /* panic_write() may be in an interrupt context */
- if (in_interrupt() || oops_in_progress)
+ /*
+ * If the buffer address is not DMA-able, len is not long enough to make
+ * DMA transfers profitable, or panic_write() may be in an interrupt
+ * context, fall back to PIO mode.
+ */
+ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
+ count < 384 || in_interrupt() || oops_in_progress)
goto out_copy;
- if (buf >= high_memory) {
- struct page *p1;
-
- if (((size_t)buf & PAGE_MASK) !=
- ((size_t)(buf + count - 1) & PAGE_MASK))
- goto out_copy;
- p1 = vmalloc_to_page(buf);
- if (!p1)
- goto out_copy;
- buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
- }
-
xtra = count & 3;
if (xtra) {
count -= xtra;
memcpy(buf + count, this->base + bram_offset + count, xtra);
}
+ dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
dma_src = c->phys_base + bram_offset;
- dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
- dev_err(&c->pdev->dev,
- "Couldn't DMA map a %d byte buffer\n",
- count);
- goto out_copy;
- }
- ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
-
- if (ret) {
- dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
+ if (dma_mapping_error(dev, dma_dst)) {
+ dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
goto out_copy;
}
- return 0;
+ err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
+ dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
+ if (!err)
+ return 0;
+
+ dev_err(dev, "timeout waiting for DMA\n");
out_copy:
memcpy(buf, this->base + bram_offset, count);
@@ -437,49 +423,34 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
- dma_addr_t dma_src, dma_dst;
- int bram_offset;
+ struct device *dev = &c->pdev->dev;
void *buf = (void *)buffer;
- int ret;
+ dma_addr_t dma_src, dma_dst;
+ int bram_offset, err;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
- if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
- goto out_copy;
-
- /* panic_write() may be in an interrupt context */
- if (in_interrupt() || oops_in_progress)
+ /*
+ * If the buffer address is not DMA-able, len is not long enough to make
+ * DMA transfers profitable, or panic_write() may be in an interrupt
+ * context, fall back to PIO mode.
+ */
+ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
+ count < 384 || in_interrupt() || oops_in_progress)
goto out_copy;
- if (buf >= high_memory) {
- struct page *p1;
-
- if (((size_t)buf & PAGE_MASK) !=
- ((size_t)(buf + count - 1) & PAGE_MASK))
- goto out_copy;
- p1 = vmalloc_to_page(buf);
- if (!p1)
- goto out_copy;
- buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
- }
-
- dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
+ dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_src)) {
- dev_err(&c->pdev->dev,
- "Couldn't DMA map a %d byte buffer\n",
- count);
- return -1;
- }
-
- ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
- dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
-
- if (ret) {
- dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
+ if (dma_mapping_error(dev, dma_src)) {
+ dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
goto out_copy;
}
- return 0;
+ err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
+ dma_unmap_single(dev, dma_src, count, DMA_TO_DEVICE);
+ if (!err)
+ return 0;
+
+ dev_err(dev, "timeout waiting for DMA\n");
out_copy:
memcpy(this->base + bram_offset, buf, count);
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 10e953218948..ebb1d141b900 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1074,7 +1074,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
return ret;
ret = marvell_nfc_wait_op(chip,
- chip->data_interface.timings.sdr.tPROG_max);
+ PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
return ret;
}
@@ -1194,11 +1194,13 @@ static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
NDCB0_CMD2(NAND_CMD_READSTART);
/*
- * Trigger the naked read operation only on the last chunk.
- * Otherwise, use monolithic read.
+ * Trigger the monolithic read on the first chunk, then naked read on
+ * intermediate chunks and finally a last naked read on the last chunk.
*/
- if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
+ if (chunk == 0)
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+ else if (chunk < lt->nchunks - 1)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
else
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
@@ -1408,6 +1410,7 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ u32 xtype;
int ret;
struct marvell_nfc_op nfc_op = {
.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
@@ -1423,7 +1426,12 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
* last naked write.
*/
if (chunk == 0) {
- nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
+ if (lt->nchunks == 1)
+ xtype = XTYPE_MONOLITHIC_RW;
+ else
+ xtype = XTYPE_WRITE_DISPATCH;
+
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
NDCB0_CMD1(NAND_CMD_SEQIN);
nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
@@ -1494,7 +1502,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
}
ret = marvell_nfc_wait_op(chip,
- chip->data_interface.timings.sdr.tPROG_max);
+ PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
marvell_nfc_disable_hw_ecc(chip);
@@ -2299,29 +2307,20 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
/*
* The legacy "num-cs" property indicates the number of CS on the only
* chip connected to the controller (legacy bindings does not support
- * more than one chip). CS are only incremented one by one while the RB
- * pin is always the #0.
+ * more than one chip). The CS and RB pins are always #0.
*
* When not using legacy bindings, a couple of "reg" and "nand-rb"
* properties must be filled. For each chip, expressed as a subnode,
* "reg" points to the CS lines and "nand-rb" to the RB line.
*/
- if (pdata) {
+ if (pdata || nfc->caps->legacy_of_bindings) {
nsels = 1;
- } else if (nfc->caps->legacy_of_bindings &&
- !of_get_property(np, "num-cs", &nsels)) {
- dev_err(dev, "missing num-cs property\n");
- return -EINVAL;
- } else if (!of_get_property(np, "reg", &nsels)) {
- dev_err(dev, "missing reg property\n");
- return -EINVAL;
- }
-
- if (!pdata)
- nsels /= sizeof(u32);
- if (!nsels) {
- dev_err(dev, "invalid reg property size\n");
- return -EINVAL;
+ } else {
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+ if (nsels <= 0) {
+ dev_err(dev, "missing/invalid reg property\n");
+ return -EINVAL;
+ }
}
/* Alloc the nand chip structure */
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 72f3a89da513..f28c3a555861 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -706,12 +706,17 @@ static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
+ const struct nand_sdr_timings *timings;
u8 status = 0;
int ret;
if (!chip->exec_op)
return -ENOTSUPP;
+ /* Wait tWB before polling the STATUS reg. */
+ timings = nand_get_sdr_timings(&chip->data_interface);
+ ndelay(PSEC_TO_NSEC(timings->tWB_max));
+
ret = nand_status_op(chip, NULL);
if (ret)
return ret;
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index f54518ffb36a..f2052fae21c7 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -645,7 +645,7 @@ static int tango_nand_probe(struct platform_device *pdev)
writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
- clk = clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 4b8e9183489a..5872f31eaa60 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -501,7 +501,9 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
void __iomem *reg_base = cqspi->iobase;
void __iomem *ahb_base = cqspi->ahb_base;
unsigned int remaining = n_rx;
+ unsigned int mod_bytes = n_rx % 4;
unsigned int bytes_to_read = 0;
+ u8 *rxbuf_end = rxbuf + n_rx;
int ret = 0;
writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
@@ -530,11 +532,24 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
}
while (bytes_to_read != 0) {
+ unsigned int word_remain = round_down(remaining, 4);
+
bytes_to_read *= cqspi->fifo_width;
bytes_to_read = bytes_to_read > remaining ?
remaining : bytes_to_read;
- ioread32_rep(ahb_base, rxbuf,
- DIV_ROUND_UP(bytes_to_read, 4));
+ bytes_to_read = round_down(bytes_to_read, 4);
+ /* Read 4-byte word chunks, then single bytes */
+ if (bytes_to_read) {
+ ioread32_rep(ahb_base, rxbuf,
+ (bytes_to_read / 4));
+ } else if (!word_remain && mod_bytes) {
+ unsigned int temp = ioread32(ahb_base);
+
+ bytes_to_read = mod_bytes;
+ memcpy(rxbuf, &temp, min((unsigned int)
+ (rxbuf_end - rxbuf),
+ bytes_to_read));
+ }
rxbuf += bytes_to_read;
remaining -= bytes_to_read;
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 891846655000..a029b27fd002 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -198,6 +198,7 @@ config VXLAN
config GENEVE
tristate "Generic Network Virtualization Encapsulation"
depends on INET && NET_UDP_TUNNEL
+ depends on IPV6 || !IPV6
select NET_IP_TUNNEL
select GRO_CELLS
---help---
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 1ed9529e7bd1..e82108c917a6 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -40,11 +40,6 @@
#include <net/bonding.h>
#include <net/bond_alb.h>
-
-
-static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-};
static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
@@ -420,8 +415,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
if (assigned_slave) {
rx_hash_table[index].slave = assigned_slave;
- if (!ether_addr_equal_64bits(rx_hash_table[index].mac_dst,
- mac_bcast)) {
+ if (is_valid_ether_addr(rx_hash_table[index].mac_dst)) {
bond_info->rx_hashtbl[index].ntt = 1;
bond_info->rx_ntt = 1;
/* A slave has been removed from the
@@ -450,7 +444,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
{
int i;
- if (!client_info->slave)
+ if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
return;
for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
@@ -524,7 +518,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
client_info = &(bond_info->rx_hashtbl[hash_index]);
if ((client_info->slave == slave) &&
- !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
+ is_valid_ether_addr(client_info->mac_dst)) {
client_info->ntt = 1;
ntt = 1;
}
@@ -565,7 +559,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
if ((client_info->ip_src == src_ip) &&
!ether_addr_equal_64bits(client_info->slave->dev->dev_addr,
bond->dev->dev_addr) &&
- !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
+ is_valid_ether_addr(client_info->mac_dst)) {
client_info->ntt = 1;
bond_info->rx_ntt = 1;
}
@@ -593,7 +587,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
if ((client_info->ip_src == arp->ip_src) &&
(client_info->ip_dst == arp->ip_dst)) {
/* the entry is already assigned to this client */
- if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
+ if (!is_broadcast_ether_addr(arp->mac_dst)) {
/* update mac address from arp */
ether_addr_copy(client_info->mac_dst, arp->mac_dst);
}
@@ -641,7 +635,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
ether_addr_copy(client_info->mac_src, arp->mac_src);
client_info->slave = assigned_slave;
- if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
+ if (is_valid_ether_addr(client_info->mac_dst)) {
client_info->ntt = 1;
bond->alb_info.rx_ntt = 1;
} else {
@@ -733,8 +727,10 @@ static void rlb_rebalance(struct bonding *bond)
assigned_slave = __rlb_next_rx_slave(bond);
if (assigned_slave && (client_info->slave != assigned_slave)) {
client_info->slave = assigned_slave;
- client_info->ntt = 1;
- ntt = 1;
+ if (!is_zero_ether_addr(client_info->mac_dst)) {
+ client_info->ntt = 1;
+ ntt = 1;
+ }
}
}
@@ -943,6 +939,10 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;
+ netdev_dbg(slave->bond->dev,
+ "Send learning packet: dev %s mac %pM vlan %d\n",
+ slave->dev->name, mac_addr, vid);
+
if (vid)
__vlan_hwaccel_put_tag(skb, vlan_proto, vid);
@@ -965,14 +965,13 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data)
u8 *mac_addr = data->mac_addr;
struct bond_vlan_tag *tags;
- if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
- if (strict_match &&
- ether_addr_equal_64bits(mac_addr,
- upper->dev_addr)) {
+ if (is_vlan_dev(upper) &&
+ bond->nest_level == vlan_get_encap_level(upper) - 1) {
+ if (upper->addr_assign_type == NET_ADDR_STOLEN) {
alb_send_lp_vid(slave, mac_addr,
vlan_dev_vlan_proto(upper),
vlan_dev_vlan_id(upper));
- } else if (!strict_match) {
+ } else {
alb_send_lp_vid(slave, upper->dev_addr,
vlan_dev_vlan_proto(upper),
vlan_dev_vlan_id(upper));
@@ -1316,8 +1315,8 @@ void bond_alb_deinitialize(struct bonding *bond)
rlb_deinitialize(bond);
}
-static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
- struct slave *tx_slave)
+static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
+ struct slave *tx_slave)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct ethhdr *eth_data = eth_hdr(skb);
@@ -1351,7 +1350,7 @@ out:
return NETDEV_TX_OK;
}
-int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct ethhdr *eth_data;
@@ -1389,7 +1388,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
return bond_do_alb_xmit(skb, bond, tx_slave);
}
-int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct ethhdr *eth_data;
@@ -1409,9 +1408,9 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
case ETH_P_IP: {
const struct iphdr *iph = ip_hdr(skb);
- if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
- (iph->daddr == ip_bcast) ||
- (iph->protocol == IPPROTO_IGMP)) {
+ if (is_broadcast_ether_addr(eth_data->h_dest) ||
+ iph->daddr == ip_bcast ||
+ iph->protocol == IPPROTO_IGMP) {
do_tx_balance = false;
break;
}
@@ -1423,7 +1422,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
/* IPv6 doesn't really use broadcast mac address, but leave
* that here just in case.
*/
- if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
+ if (is_broadcast_ether_addr(eth_data->h_dest)) {
do_tx_balance = false;
break;
}
@@ -1479,8 +1478,24 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
if (do_tx_balance) {
- hash_index = _simple_hash(hash_start, hash_size);
- tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
+ if (bond->params.tlb_dynamic_lb) {
+ hash_index = _simple_hash(hash_start, hash_size);
+ tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
+ } else {
+ /*
+ * do_tx_balance means we are free to select the tx_slave
+ * So we do exactly what tlb would do for hash selection
+ */
+
+ struct bond_up_slave *slaves;
+ unsigned int count;
+
+ slaves = rcu_dereference(bond->slave_arr);
+ count = slaves ? READ_ONCE(slaves->count) : 0;
+ if (likely(count))
+ tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
+ count];
+ }
}
return bond_do_alb_xmit(skb, bond, tx_slave);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b7b113018853..06efdf6a762b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -159,7 +159,7 @@ module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
module_param(xmit_hash_policy, charp, 0);
-MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
+MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
"0 for layer 2 (default), 1 for layer 3+4, "
"2 for layer 2+3, 3 for encap layer 2+3, "
"4 for encap layer 3+4");
@@ -247,7 +247,7 @@ void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
- skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
+ skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
if (unlikely(netpoll_tx_running(bond->dev)))
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
} /* switch(bond_mode) */
#ifdef CONFIG_NET_POLL_CONTROLLER
- slave_dev->npinfo = bond->dev->npinfo;
- if (slave_dev->npinfo) {
+ if (bond->dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
res = -EBUSY;
@@ -1736,9 +1735,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
unblock_netpoll_tx();
}
- if (bond_mode_uses_xmit_hash(bond))
+ if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
+ bond->nest_level = dev_get_nest_level(bond_dev);
+
netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
slave_dev->name,
bond_is_active_slave(new_slave) ? "an active" : "a backup",
@@ -1869,7 +1870,7 @@ static int __bond_release_one(struct net_device *bond_dev,
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_unbind_slave(slave);
- if (bond_mode_uses_xmit_hash(bond))
+ if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, slave);
netdev_info(bond_dev, "Releasing %s interface %s\n",
@@ -2136,6 +2137,24 @@ static int bond_miimon_inspect(struct bonding *bond)
return commit;
}
+static void bond_miimon_link_change(struct bonding *bond,
+ struct slave *slave,
+ char link)
+{
+ switch (BOND_MODE(bond)) {
+ case BOND_MODE_8023AD:
+ bond_3ad_handle_link_change(slave, link);
+ break;
+ case BOND_MODE_TLB:
+ case BOND_MODE_ALB:
+ bond_alb_handle_link_change(bond, slave, link);
+ break;
+ case BOND_MODE_XOR:
+ bond_update_slave_arr(bond, NULL);
+ break;
+ }
+}
+
static void bond_miimon_commit(struct bonding *bond)
{
struct list_head *iter;
@@ -2177,16 +2196,7 @@ static void bond_miimon_commit(struct bonding *bond)
slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
slave->duplex ? "full" : "half");
- /* notify ad that the link status has changed */
- if (BOND_MODE(bond) == BOND_MODE_8023AD)
- bond_3ad_handle_link_change(slave, BOND_LINK_UP);
-
- if (bond_is_lb(bond))
- bond_alb_handle_link_change(bond, slave,
- BOND_LINK_UP);
-
- if (BOND_MODE(bond) == BOND_MODE_XOR)
- bond_update_slave_arr(bond, NULL);
+ bond_miimon_link_change(bond, slave, BOND_LINK_UP);
if (!bond->curr_active_slave || slave == primary)
goto do_failover;
@@ -2208,16 +2218,7 @@ static void bond_miimon_commit(struct bonding *bond)
netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
slave->dev->name);
- if (BOND_MODE(bond) == BOND_MODE_8023AD)
- bond_3ad_handle_link_change(slave,
- BOND_LINK_DOWN);
-
- if (bond_is_lb(bond))
- bond_alb_handle_link_change(bond, slave,
- BOND_LINK_DOWN);
-
- if (BOND_MODE(bond) == BOND_MODE_XOR)
- bond_update_slave_arr(bond, NULL);
+ bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
if (slave == rcu_access_pointer(bond->curr_active_slave))
goto do_failover;
@@ -3101,7 +3102,7 @@ static int bond_slave_netdev_event(unsigned long event,
* events. If these (miimon/arpmon) parameters are configured
* then array gets refreshed twice and that should be fine!
*/
- if (bond_mode_uses_xmit_hash(bond))
+ if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
break;
case NETDEV_CHANGEMTU:
@@ -3321,7 +3322,7 @@ static int bond_open(struct net_device *bond_dev)
*/
if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
return -ENOMEM;
- if (bond->params.tlb_dynamic_lb)
+ if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
queue_delayed_work(bond->wq, &bond->alb_work, 0);
}
@@ -3340,7 +3341,7 @@ static int bond_open(struct net_device *bond_dev)
bond_3ad_initiate_agg_selection(bond, 1);
}
- if (bond_mode_uses_xmit_hash(bond))
+ if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
return 0;
@@ -3806,7 +3807,8 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
return slave_id;
}
-static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
+static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
+ struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct iphdr *iph = ip_hdr(skb);
@@ -3842,7 +3844,8 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
/* In active-backup mode, we know that bond->curr_active_slave is always valid if
* the bond has a usable interface.
*/
-static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev)
+static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
+ struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
@@ -3891,7 +3894,7 @@ err:
* to determine the slave interface -
* (a) BOND_MODE_8023AD
* (b) BOND_MODE_XOR
- * (c) BOND_MODE_TLB && tlb_dynamic_lb == 0
+ * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
*
* The caller is expected to hold RTNL only and NO other lock!
*/
@@ -3944,6 +3947,11 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
continue;
if (skipslave == slave)
continue;
+
+ netdev_dbg(bond->dev,
+ "Adding slave dev %s to tx hash array[%d]\n",
+ slave->dev->name, new_arr->count);
+
new_arr->arr[new_arr->count++] = slave;
}
@@ -3980,7 +3988,8 @@ out:
* usable slave array is formed in the control path. The xmit function
* just calculates hash and sends the packet out.
*/
-static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
struct slave *slave;
@@ -4000,7 +4009,8 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* in broadcast mode, we send everything to all usable interfaces. */
-static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
+static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
+ struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave = NULL;
@@ -4037,12 +4047,12 @@ static inline int bond_slave_override(struct bonding *bond,
struct slave *slave = NULL;
struct list_head *iter;
- if (!skb->queue_mapping)
+ if (!skb_rx_queue_recorded(skb))
return 1;
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave_rcu(bond, slave, iter) {
- if (slave->queue_id == skb->queue_mapping) {
+ if (slave->queue_id == skb_get_queue_mapping(skb)) {
if (bond_slave_is_up(slave) &&
slave->link == BOND_LINK_UP) {
bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -4068,7 +4078,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
/* Save the original txq to restore before passing to the driver */
- qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+ qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
if (unlikely(txq >= dev->real_num_tx_queues)) {
do {
@@ -4319,9 +4329,9 @@ static int bond_check_params(struct bond_params *params)
}
if (xmit_hash_policy) {
- if ((bond_mode != BOND_MODE_XOR) &&
- (bond_mode != BOND_MODE_8023AD) &&
- (bond_mode != BOND_MODE_TLB)) {
+ if (bond_mode == BOND_MODE_ROUNDROBIN ||
+ bond_mode == BOND_MODE_ACTIVEBACKUP ||
+ bond_mode == BOND_MODE_BROADCAST) {
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 58c705f24f96..8a945c9341d6 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -395,7 +395,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_TLB_DYNAMIC_LB,
.name = "tlb_dynamic_lb",
.desc = "Enable dynamic flow shuffling",
- .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)),
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB) | BIT(BOND_MODE_ALB)),
.values = bond_tlb_dynamic_lb_tbl,
.flags = BOND_OPTFLAG_IFDOWN,
.set = bond_option_tlb_dynamic_lb_set,
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index b1779566c5bb..3c71f1cb205f 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -605,7 +605,7 @@ void can_bus_off(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- netdev_dbg(dev, "bus-off\n");
+ netdev_info(dev, "bus-off\n");
netif_carrier_off(dev);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 634c51e6b8ae..d53a45bf2a72 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -200,6 +200,7 @@
#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */
+#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) /* default to BE register access */
/* Structure of the message buffer */
struct flexcan_mb {
@@ -288,6 +289,12 @@ struct flexcan_priv {
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN,
+};
+
+static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE,
};
@@ -1251,9 +1258,9 @@ static void unregister_flexcandev(struct net_device *dev)
static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
- { .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
- { .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
- { .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
+ { .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
+ { .compatible = "fsl,imx35-flexcan", .data = &fsl_imx25_devtype_data, },
+ { .compatible = "fsl,imx25-flexcan", .data = &fsl_imx25_devtype_data, },
{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
@@ -1337,18 +1344,13 @@ static int flexcan_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
- if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
+ if (of_property_read_bool(pdev->dev.of_node, "big-endian") ||
+ devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) {
priv->read = flexcan_read_be;
priv->write = flexcan_write_be;
} else {
- if (of_device_is_compatible(pdev->dev.of_node,
- "fsl,p1010-flexcan")) {
- priv->read = flexcan_read_be;
- priv->write = flexcan_write_be;
- } else {
- priv->read = flexcan_read_le;
- priv->write = flexcan_write_le;
- }
+ priv->read = flexcan_read_le;
+ priv->write = flexcan_write_le;
}
priv->can.clock.freq = clock_freq;
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 5590c559a8ca..53e320c92a8b 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -91,6 +91,7 @@
#define HI3110_STAT_BUSOFF BIT(2)
#define HI3110_STAT_ERRP BIT(3)
#define HI3110_STAT_ERRW BIT(4)
+#define HI3110_STAT_TXMTY BIT(7)
#define HI3110_BTR0_SJW_SHIFT 6
#define HI3110_BTR0_BRP_SHIFT 0
@@ -427,8 +428,10 @@ static int hi3110_get_berr_counter(const struct net_device *net,
struct hi3110_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
+ mutex_lock(&priv->hi3110_lock);
bec->txerr = hi3110_read(spi, HI3110_READ_TEC);
bec->rxerr = hi3110_read(spi, HI3110_READ_REC);
+ mutex_unlock(&priv->hi3110_lock);
return 0;
}
@@ -735,10 +738,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
}
}
- if (intf == 0)
- break;
-
- if (intf & HI3110_INT_TXCPLT) {
+ if (priv->tx_len && statf & HI3110_STAT_TXMTY) {
net->stats.tx_packets++;
net->stats.tx_bytes += priv->tx_len - 1;
can_led_event(net, CAN_LED_EVENT_TX);
@@ -748,6 +748,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
}
netif_wake_queue(net);
}
+
+ if (intf == 0)
+ break;
}
mutex_unlock(&priv->hi3110_lock);
return IRQ_HANDLED;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 63587b8e6825..daed57d3d209 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1179,7 +1179,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
skb = alloc_can_skb(priv->netdev, &cf);
if (!skb) {
- stats->tx_dropped++;
+ stats->rx_dropped++;
return;
}
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 78616787f2a3..9f561fe505cb 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -806,16 +806,39 @@ static unsigned int b53_get_mib_size(struct b53_device *dev)
return B53_MIBS_SIZE;
}
-void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
+{
+ /* These ports typically do not have built-in PHYs */
+ switch (port) {
+ case B53_CPU_PORT_25:
+ case 7:
+ case B53_CPU_PORT:
+ return NULL;
+ }
+
+ return mdiobus_get_phy(ds->slave_mii_bus, port);
+}
+
+void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
{
struct b53_device *dev = ds->priv;
const struct b53_mib_desc *mibs = b53_get_mib(dev);
unsigned int mib_size = b53_get_mib_size(dev);
+ struct phy_device *phydev;
unsigned int i;
- for (i = 0; i < mib_size; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
- mibs[i].name, ETH_GSTRING_LEN);
+ if (stringset == ETH_SS_STATS) {
+ for (i = 0; i < mib_size; i++)
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ mibs[i].name, ETH_GSTRING_LEN);
+ } else if (stringset == ETH_SS_PHY_STATS) {
+ phydev = b53_get_phy_device(ds, port);
+ if (!phydev)
+ return;
+
+ phy_ethtool_get_strings(phydev, data);
+ }
}
EXPORT_SYMBOL(b53_get_strings);
@@ -852,11 +875,34 @@ void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
}
EXPORT_SYMBOL(b53_get_ethtool_stats);
-int b53_get_sset_count(struct dsa_switch *ds, int port)
+void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+ struct phy_device *phydev;
+
+ phydev = b53_get_phy_device(ds, port);
+ if (!phydev)
+ return;
+
+ phy_ethtool_get_stats(phydev, NULL, data);
+}
+EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
+
+int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct b53_device *dev = ds->priv;
+ struct phy_device *phydev;
+
+ if (sset == ETH_SS_STATS) {
+ return b53_get_mib_size(dev);
+ } else if (sset == ETH_SS_PHY_STATS) {
+ phydev = b53_get_phy_device(ds, port);
+ if (!phydev)
+ return 0;
+
+ return phy_ethtool_get_sset_count(phydev);
+ }
- return b53_get_mib_size(dev);
+ return 0;
}
EXPORT_SYMBOL(b53_get_sset_count);
@@ -1477,7 +1523,7 @@ void b53_br_fast_age(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL(b53_br_fast_age);
-static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
+static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
{
/* Broadcom switches will accept enabling Broadcom tags on the
* following ports: 5, 7 and 8, any other port is not supported
@@ -1489,10 +1535,19 @@ static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
return true;
}
- dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", port);
return false;
}
+static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
+{
+ bool ret = b53_possible_cpu_port(ds, port);
+
+ if (!ret)
+ dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
+ port);
+ return ret;
+}
+
enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
@@ -1650,6 +1705,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
.get_strings = b53_get_strings,
.get_ethtool_stats = b53_get_ethtool_stats,
.get_sset_count = b53_get_sset_count,
+ .get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
.adjust_link = b53_adjust_link,
@@ -1954,6 +2010,15 @@ static int b53_switch_init(struct b53_device *dev)
dev->num_ports = dev->cpu_port + 1;
dev->enabled_ports |= BIT(dev->cpu_port);
+ /* Include non-standard CPU port built-in PHYs to be probed */
+ if (is539x(dev) || is531x5(dev)) {
+ for (i = 0; i < dev->num_ports; i++) {
+ if (!(dev->ds->phys_mii_mask & BIT(i)) &&
+ !b53_possible_cpu_port(dev->ds, i))
+ dev->ds->phys_mii_mask |= BIT(i);
+ }
+ }
+
dev->ports = devm_kzalloc(dev->dev,
sizeof(struct b53_port) * dev->num_ports,
GFP_KERNEL);
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 1187ebd79287..cc284a514de9 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -286,9 +286,11 @@ static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
/* Exported functions towards other drivers */
void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port);
int b53_configure_vlan(struct dsa_switch *ds);
-void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
+void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data);
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
-int b53_get_sset_count(struct dsa_switch *ds, int port);
+int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
+void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 0378eded31f2..02e8982519ce 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_irq.h>
@@ -306,7 +307,8 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
- struct bcm_sf2_priv *priv = dev_id;
+ struct dsa_switch *ds = dev_id;
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
~priv->irq0_mask;
@@ -317,16 +319,21 @@ static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
- struct bcm_sf2_priv *priv = dev_id;
+ struct dsa_switch *ds = dev_id;
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
~priv->irq1_mask;
intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
- if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
- priv->port_sts[7].link = 1;
- if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
- priv->port_sts[7].link = 0;
+ if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
+ priv->port_sts[7].link = true;
+ dsa_port_phylink_mac_change(ds, 7, true);
+ }
+ if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
+ priv->port_sts[7].link = false;
+ dsa_port_phylink_mac_change(ds, 7, false);
+ }
return IRQ_HANDLED;
}
@@ -443,12 +450,8 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
priv->slave_mii_bus->parent = ds->dev->parent;
priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
- if (dn)
- err = of_mdiobus_register(priv->slave_mii_bus, dn);
- else
- err = mdiobus_register(priv->slave_mii_bus);
-
- if (err)
+ err = of_mdiobus_register(priv->slave_mii_bus, dn);
+ if (err && dn)
of_node_put(dn);
return err;
@@ -473,13 +476,56 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
return priv->hw_params.gphy_rev;
}
-static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (!phy_interface_mode_is_rgmii(state->interface) &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_INTERNAL &&
+ state->interface != PHY_INTERFACE_MODE_MOCA) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev,
+ "Unsupported interface: %d\n", state->interface);
+ return;
+ }
+
+ /* Allow all the expected bits */
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ /* With the exclusion of MII and Reverse MII, we support Gigabit,
+ * including Half duplex
+ */
+ if (state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_REVMII) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_eee *p = &priv->dev->ports[port].eee;
u32 id_mode_dis = 0, port_mode;
- const char *str = NULL;
u32 reg, offset;
if (priv->type == BCM7445_DEVICE_ID)
@@ -487,62 +533,48 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
else
offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
- switch (phydev->interface) {
+ switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII:
- str = "RGMII (no delay)";
id_mode_dis = 1;
+ /* fallthrough */
case PHY_INTERFACE_MODE_RGMII_TXID:
- if (!str)
- str = "RGMII (TX delay)";
port_mode = EXT_GPHY;
break;
case PHY_INTERFACE_MODE_MII:
- str = "MII";
port_mode = EXT_EPHY;
break;
case PHY_INTERFACE_MODE_REVMII:
- str = "Reverse MII";
port_mode = EXT_REVMII;
break;
default:
- /* All other PHYs: internal and MoCA */
+ /* all other PHYs: internal and MoCA */
goto force_link;
}
- /* If the link is down, just disable the interface to conserve power */
- if (!phydev->link) {
- reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
- reg &= ~RGMII_MODE_EN;
- reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
- goto force_link;
- }
-
- /* Clear id_mode_dis bit, and the existing port mode, but
- * make sure we enable the RGMII block for data to pass
+ /* Clear id_mode_dis bit, and the existing port mode, let
+ * RGMII_MODE_EN be set by mac_link_{up,down}
*/
reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
reg &= ~ID_MODE_DIS;
reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
- reg |= port_mode | RGMII_MODE_EN;
+ reg |= port_mode;
if (id_mode_dis)
reg |= ID_MODE_DIS;
- if (phydev->pause) {
- if (phydev->asym_pause)
+ if (state->pause & MLO_PAUSE_TXRX_MASK) {
+ if (state->pause & MLO_PAUSE_TX)
reg |= TX_PAUSE_EN;
reg |= RX_PAUSE_EN;
}
reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
- pr_info("Port %d configured for %s\n", port, str);
-
force_link:
/* Force link settings detected from the PHY */
reg = SW_OVERRIDE;
- switch (phydev->speed) {
+ switch (state->speed) {
case SPEED_1000:
reg |= SPDSTS_1000 << SPEED_SHIFT;
break;
@@ -551,33 +583,61 @@ force_link:
break;
}
- if (phydev->link)
+ if (state->link)
reg |= LINK_STS;
- if (phydev->duplex == DUPLEX_FULL)
+ if (state->duplex == DUPLEX_FULL)
reg |= DUPLX_MODE;
core_writel(priv, reg, offset);
-
- if (!phydev->is_pseudo_fixed_link)
- p->eee_enabled = b53_eee_init(ds, port, phydev);
}
-static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
- struct fixed_phy_status *status)
+static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
+ phy_interface_t interface, bool link)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- u32 duplex, pause, offset;
u32 reg;
- if (priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+ if (!phy_interface_mode_is_rgmii(interface) &&
+ interface != PHY_INTERFACE_MODE_MII &&
+ interface != PHY_INTERFACE_MODE_REVMII)
+ return;
+
+ /* If the link is down, just disable the interface to conserve power */
+ reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
+ if (link)
+ reg |= RGMII_MODE_EN;
else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+ reg &= ~RGMII_MODE_EN;
+ reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
+}
- duplex = core_readl(priv, CORE_DUPSTS);
- pause = core_readl(priv, CORE_PAUSESTS);
+static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ bcm_sf2_sw_mac_link_set(ds, port, interface, false);
+}
- status->link = 0;
+static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct ethtool_eee *p = &priv->dev->ports[port].eee;
+
+ bcm_sf2_sw_mac_link_set(ds, port, interface, true);
+
+ if (mode == MLO_AN_PHY && phydev)
+ p->eee_enabled = b53_eee_init(ds, port, phydev);
+}
+
+static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *status)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+
+ status->link = false;
/* MoCA port is special as we do not get link status from CORE_LNKSTS,
* which means that we need to force the link at the port override
@@ -596,28 +656,10 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
*/
if (!status->link)
netif_carrier_off(ds->ports[port].slave);
- status->duplex = 1;
+ status->duplex = DUPLEX_FULL;
} else {
- status->link = 1;
- status->duplex = !!(duplex & (1 << port));
- }
-
- reg = core_readl(priv, offset);
- reg |= SW_OVERRIDE;
- if (status->link)
- reg |= LINK_STS;
- else
- reg &= ~LINK_STS;
- core_writel(priv, reg, offset);
-
- if ((pause & (1 << port)) &&
- (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
- status->asym_pause = 1;
- status->pause = 1;
+ status->link = true;
}
-
- if (pause & (1 << port))
- status->pause = 1;
}
static void bcm_sf2_enable_acb(struct dsa_switch *ds)
@@ -859,9 +901,13 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.get_strings = b53_get_strings,
.get_ethtool_stats = b53_get_ethtool_stats,
.get_sset_count = b53_get_sset_count,
+ .get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
- .adjust_link = bcm_sf2_sw_adjust_link,
- .fixed_link_update = bcm_sf2_sw_fixed_link_update,
+ .phylink_validate = bcm_sf2_sw_validate,
+ .phylink_mac_config = bcm_sf2_sw_mac_config,
+ .phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
+ .phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
+ .phylink_fixed_state = bcm_sf2_sw_fixed_state,
.suspend = bcm_sf2_sw_suspend,
.resume = bcm_sf2_sw_resume,
.get_wol = bcm_sf2_sw_get_wol,
@@ -1064,14 +1110,14 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
bcm_sf2_intr_disable(priv);
ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
- "switch_0", priv);
+ "switch_0", ds);
if (ret < 0) {
pr_err("failed to request switch_0 IRQ\n");
goto out_mdio;
}
ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
- "switch_1", priv);
+ "switch_1", ds);
if (ret < 0) {
pr_err("failed to request switch_1 IRQ\n");
goto out_mdio;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 23b45da784cb..b89acaee12d4 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -354,10 +354,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
/* Locate the first rule available */
if (fs->location == RX_CLS_LOC_ANY)
rule_index = find_first_zero_bit(priv->cfp.used,
- bcm_sf2_cfp_rule_size(priv));
+ priv->num_cfp_rules);
else
rule_index = fs->location;
+ if (rule_index > bcm_sf2_cfp_rule_size(priv))
+ return -ENOSPC;
+
layout = &udf_tcpip4_layout;
/* We only use one UDF slice for now */
slice_num = bcm_sf2_get_slice_number(layout, 0);
@@ -562,19 +565,21 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
* first half because the HW search is by incrementing addresses.
*/
if (fs->location == RX_CLS_LOC_ANY)
- rule_index[0] = find_first_zero_bit(priv->cfp.used,
- bcm_sf2_cfp_rule_size(priv));
+ rule_index[1] = find_first_zero_bit(priv->cfp.used,
+ priv->num_cfp_rules);
else
- rule_index[0] = fs->location;
+ rule_index[1] = fs->location;
+ if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
+ return -ENOSPC;
/* Flag it as used (cleared on error path) such that we can immediately
* obtain a second one to chain from.
*/
- set_bit(rule_index[0], priv->cfp.used);
+ set_bit(rule_index[1], priv->cfp.used);
- rule_index[1] = find_first_zero_bit(priv->cfp.used,
- bcm_sf2_cfp_rule_size(priv));
- if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) {
+ rule_index[0] = find_first_zero_bit(priv->cfp.used,
+ priv->num_cfp_rules);
+ if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
ret = -ENOSPC;
goto out_err;
}
@@ -712,14 +717,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
/* Flag the second half rule as being used now, return it as the
* location, and flag it as unique while dumping rules
*/
- set_bit(rule_index[1], priv->cfp.used);
+ set_bit(rule_index[0], priv->cfp.used);
set_bit(rule_index[1], priv->cfp.unique);
fs->location = rule_index[1];
return ret;
out_err:
- clear_bit(rule_index[0], priv->cfp.used);
+ clear_bit(rule_index[1], priv->cfp.used);
return ret;
}
@@ -785,10 +790,6 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
int ret;
u32 reg;
- /* Refuse deletion of unused rules, and the default reserved rule */
- if (!test_bit(loc, priv->cfp.used) || loc == 0)
- return -EINVAL;
-
/* Indicate which rule we want to read */
bcm_sf2_cfp_rule_addr_set(priv, loc);
@@ -826,6 +827,13 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
u32 next_loc = 0;
int ret;
+ /* Refuse deleting unused rules, and those that are not unique since
+ * that could leave IPv6 rules with one of the chained rules in the
+ * table.
+ */
+ if (!test_bit(loc, priv->cfp.unique) || loc == 0)
+ return -EINVAL;
+
ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
if (ret)
return ret;
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index f77be9f85cb3..58f14af04639 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -86,16 +86,23 @@ static int dsa_loop_setup(struct dsa_switch *ds)
return 0;
}
-static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port)
+static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
+ if (sset != ETH_SS_STATS && sset != ETH_SS_PHY_STATS)
+ return 0;
+
return __DSA_LOOP_CNT_MAX;
}
-static void dsa_loop_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+static void dsa_loop_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
{
struct dsa_loop_priv *ps = ds->priv;
unsigned int i;
+ if (stringset != ETH_SS_STATS && stringset != ETH_SS_PHY_STATS)
+ return;
+
for (i = 0; i < __DSA_LOOP_CNT_MAX; i++)
memcpy(data + i * ETH_GSTRING_LEN,
ps->ports[port].mib[i].name, ETH_GSTRING_LEN);
@@ -256,6 +263,7 @@ static const struct dsa_switch_ops dsa_loop_driver = {
.get_strings = dsa_loop_get_strings,
.get_ethtool_stats = dsa_loop_get_ethtool_stats,
.get_sset_count = dsa_loop_get_sset_count,
+ .get_ethtool_phy_stats = dsa_loop_get_ethtool_stats,
.phy_read = dsa_loop_phy_read,
.phy_write = dsa_loop_phy_write,
.port_bridge_join = dsa_loop_port_bridge_join,
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index fefa454f3e56..b4f6e1a67dd9 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -977,10 +977,14 @@ static const struct lan9303_mib_desc lan9303_mib[] = {
{ .offset = LAN9303_MAC_TX_LATECOL_0, .name = "TxLateCol", },
};
-static void lan9303_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+static void lan9303_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
{
unsigned int u;
+ if (stringset != ETH_SS_STATS)
+ return;
+
for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name,
ETH_GSTRING_LEN);
@@ -1007,8 +1011,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
}
}
-static int lan9303_get_sset_count(struct dsa_switch *ds, int port)
+static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
return ARRAY_SIZE(lan9303_mib);
}
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index bcb3e6c734f2..7210c49b7922 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -439,15 +439,22 @@ static void ksz_disable_port(struct dsa_switch *ds, int port,
ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true);
}
-static int ksz_sset_count(struct dsa_switch *ds, int port)
+static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
return TOTAL_SWITCH_COUNTER_NUM;
}
-static void ksz_get_strings(struct dsa_switch *ds, int port, uint8_t *buf)
+static void ksz_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
{
int i;
+ if (stringset != ETH_SS_STATS)
+ return;
+
for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string,
ETH_GSTRING_LEN);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 80a4dbc3a499..62e486652e62 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -573,10 +573,14 @@ static int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum,
}
static void
-mt7530_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
{
int i;
+ if (stringset != ETH_SS_STATS)
+ return;
+
for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name,
ETH_GSTRING_LEN);
@@ -604,8 +608,11 @@ mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
}
static int
-mt7530_get_sset_count(struct dsa_switch *ds, int port)
+mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
return ARRAY_SIZE(mt7530_mib);
}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 3d2091099f7f..12df00f593b7 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -28,9 +28,11 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
+#include <linux/platform_data/mv88e6xxx.h>
#include <linux/netdevice.h>
#include <linux/gpio/consumer.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#include <net/dsa.h>
#include "chip.h"
@@ -580,6 +582,83 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
}
+static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+}
+
+static int mv88e6xxx_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_port_link_state(chip, port, state);
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int speed, duplex, link, err;
+
+ if (mode == MLO_AN_PHY)
+ return;
+
+ if (mode == MLO_AN_FIXED) {
+ link = LINK_FORCED_UP;
+ speed = state->speed;
+ duplex = state->duplex;
+ } else {
+ speed = SPEED_UNFORCED;
+ duplex = DUPLEX_UNFORCED;
+ link = LINK_UNFORCED;
+ }
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex,
+ state->interface);
+ mutex_unlock(&chip->reg_lock);
+
+ if (err && err != -EOPNOTSUPP)
+ dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
+}
+
+static void mv88e6xxx_mac_link_force(struct dsa_switch *ds, int port, int link)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = chip->info->ops->port_set_link(chip, port, link);
+ mutex_unlock(&chip->reg_lock);
+
+ if (err)
+ dev_err(chip->dev, "p%d: failed to force MAC link\n", port);
+}
+
+static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ if (mode == MLO_AN_FIXED)
+ mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_DOWN);
+}
+
+static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ if (mode == MLO_AN_FIXED)
+ mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_UP);
+}
+
static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
{
if (!chip->info->ops->stats_snapshot)
@@ -665,13 +744,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
case STATS_TYPE_PORT:
err = mv88e6xxx_port_read(chip, port, s->reg, &reg);
if (err)
- return UINT64_MAX;
+ return U64_MAX;
low = reg;
if (s->size == 4) {
err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
if (err)
- return UINT64_MAX;
+ return U64_MAX;
high = reg;
}
break;
@@ -685,7 +764,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
mv88e6xxx_g1_stats_read(chip, reg + 1, &high);
break;
default:
- return UINT64_MAX;
+ return U64_MAX;
}
value = (((u64)high) << 16) | low;
return value;
@@ -742,11 +821,14 @@ static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
}
static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
- uint8_t *data)
+ u32 stringset, uint8_t *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
int count = 0;
+ if (stringset != ETH_SS_STATS)
+ return;
+
mutex_lock(&chip->reg_lock);
if (chip->info->ops->stats_get_strings)
@@ -789,12 +871,15 @@ static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip)
STATS_TYPE_BANK1);
}
-static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port)
+static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct mv88e6xxx_chip *chip = ds->priv;
int serdes_count = 0;
int count = 0;
+ if (sset != ETH_SS_STATS)
+ return 0;
+
mutex_lock(&chip->reg_lock);
if (chip->info->ops->stats_get_sset_count)
count = chip->info->ops->stats_get_sset_count(chip);
@@ -911,14 +996,6 @@ static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
}
-static int mv88e6xxx_stats_set_histogram(struct mv88e6xxx_chip *chip)
-{
- if (chip->info->ops->stats_set_histogram)
- return chip->info->ops->stats_set_histogram(chip);
-
- return 0;
-}
-
static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
return 32 * sizeof(u16);
@@ -1020,6 +1097,76 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
dev_err(ds->dev, "p%d: failed to update state\n", port);
}
+static int mv88e6xxx_pri_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ if (chip->info->ops->ieee_pri_map) {
+ err = chip->info->ops->ieee_pri_map(chip);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->ip_pri_map) {
+ err = chip->info->ops->ip_pri_map(chip);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
+{
+ int target, port;
+ int err;
+
+ if (!chip->info->global2_addr)
+ return 0;
+
+ /* Initialize the routing port to the 32 possible target devices */
+ for (target = 0; target < 32; target++) {
+ port = 0x1f;
+ if (target < DSA_MAX_SWITCHES)
+ if (chip->ds->rtable[target] != DSA_RTABLE_NONE)
+ port = chip->ds->rtable[target];
+
+ err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->set_cascade_port) {
+ port = MV88E6XXX_CASCADE_PORT_MULTIPLE;
+ err = chip->info->ops->set_cascade_port(chip, port);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6xxx_g1_set_device_number(chip, chip->ds->index);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mv88e6xxx_trunk_setup(struct mv88e6xxx_chip *chip)
+{
+ /* Clear all trunk masks and mapping */
+ if (chip->info->global2_addr)
+ return mv88e6xxx_g2_trunk_clear(chip);
+
+ return 0;
+}
+
+static int mv88e6xxx_rmu_setup(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->rmu_disable)
+ return chip->info->ops->rmu_disable(chip);
+
+ return 0;
+}
+
static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->pot_clear)
@@ -2113,53 +2260,16 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
return err;
}
-static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
+static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
{
- struct dsa_switch *ds = chip->ds;
int err;
- /* Disable remote management, and set the switch's DSA device number. */
- err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2,
- MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE |
- (ds->index & 0x1f));
- if (err)
- return err;
-
- /* Configure the IP ToS mapping registers. */
- err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_0, 0x0000);
- if (err)
- return err;
- err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_1, 0x0000);
- if (err)
- return err;
- err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_2, 0x5555);
- if (err)
- return err;
- err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_3, 0x5555);
- if (err)
- return err;
- err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_4, 0xaaaa);
- if (err)
- return err;
- err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_5, 0xaaaa);
- if (err)
- return err;
- err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_6, 0xffff);
- if (err)
- return err;
- err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_7, 0xffff);
- if (err)
- return err;
-
- /* Configure the IEEE 802.1p priority mapping register. */
- err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41);
- if (err)
- return err;
-
/* Initialize the statistics unit */
- err = mv88e6xxx_stats_set_histogram(chip);
- if (err)
- return err;
+ if (chip->info->ops->stats_set_histogram) {
+ err = chip->info->ops->stats_set_histogram(chip);
+ if (err)
+ return err;
+ }
return mv88e6xxx_g1_stats_clear(chip);
}
@@ -2185,18 +2295,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
goto unlock;
}
- /* Setup Switch Global 1 Registers */
- err = mv88e6xxx_g1_setup(chip);
- if (err)
- goto unlock;
-
- /* Setup Switch Global 2 Registers */
- if (chip->info->global2_addr) {
- err = mv88e6xxx_g2_setup(chip);
- if (err)
- goto unlock;
- }
-
err = mv88e6xxx_irl_setup(chip);
if (err)
goto unlock;
@@ -2229,10 +2327,26 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
if (err)
goto unlock;
+ err = mv88e6xxx_rmu_setup(chip);
+ if (err)
+ goto unlock;
+
err = mv88e6xxx_rsvd2cpu_setup(chip);
if (err)
goto unlock;
+ err = mv88e6xxx_trunk_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_devmap_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_pri_setup(chip);
+ if (err)
+ goto unlock;
+
/* Setup PTP Hardware Clock and timestamping */
if (chip->info->ptp_support) {
err = mv88e6xxx_ptp_setup(chip);
@@ -2244,6 +2358,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
goto unlock;
}
+ err = mv88e6xxx_stats_setup(chip);
+ if (err)
+ goto unlock;
+
unlock:
mutex_unlock(&chip->reg_lock);
@@ -2337,10 +2455,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
return err;
}
- if (np)
- err = of_mdiobus_register(bus, np);
- else
- err = mdiobus_register(bus);
+ err = of_mdiobus_register(bus, np);
if (err) {
dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
mv88e6xxx_g2_irq_mdio_free(chip, bus);
@@ -2460,6 +2575,8 @@ static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
static const struct mv88e6xxx_ops mv88e6085_ops = {
/* MV88E6XXX_FAMILY_6097 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6185_phy_ppu_read,
@@ -2488,12 +2605,16 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
+ .rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6341_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6095_ops = {
/* MV88E6XXX_FAMILY_6095 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
@@ -2518,6 +2639,8 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
static const struct mv88e6xxx_ops mv88e6097_ops = {
/* MV88E6XXX_FAMILY_6097 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -2545,12 +2668,15 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
};
static const struct mv88e6xxx_ops mv88e6123_ops = {
/* MV88E6XXX_FAMILY_6165 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -2579,6 +2705,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
static const struct mv88e6xxx_ops mv88e6131_ops = {
/* MV88E6XXX_FAMILY_6185 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
@@ -2603,6 +2731,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
.ppu_enable = mv88e6185_g1_ppu_enable,
+ .set_cascade_port = mv88e6185_g1_set_cascade_port,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
@@ -2611,6 +2740,8 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
static const struct mv88e6xxx_ops mv88e6141_ops = {
/* MV88E6XXX_FAMILY_6341 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -2648,6 +2779,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
static const struct mv88e6xxx_ops mv88e6161_ops = {
/* MV88E6XXX_FAMILY_6165 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -2681,6 +2814,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
static const struct mv88e6xxx_ops mv88e6165_ops = {
/* MV88E6XXX_FAMILY_6165 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6165_phy_read,
@@ -2707,6 +2842,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
static const struct mv88e6xxx_ops mv88e6171_ops = {
/* MV88E6XXX_FAMILY_6351 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -2741,6 +2878,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
static const struct mv88e6xxx_ops mv88e6172_ops = {
/* MV88E6XXX_FAMILY_6352 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
@@ -2771,6 +2910,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6352_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
@@ -2779,6 +2919,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
static const struct mv88e6xxx_ops mv88e6175_ops = {
/* MV88E6XXX_FAMILY_6351 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -2809,10 +2951,13 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6341_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
/* MV88E6XXX_FAMILY_6352 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
@@ -2843,6 +2988,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6352_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
@@ -2851,6 +2997,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
static const struct mv88e6xxx_ops mv88e6185_ops = {
/* MV88E6XXX_FAMILY_6185 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.set_switch_mac = mv88e6xxx_g1_set_switch_mac,
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
@@ -2870,6 +3018,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
+ .set_cascade_port = mv88e6185_g1_set_cascade_port,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
@@ -2907,6 +3056,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -2943,6 +3093,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -2979,6 +3130,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -2986,6 +3138,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
static const struct mv88e6xxx_ops mv88e6240_ops = {
/* MV88E6XXX_FAMILY_6352 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
@@ -3016,6 +3170,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6352_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
@@ -3054,6 +3209,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3063,6 +3219,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
static const struct mv88e6xxx_ops mv88e6320_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
@@ -3099,6 +3257,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
static const struct mv88e6xxx_ops mv88e6321_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
@@ -3133,6 +3293,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
static const struct mv88e6xxx_ops mv88e6341_ops = {
/* MV88E6XXX_FAMILY_6341 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3171,6 +3333,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
static const struct mv88e6xxx_ops mv88e6350_ops = {
/* MV88E6XXX_FAMILY_6351 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -3205,6 +3369,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
static const struct mv88e6xxx_ops mv88e6351_ops = {
/* MV88E6XXX_FAMILY_6351 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
@@ -3240,6 +3406,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
static const struct mv88e6xxx_ops mv88e6352_ops = {
/* MV88E6XXX_FAMILY_6352 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
@@ -3270,6 +3438,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6352_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
@@ -3313,6 +3482,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3353,6 +3523,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3370,6 +3541,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3391,6 +3563,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 0,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3410,6 +3583,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 8,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3431,6 +3605,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3452,6 +3627,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 0,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3472,6 +3648,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 11,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x10,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
@@ -3493,6 +3670,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3514,6 +3692,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 0,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3535,6 +3714,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3557,6 +3737,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3578,6 +3759,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3600,6 +3782,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3621,6 +3804,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 0,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3641,6 +3825,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.tag_protocol = DSA_TAG_PROTO_DSA,
@@ -3663,6 +3848,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
@@ -3684,6 +3870,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 11,
.max_vid = 8191,
.port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
@@ -3707,6 +3894,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3730,6 +3918,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
@@ -3753,6 +3942,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3776,6 +3966,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3798,6 +3989,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 11,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x10,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
@@ -3820,6 +4012,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3841,6 +4034,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3863,6 +4057,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 15000,
@@ -3885,6 +4080,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
@@ -3907,6 +4103,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
@@ -4099,6 +4296,11 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
.adjust_link = mv88e6xxx_adjust_link,
+ .phylink_validate = mv88e6xxx_validate,
+ .phylink_mac_link_state = mv88e6xxx_link_state,
+ .phylink_mac_config = mv88e6xxx_mac_config,
+ .phylink_mac_link_down = mv88e6xxx_mac_link_down,
+ .phylink_mac_link_up = mv88e6xxx_mac_link_up,
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
@@ -4149,6 +4351,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
return -ENOMEM;
ds->priv = chip;
+ ds->dev = dev;
ds->ops = &mv88e6xxx_switch_ops;
ds->ageing_time_min = chip->info->age_time_coeff;
ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
@@ -4163,42 +4366,82 @@ static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip)
dsa_unregister_switch(chip->ds);
}
+static const void *pdata_device_get_match_data(struct device *dev)
+{
+ const struct of_device_id *matches = dev->driver->of_match_table;
+ const struct dsa_mv88e6xxx_pdata *pdata = dev->platform_data;
+
+ for (; matches->name[0] || matches->type[0] || matches->compatible[0];
+ matches++) {
+ if (!strcmp(pdata->compatible, matches->compatible))
+ return matches->data;
+ }
+ return NULL;
+}
+
static int mv88e6xxx_probe(struct mdio_device *mdiodev)
{
+ struct dsa_mv88e6xxx_pdata *pdata = mdiodev->dev.platform_data;
+ const struct mv88e6xxx_info *compat_info = NULL;
struct device *dev = &mdiodev->dev;
struct device_node *np = dev->of_node;
- const struct mv88e6xxx_info *compat_info;
struct mv88e6xxx_chip *chip;
- u32 eeprom_len;
+ int port;
int err;
- compat_info = of_device_get_match_data(dev);
+ if (np)
+ compat_info = of_device_get_match_data(dev);
+
+ if (pdata) {
+ compat_info = pdata_device_get_match_data(dev);
+
+ if (!pdata->netdev)
+ return -EINVAL;
+
+ for (port = 0; port < DSA_MAX_PORTS; port++) {
+ if (!(pdata->enabled_ports & (1 << port)))
+ continue;
+ if (strcmp(pdata->cd.port_names[port], "cpu"))
+ continue;
+ pdata->cd.netdev[port] = &pdata->netdev->dev;
+ break;
+ }
+ }
+
if (!compat_info)
return -EINVAL;
chip = mv88e6xxx_alloc_chip(dev);
- if (!chip)
- return -ENOMEM;
+ if (!chip) {
+ err = -ENOMEM;
+ goto out;
+ }
chip->info = compat_info;
err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
if (err)
- return err;
+ goto out;
chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(chip->reset))
- return PTR_ERR(chip->reset);
+ if (IS_ERR(chip->reset)) {
+ err = PTR_ERR(chip->reset);
+ goto out;
+ }
err = mv88e6xxx_detect(chip);
if (err)
- return err;
+ goto out;
mv88e6xxx_phy_init(chip);
- if (chip->info->ops->get_eeprom &&
- !of_property_read_u32(np, "eeprom-length", &eeprom_len))
- chip->eeprom_len = eeprom_len;
+ if (chip->info->ops->get_eeprom) {
+ if (np)
+ of_property_read_u32(np, "eeprom-length",
+ &chip->eeprom_len);
+ else
+ chip->eeprom_len = pdata->eeprom_len;
+ }
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_switch_reset(chip);
@@ -4267,6 +4510,9 @@ out_g1_irq:
mv88e6xxx_irq_poll_free(chip);
mutex_unlock(&chip->reg_lock);
out:
+ if (pdata)
+ dev_put(pdata->netdev);
+
return err;
}
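The probe path above now accepts platform data as an alternative to a DT node. A minimal sketch of the platform data a board file might register, limited to the fields the probe actually dereferences (compatible, enabled_ports, cd.port_names, netdev, eeprom_len); the header location, model string and port layout are illustrative assumptions, not part of this patch:

#include <linux/bits.h>
#include <linux/platform_data/mv88e6xxx.h>	/* assumed header location */

static struct dsa_mv88e6xxx_pdata board_sw_pdata = {
	.cd = {
		.port_names[0]	= "lan1",
		.port_names[1]	= "lan2",
		.port_names[5]	= "cpu",	/* probe maps this port to .netdev */
	},
	.compatible	= "marvell,mv88e6085",	/* matched against the of_match_table */
	.enabled_ports	= BIT(0) | BIT(1) | BIT(5),
	.eeprom_len	= 256,
	/* .netdev must be set at runtime to the CPU port's master net_device;
	 * the error path above drops a reference on it with dev_put().
	 */
};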
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 80490f66bc06..8ac3fbb15352 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -21,10 +21,6 @@
#include <linux/timecounter.h>
#include <net/dsa.h>
-#ifndef UINT64_MAX
-#define UINT64_MAX (u64)(~((u64)0))
-#endif
-
#define SMI_CMD 0x00
#define SMI_CMD_BUSY BIT(15)
#define SMI_CMD_CLAUSE_22 BIT(12)
@@ -114,6 +110,7 @@ struct mv88e6xxx_info {
unsigned int num_gpio;
unsigned int max_vid;
unsigned int port_base_addr;
+ unsigned int phy_base_addr;
unsigned int global1_addr;
unsigned int global2_addr;
unsigned int age_time_coeff;
@@ -241,7 +238,7 @@ struct mv88e6xxx_chip {
struct gpio_desc *reset;
/* set to size of eeprom if supported by the switch */
- int eeprom_len;
+ u32 eeprom_len;
/* List of mdio busses */
struct list_head mdios;
@@ -297,6 +294,9 @@ struct mv88e6xxx_mdio_bus {
};
struct mv88e6xxx_ops {
+ int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
+ int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
+
/* Ingress Rate Limit unit (IRL) operations */
int (*irl_init_all)(struct mv88e6xxx_chip *chip, int port);
@@ -405,6 +405,12 @@ struct mv88e6xxx_ops {
uint64_t *data);
int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port);
+
+#define MV88E6XXX_CASCADE_PORT_NONE 0xe
+#define MV88E6XXX_CASCADE_PORT_MULTIPLE 0xf
+
+ int (*set_cascade_port)(struct mv88e6xxx_chip *chip, int port);
+
const struct mv88e6xxx_irq_ops *watchdog_ops;
int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
@@ -430,6 +436,9 @@ struct mv88e6xxx_ops {
/* Interface to the AVB/PTP registers */
const struct mv88e6xxx_avb_ops *avb_ops;
+
+ /* Remote Management Unit operations */
+ int (*rmu_disable)(struct mv88e6xxx_chip *chip);
};
struct mv88e6xxx_irq_ops {
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index b43bd6476632..d721ccf7d8be 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -241,6 +241,64 @@ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip)
return mv88e6185_g1_wait_ppu_disabled(chip);
}
+/* Offset 0x10: IP-PRI Mapping Register 0
+ * Offset 0x11: IP-PRI Mapping Register 1
+ * Offset 0x12: IP-PRI Mapping Register 2
+ * Offset 0x13: IP-PRI Mapping Register 3
+ * Offset 0x14: IP-PRI Mapping Register 4
+ * Offset 0x15: IP-PRI Mapping Register 5
+ * Offset 0x16: IP-PRI Mapping Register 6
+ * Offset 0x17: IP-PRI Mapping Register 7
+ */
+
+int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ /* Reset the IP TOS/DiffServ/Traffic priorities to defaults */
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_0, 0x0000);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_1, 0x0000);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_2, 0x5555);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_3, 0x5555);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_4, 0xaaaa);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_5, 0xaaaa);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_6, 0xffff);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_7, 0xffff);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Offset 0x18: IEEE-PRI Register */
+
+int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
+{
+ /* Reset the IEEE Tag priorities to defaults */
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41);
+}
+
/* Offset 0x1a: Monitor Control */
/* Offset 0x1a: Monitor & MGMT Control on some devices */
@@ -350,20 +408,59 @@ int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
/* Offset 0x1c: Global Control 2 */
-int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
+static int mv88e6xxx_g1_ctl2_mask(struct mv88e6xxx_chip *chip, u16 mask,
+ u16 val)
{
- u16 val;
+ u16 reg;
int err;
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL2, &val);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL2, &reg);
if (err)
return err;
- val |= MV88E6XXX_G1_CTL2_HIST_RX_TX;
+ reg &= ~mask;
+ reg |= val & mask;
- err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, val);
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, reg);
+}
- return err;
+int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port)
+{
+ const u16 mask = MV88E6185_G1_CTL2_CASCADE_PORT_MASK;
+
+ return mv88e6xxx_g1_ctl2_mask(chip, mask, port << __bf_shf(mask));
+}
+
+int mv88e6085_g1_rmu_disable(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_ctl2_mask(chip, MV88E6085_G1_CTL2_P10RM |
+ MV88E6085_G1_CTL2_RM_ENABLE, 0);
+}
+
+int mv88e6352_g1_rmu_disable(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_ctl2_mask(chip, MV88E6352_G1_CTL2_RMU_MODE_MASK,
+ MV88E6352_G1_CTL2_RMU_MODE_DISABLED);
+}
+
+int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_RMU_MODE_MASK,
+ MV88E6390_G1_CTL2_RMU_MODE_DISABLED);
+}
+
+int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_HIST_MODE_MASK,
+ MV88E6390_G1_CTL2_HIST_MODE_RX |
+ MV88E6390_G1_CTL2_HIST_MODE_TX);
+}
+
+int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index)
+{
+ return mv88e6xxx_g1_ctl2_mask(chip,
+ MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK,
+ index);
}
/* Offset 0x1d: Statistics Operation 2 */
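mv88e6xxx_g1_ctl2_mask() above is a plain read-modify-write helper: only the bits covered by the mask change, and __bf_shf() (linux/bitfield.h) supplies the shift of the field's lowest bit. A small standalone sketch of how the cascade-port value is placed, using the constants the global1.h hunk below introduces:

#include <linux/bitfield.h>

/* MV88E6185_G1_CTL2_CASCADE_PORT_MASK is 0xf000, so __bf_shf() yields 12:
 * MV88E6XXX_CASCADE_PORT_NONE (0xe) becomes 0xe000, the value previously
 * hard-coded as MV88E6XXX_G1_CTL2_NO_CASCADE.
 */
static u16 cascade_port_field(int port)
{
	const u16 mask = 0xf000;	/* MV88E6185_G1_CTL2_CASCADE_PORT_MASK */

	return (port << __bf_shf(mask)) & mask;
}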
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 6aee7316fea6..7c791c1da4b9 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -201,11 +201,35 @@
/* Offset 0x1C: Global Control 2 */
#define MV88E6XXX_G1_CTL2 0x1c
-#define MV88E6XXX_G1_CTL2_NO_CASCADE 0xe000
-#define MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE 0xf000
-#define MV88E6XXX_G1_CTL2_HIST_RX 0x0040
-#define MV88E6XXX_G1_CTL2_HIST_TX 0x0080
-#define MV88E6XXX_G1_CTL2_HIST_RX_TX 0x00c0
+#define MV88E6185_G1_CTL2_CASCADE_PORT_MASK 0xf000
+#define MV88E6185_G1_CTL2_CASCADE_PORT_NONE 0xe000
+#define MV88E6185_G1_CTL2_CASCADE_PORT_MULTI 0xf000
+#define MV88E6352_G1_CTL2_HEADER_TYPE_MASK 0xc000
+#define MV88E6352_G1_CTL2_HEADER_TYPE_ORIG 0x0000
+#define MV88E6352_G1_CTL2_HEADER_TYPE_MGMT 0x4000
+#define MV88E6390_G1_CTL2_HEADER_TYPE_LAG 0x8000
+#define MV88E6352_G1_CTL2_RMU_MODE_MASK 0x3000
+#define MV88E6352_G1_CTL2_RMU_MODE_DISABLED 0x0000
+#define MV88E6352_G1_CTL2_RMU_MODE_PORT_4 0x1000
+#define MV88E6352_G1_CTL2_RMU_MODE_PORT_5 0x2000
+#define MV88E6352_G1_CTL2_RMU_MODE_PORT_6 0x3000
+#define MV88E6085_G1_CTL2_DA_CHECK 0x4000
+#define MV88E6085_G1_CTL2_P10RM 0x2000
+#define MV88E6085_G1_CTL2_RM_ENABLE 0x1000
+#define MV88E6352_G1_CTL2_DA_CHECK 0x0800
+#define MV88E6390_G1_CTL2_RMU_MODE_MASK 0x0700
+#define MV88E6390_G1_CTL2_RMU_MODE_PORT_0 0x0000
+#define MV88E6390_G1_CTL2_RMU_MODE_PORT_1 0x0100
+#define MV88E6390_G1_CTL2_RMU_MODE_PORT_9 0x0200
+#define MV88E6390_G1_CTL2_RMU_MODE_PORT_10 0x0300
+#define MV88E6390_G1_CTL2_RMU_MODE_ALL_DSA 0x0600
+#define MV88E6390_G1_CTL2_RMU_MODE_DISABLED 0x0700
+#define MV88E6390_G1_CTL2_HIST_MODE_MASK 0x00c0
+#define MV88E6390_G1_CTL2_HIST_MODE_RX 0x0040
+#define MV88E6390_G1_CTL2_HIST_MODE_TX 0x0080
+#define MV88E6352_G1_CTL2_CTR_MODE_MASK 0x0060
+#define MV88E6390_G1_CTL2_CTR_MODE 0x0020
+#define MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK 0x001f
/* Offset 0x1D: Stats Operation Register */
#define MV88E6XXX_G1_STATS_OP 0x1d
@@ -253,6 +277,17 @@ int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
+int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip);
+int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip);
+
+int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port);
+
+int mv88e6085_g1_rmu_disable(struct mv88e6xxx_chip *chip);
+int mv88e6352_g1_rmu_disable(struct mv88e6xxx_chip *chip);
+int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip);
+
+int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index);
+
int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all);
int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
unsigned int msecs);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 0ce627fded48..91a3cb2452ac 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -119,37 +119,17 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
/* Offset 0x06: Device Mapping Table register */
-static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
- int target, int port)
+int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
+ int port)
{
- u16 val = (target << 8) | (port & 0xf);
+ u16 val = (target << 8) | (port & 0x1f);
+ /* Modern chips use 5 bits to define a device mapping port,
+ * but bit 4 is reserved on older chips, so it is safe to use.
+ */
return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_DEVICE_MAPPING, val);
}
-static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip)
-{
- int target, port;
- int err;
-
- /* Initialize the routing port to the 32 possible target devices */
- for (target = 0; target < 32; ++target) {
- port = 0xf;
-
- if (target < DSA_MAX_SWITCHES) {
- port = chip->ds->rtable[target];
- if (port == DSA_RTABLE_NONE)
- port = 0xf;
- }
-
- err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
- if (err)
- break;
- }
-
- return err;
-}
-
/* Offset 0x07: Trunk Mask Table register */
static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
@@ -174,7 +154,7 @@ static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MAPPING, val);
}
-static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip)
+int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
{
const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
int i, err;
@@ -1067,9 +1047,6 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
{
int err, irq, virq;
- if (!chip->dev->of_node)
- return -EINVAL;
-
chip->g2_irq.domain = irq_domain_add_simple(
chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
if (!chip->g2_irq.domain)
@@ -1118,7 +1095,7 @@ int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip,
err = irq;
goto out;
}
- bus->irq[chip->info->port_base_addr + phy] = irq;
+ bus->irq[chip->info->phy_base_addr + phy] = irq;
}
return 0;
out:
@@ -1138,31 +1115,3 @@ void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip,
for (phy = 0; phy < chip->info->num_internal_phys; phy++)
irq_dispose_mapping(bus->irq[phy]);
}
-
-int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
-{
- u16 reg;
- int err;
-
- /* Ignore removed tag data on doubly tagged packets, disable
- * flow control messages, force flow control priority to the
- * highest, and send all special multicast frames to the CPU
- * port at the highest priority.
- */
- reg = MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI | (0x7 << 4);
- err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, reg);
- if (err)
- return err;
-
- /* Program the DSA routing table. */
- err = mv88e6xxx_g2_set_device_mapping(chip);
- if (err)
- return err;
-
- /* Clear all trunk masks and mapping. */
- err = mv88e6xxx_g2_clear_trunk(chip);
- if (err)
- return err;
-
- return 0;
-}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 520ec70d32e8..37e8ce2c72a0 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -60,7 +60,8 @@
#define MV88E6XXX_G2_DEVICE_MAPPING 0x06
#define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000
#define MV88E6XXX_G2_DEVICE_MAPPING_DEV_MASK 0x1f00
-#define MV88E6XXX_G2_DEVICE_MAPPING_PORT_MASK 0x000f
+#define MV88E6352_G2_DEVICE_MAPPING_PORT_MASK 0x000f
+#define MV88E6390_G2_DEVICE_MAPPING_PORT_MASK 0x001f
/* Offset 0x07: Trunk Mask Table Register */
#define MV88E6XXX_G2_TRUNK_MASK 0x07
@@ -313,7 +314,6 @@ int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
int src_port, u16 data);
int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip);
-int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip);
@@ -327,6 +327,11 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
+
+int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
+ int port);
+
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
@@ -441,11 +446,6 @@ static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
return -EOPNOTSUPP;
}
-static inline int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
-{
- return -EOPNOTSUPP;
-}
-
static inline int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
{
return -EOPNOTSUPP;
@@ -495,6 +495,17 @@ static inline int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
return -EOPNOTSUPP;
}
+static inline int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
+ int target, int port)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
#endif /* _MV88E6XXX_GLOBAL2_H */
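The global2.h hunks keep the file's existing convention for builds without CONFIG_NET_DSA_MV88E6XXX_GLOBAL2: every helper exported from global2.c gets a static inline stub returning -EOPNOTSUPP so callers compile either way. The shape of that pattern, with placeholder names:

#ifdef CONFIG_SOME_FEATURE
int some_helper(struct some_chip *chip);
#else
static inline int some_helper(struct some_chip *chip)
{
	return -EOPNOTSUPP;
}
#endif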
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index ac7694c71266..a036c490b7ce 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -285,10 +285,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
struct sk_buff_head *rxq)
{
u16 buf[4] = { 0 }, status, seq_id;
- u64 ns, timelo, timehi;
struct skb_shared_hwtstamps *shwt;
+ struct sk_buff_head received;
+ u64 ns, timelo, timehi;
+ unsigned long flags;
int err;
+ /* The latched timestamp belongs to one of the received frames. */
+ __skb_queue_head_init(&received);
+ spin_lock_irqsave(&rxq->lock, flags);
+ skb_queue_splice_tail_init(rxq, &received);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
reg, buf, ARRAY_SIZE(buf));
@@ -311,7 +319,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
/* Since the device can only handle one time stamp at a time,
* we purge any extra frames from the queue.
*/
- for ( ; skb; skb = skb_dequeue(rxq)) {
+ for ( ; skb; skb = __skb_dequeue(&received)) {
if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
ns = timehi << 16 | timelo;
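The hwtstamp change above applies a common pattern: splice every pending entry off the shared receive queue while holding its lock, then walk the private list with the lock released, so the PTP register reads never run under the producer's lock. A generic sketch of the pattern; process() is a placeholder for the per-frame handling:

#include <linux/skbuff.h>

static void drain_rx_queue(struct sk_buff_head *rxq)
{
	struct sk_buff_head received;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&received);
	spin_lock_irqsave(&rxq->lock, flags);
	skb_queue_splice_tail_init(rxq, &received);
	spin_unlock_irqrestore(&rxq->lock, flags);

	while ((skb = __skb_dequeue(&received)) != NULL)
		process(skb);		/* placeholder for per-frame handling */
}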
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 6315774d72b3..429d0ebcd5b1 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -15,6 +15,7 @@
#include <linux/bitfield.h>
#include <linux/if_bridge.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#include "chip.h"
#include "port.h"
@@ -378,6 +379,44 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
return 0;
}
+int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_link_state *state)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ switch (reg & MV88E6XXX_PORT_STS_SPEED_MASK) {
+ case MV88E6XXX_PORT_STS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case MV88E6XXX_PORT_STS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case MV88E6XXX_PORT_STS_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ case MV88E6XXX_PORT_STS_SPEED_10000:
+ if ((reg & MV88E6XXX_PORT_STS_CMODE_MASK) ==

+ MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_10000;
+ break;
+ }
+
+ state->duplex = reg & MV88E6XXX_PORT_STS_DUPLEX ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ state->link = !!(reg & MV88E6XXX_PORT_STS_LINK);
+ state->an_enabled = 1;
+ state->an_complete = state->link;
+
+ return 0;
+}
+
/* Offset 0x02: Jamming Control
*
* Do not limit the period of time that this port can be paused for by
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index b16d5f0e6e9c..5e1db1b221ca 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -29,6 +29,7 @@
#define MV88E6XXX_PORT_STS_SPEED_10 0x0000
#define MV88E6XXX_PORT_STS_SPEED_100 0x0100
#define MV88E6XXX_PORT_STS_SPEED_1000 0x0200
+#define MV88E6XXX_PORT_STS_SPEED_10000 0x0300
#define MV88E6352_PORT_STS_EEE 0x0040
#define MV88E6165_PORT_STS_AM_DIS 0x0040
#define MV88E6185_PORT_STS_MGMII 0x0040
@@ -295,6 +296,8 @@ int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_link_state *state);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index fb058fd35c0d..880b2cf0a530 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -326,3 +326,23 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
return 0;
}
+
+int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+{
+ int err;
+ u8 cmode;
+
+ if (port != 5)
+ return 0;
+
+ err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
+ if (err)
+ return err;
+
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ return mv88e6390_serdes_sgmii(chip, MV88E6341_ADDR_SERDES, on);
+
+ return 0;
+}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 1897c01c6e19..b6e5fbd46b5e 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -19,6 +19,8 @@
#define MV88E6352_ADDR_SERDES 0x0f
#define MV88E6352_SERDES_PAGE_FIBER 0x01
+#define MV88E6341_ADDR_SERDES 0x15
+
#define MV88E6390_PORT9_LANE0 0x09
#define MV88E6390_PORT9_LANE1 0x12
#define MV88E6390_PORT9_LANE2 0x13
@@ -42,6 +44,7 @@
#define MV88E6390_SGMII_CONTROL_LOOPBACK BIT(14)
#define MV88E6390_SGMII_CONTROL_PDOWN BIT(11)
+int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 600d5ad1fbde..757b6d90ea36 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -600,10 +600,13 @@ qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
}
static void
-qca8k_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
{
int i;
+ if (stringset != ETH_SS_STATS)
+ return;
+
for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
ETH_GSTRING_LEN);
@@ -631,8 +634,11 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
}
static int
-qca8k_get_sset_count(struct dsa_switch *ds, int port)
+qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
return ARRAY_SIZE(ar8327_mib);
}
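The qca8k hunks track a DSA core interface change: get_strings() and get_sset_count() now take a string-set argument, and a driver that only exports MIB counters should ignore anything other than ETH_SS_STATS. A minimal template of the updated pair; my_mib stands in for the driver's statistics table:

static int my_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	if (sset != ETH_SS_STATS)
		return 0;

	return ARRAY_SIZE(my_mib);
}

static void my_get_strings(struct dsa_switch *ds, int port, u32 stringset,
			   uint8_t *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(my_mib); i++)
		strncpy(data + i * ETH_GSTRING_LEN, my_mib[i].name,
			ETH_GSTRING_LEN);
}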
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 36c8950dbd2d..5bc168314ea2 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -765,8 +765,9 @@ static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int vortex_rx(struct net_device *dev);
static int boomerang_rx(struct net_device *dev);
-static irqreturn_t vortex_interrupt(int irq, void *dev_id);
-static irqreturn_t boomerang_interrupt(int irq, void *dev_id);
+static irqreturn_t vortex_boomerang_interrupt(int irq, void *dev_id);
+static irqreturn_t _vortex_interrupt(int irq, struct net_device *dev);
+static irqreturn_t _boomerang_interrupt(int irq, struct net_device *dev);
static int vortex_close(struct net_device *dev);
static void dump_tx_ring(struct net_device *dev);
static void update_stats(void __iomem *ioaddr, struct net_device *dev);
@@ -838,11 +839,7 @@ MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_vortex(struct net_device *dev)
{
- struct vortex_private *vp = netdev_priv(dev);
- unsigned long flags;
- local_irq_save(flags);
- (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
- local_irq_restore(flags);
+ vortex_boomerang_interrupt(dev->irq, dev);
}
#endif
@@ -1212,9 +1209,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
vp->mii.reg_num_mask = 0x1f;
/* Makes sure rings are at least 16 byte aligned. */
- vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
- &vp->rx_ring_dma);
+ &vp->rx_ring_dma, GFP_KERNEL);
retval = -ENOMEM;
if (!vp->rx_ring)
goto free_device;
@@ -1476,11 +1473,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
return 0;
free_ring:
- pci_free_consistent(pdev,
- sizeof(struct boom_rx_desc) * RX_RING_SIZE
- + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
- vp->rx_ring,
- vp->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring, vp->rx_ring_dma);
free_device:
free_netdev(dev);
pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
@@ -1729,8 +1725,7 @@ vortex_open(struct net_device *dev)
dma_addr_t dma;
/* Use the now-standard shared IRQ implementation. */
- if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
- boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
+ if ((retval = request_irq(dev->irq, vortex_boomerang_interrupt, IRQF_SHARED, dev->name, dev))) {
pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
goto err;
}
@@ -1751,9 +1746,9 @@ vortex_open(struct net_device *dev)
break; /* Bad news! */
skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
- dma = pci_map_single(VORTEX_PCI(vp), skb->data,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+ dma = dma_map_single(vp->gendev, skb->data,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ if (dma_mapping_error(vp->gendev, dma))
break;
vp->rx_ring[i].addr = cpu_to_le32(dma);
}
@@ -1905,18 +1900,7 @@ static void vortex_tx_timeout(struct net_device *dev)
pr_err("%s: Interrupt posted but not delivered --"
" IRQ blocked by another device?\n", dev->name);
/* Bad idea here.. but we might as well handle a few events. */
- {
- /*
- * Block interrupts because vortex_interrupt does a bare spin_lock()
- */
- unsigned long flags;
- local_irq_save(flags);
- if (vp->full_bus_master_tx)
- boomerang_interrupt(dev->irq, dev);
- else
- vortex_interrupt(dev->irq, dev);
- local_irq_restore(flags);
- }
+ vortex_boomerang_interrupt(dev->irq, dev);
}
if (vortex_debug > 0)
@@ -2067,9 +2051,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (vp->bus_master) {
/* Set the bus-master controller to transfer the packet. */
int len = (skb->len + 3) & ~3;
- vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
- PCI_DMA_TODEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+ vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -2168,9 +2152,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
if (!skb_shinfo(skb)->nr_frags) {
- dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+ dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(vp->gendev, dma_addr))
goto out_dma_err;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2178,9 +2162,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
int i;
- dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+ dma_addr = dma_map_single(vp->gendev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(vp->gendev, dma_addr))
goto out_dma_err;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2189,21 +2173,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+ dma_addr = skb_frag_dma_map(vp->gendev, frag,
0,
frag->size,
DMA_TO_DEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+ if (dma_mapping_error(vp->gendev, dma_addr)) {
for(i = i-1; i >= 0; i--)
- dma_unmap_page(&VORTEX_PCI(vp)->dev,
+ dma_unmap_page(vp->gendev,
le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
DMA_TO_DEVICE);
- pci_unmap_single(VORTEX_PCI(vp),
+ dma_unmap_single(vp->gendev,
le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
le32_to_cpu(vp->tx_ring[entry].frag[0].length),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
goto out_dma_err;
}
@@ -2218,8 +2202,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#else
- dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+ dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(vp->gendev, dma_addr))
goto out_dma_err;
vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
@@ -2254,7 +2238,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
out:
return NETDEV_TX_OK;
out_dma_err:
- dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+ dev_err(vp->gendev, "Error mapping dma buffer\n");
goto out;
}
@@ -2267,9 +2251,8 @@ out_dma_err:
*/
static irqreturn_t
-vortex_interrupt(int irq, void *dev_id)
+_vortex_interrupt(int irq, struct net_device *dev)
{
- struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr;
int status;
@@ -2278,7 +2261,6 @@ vortex_interrupt(int irq, void *dev_id)
unsigned int bytes_compl = 0, pkts_compl = 0;
ioaddr = vp->ioaddr;
- spin_lock(&vp->lock);
status = ioread16(ioaddr + EL3_STATUS);
@@ -2322,7 +2304,7 @@ vortex_interrupt(int irq, void *dev_id)
if (status & DMADone) {
if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
- pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+ dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
pkts_compl++;
bytes_compl += vp->tx_skb->len;
dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
@@ -2376,7 +2358,6 @@ vortex_interrupt(int irq, void *dev_id)
pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
- spin_unlock(&vp->lock);
return IRQ_RETVAL(handled);
}
@@ -2386,9 +2367,8 @@ handler_exit:
*/
static irqreturn_t
-boomerang_interrupt(int irq, void *dev_id)
+_boomerang_interrupt(int irq, struct net_device *dev)
{
- struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr;
int status;
@@ -2398,12 +2378,6 @@ boomerang_interrupt(int irq, void *dev_id)
ioaddr = vp->ioaddr;
-
- /*
- * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
- * and boomerang_start_xmit
- */
- spin_lock(&vp->lock);
vp->handling_irq = 1;
status = ioread16(ioaddr + EL3_STATUS);
@@ -2459,19 +2433,19 @@ boomerang_interrupt(int irq, void *dev_id)
struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
int i;
- pci_unmap_single(VORTEX_PCI(vp),
+ dma_unmap_single(vp->gendev,
le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
- pci_unmap_page(VORTEX_PCI(vp),
+ dma_unmap_page(vp->gendev,
le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
#else
- pci_unmap_single(VORTEX_PCI(vp),
- le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(vp->gendev,
+ le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
#endif
pkts_compl++;
bytes_compl += skb->len;
@@ -2522,10 +2496,29 @@ boomerang_interrupt(int irq, void *dev_id)
dev->name, status);
handler_exit:
vp->handling_irq = 0;
- spin_unlock(&vp->lock);
return IRQ_RETVAL(handled);
}
+static irqreturn_t
+vortex_boomerang_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct vortex_private *vp = netdev_priv(dev);
+ unsigned long flags;
+ irqreturn_t ret;
+
+ spin_lock_irqsave(&vp->lock, flags);
+
+ if (vp->full_bus_master_rx)
+ ret = _boomerang_interrupt(dev->irq, dev);
+ else
+ ret = _vortex_interrupt(dev->irq, dev);
+
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ return ret;
+}
+
static int vortex_rx(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
@@ -2561,14 +2554,14 @@ static int vortex_rx(struct net_device *dev)
/* 'skb_put()' points to the start of sk_buff data area. */
if (vp->bus_master &&
! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
- dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
+ pkt_len, DMA_FROM_DEVICE);
iowrite32(dma, ioaddr + Wn7_MasterAddr);
iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
iowrite16(StartDMAUp, ioaddr + EL3_CMD);
while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
;
- pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
} else {
ioread32_rep(ioaddr + RX_FIFO,
skb_put(skb, pkt_len),
@@ -2635,11 +2628,11 @@ boomerang_rx(struct net_device *dev)
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
/* 'skb_put()' points to the start of sk_buff data area. */
skb_put_data(skb, vp->rx_skbuff[entry]->data,
pkt_len);
- pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
vp->rx_copy++;
} else {
/* Pre-allocate the replacement skb. If it or its
@@ -2651,9 +2644,9 @@ boomerang_rx(struct net_device *dev)
dev->stats.rx_dropped++;
goto clear_complete;
}
- newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+ newdma = dma_map_single(vp->gendev, newskb->data,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ if (dma_mapping_error(vp->gendev, newdma)) {
dev->stats.rx_dropped++;
consume_skb(newskb);
goto clear_complete;
@@ -2664,7 +2657,7 @@ boomerang_rx(struct net_device *dev)
vp->rx_skbuff[entry] = newskb;
vp->rx_ring[entry].addr = cpu_to_le32(newdma);
skb_put(skb, pkt_len);
- pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
vp->rx_nocopy++;
}
skb->protocol = eth_type_trans(skb, dev);
@@ -2761,8 +2754,8 @@ vortex_close(struct net_device *dev)
if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
for (i = 0; i < RX_RING_SIZE; i++)
if (vp->rx_skbuff[i]) {
- pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb(vp->rx_skbuff[i]);
vp->rx_skbuff[i] = NULL;
}
@@ -2775,12 +2768,12 @@ vortex_close(struct net_device *dev)
int k;
for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
- pci_unmap_single(VORTEX_PCI(vp),
+ dma_unmap_single(vp->gendev,
le32_to_cpu(vp->tx_ring[i].frag[k].addr),
le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
#else
- pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
#endif
dev_kfree_skb(skb);
vp->tx_skbuff[i] = NULL;
@@ -3288,11 +3281,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, vp->ioaddr);
- pci_free_consistent(pdev,
- sizeof(struct boom_rx_desc) * RX_RING_SIZE
- + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
- vp->rx_ring,
- vp->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct boom_rx_desc) * RX_RING_SIZE +
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring, vp->rx_ring_dma);
pci_release_regions(pdev);
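The 3c59x conversion above swaps the legacy PCI DMA wrappers for the generic DMA API call for call: pci_map_single() becomes dma_map_single(), PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE become DMA_TO_DEVICE/DMA_FROM_DEVICE, and the device argument is the underlying struct device (vp->gendev here) rather than the pci_dev. A minimal sketch of the mapping step with its error check; the function name is illustrative:

#include <linux/dma-mapping.h>

static int map_tx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *handle)
{
	/* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	return 0;
}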
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 9fee7c83ef9f..f2f0264c58ba 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -29,8 +29,8 @@ config PCMCIA_AXNET
called axnet_cs. If unsure, say N.
config AX88796
- tristate "ASIX AX88796 NE2000 clone support"
- depends on (ARM || MIPS || SUPERH)
+ tristate "ASIX AX88796 NE2000 clone support" if !ZORRO
+ depends on (ARM || MIPS || SUPERH || ZORRO || COMPILE_TEST)
select CRC32
select PHYLIB
select MDIO_BITBANG
@@ -45,6 +45,19 @@ config AX88796_93CX6
---help---
Select this if your platform comes with an external 93CX6 eeprom.
+config XSURF100
+ tristate "Amiga XSurf 100 AX88796/NE2000 clone support"
+ depends on ZORRO
+ select AX88796
+ select ASIX_PHY
+ help
+ This driver is for the Individual Computers X-Surf 100 Ethernet
+ card (based on the Asix AX88796 chip). If you have such a card,
+ say Y. Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called xsurf100.
+
config HYDRA
tristate "Hydra support"
depends on ZORRO
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index 1d650e66cc6e..85c83c566ec6 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -16,4 +16,5 @@ obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
obj-$(CONFIG_STNIC) += stnic.o 8390.o
obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
obj-$(CONFIG_WD80x3) += wd.o 8390.o
+obj-$(CONFIG_XSURF100) += xsurf100.o
obj-$(CONFIG_ZORRO8390) += zorro8390.o
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index da61cf3cb3a9..2a0ddec1dd56 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -163,6 +163,21 @@ static void ax_reset_8390(struct net_device *dev)
ei_outb(ENISR_RESET, addr + EN0_ISR); /* Ack intr. */
}
+/* Wrapper for __ei_interrupt for platforms that have a platform-specific
+ * way to find out whether the interrupt request might be caused by
+ * the ax88796 chip.
+ */
+static irqreturn_t ax_ei_interrupt_filtered(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct ax_device *ax = to_ax_dev(dev);
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
+
+ if (!ax->plat->check_irq(pdev))
+ return IRQ_NONE;
+
+ return ax_ei_interrupt(irq, dev_id);
+}
static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page)
@@ -387,6 +402,90 @@ static void ax_phy_switch(struct net_device *dev, int on)
ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
}
+static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (level)
+ ax->reg_memr |= AX_MEMR_MDC;
+ else
+ ax->reg_memr &= ~AX_MEMR_MDC;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (output)
+ ax->reg_memr &= ~AX_MEMR_MDIR;
+ else
+ ax->reg_memr |= AX_MEMR_MDIR;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (value)
+ ax->reg_memr |= AX_MEMR_MDO;
+ else
+ ax->reg_memr &= ~AX_MEMR_MDO;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+ int reg_memr = ei_inb(ax->addr_memr);
+
+ return reg_memr & AX_MEMR_MDI ? 1 : 0;
+}
+
+static const struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = ax_bb_mdc,
+ .set_mdio_dir = ax_bb_dir,
+ .set_mdio_data = ax_bb_set_data,
+ .get_mdio_data = ax_bb_get_data,
+};
+
+static int ax_mii_init(struct net_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
+ int err;
+
+ ax->bb_ctrl.ops = &bb_ops;
+ ax->addr_memr = ei_local->mem + AX_MEMR;
+ ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
+ if (!ax->mii_bus) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ax->mii_bus->name = "ax88796_mii_bus";
+ ax->mii_bus->parent = dev->dev.parent;
+ snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+
+ err = mdiobus_register(ax->mii_bus);
+ if (err)
+ goto out_free_mdio_bitbang;
+
+ return 0;
+
+ out_free_mdio_bitbang:
+ free_mdio_bitbang(ax->mii_bus);
+ out:
+ return err;
+}
+
static int ax_open(struct net_device *dev)
{
struct ax_device *ax = to_ax_dev(dev);
@@ -394,8 +493,16 @@ static int ax_open(struct net_device *dev)
netdev_dbg(dev, "open\n");
- ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
- dev->name, dev);
+ ret = ax_mii_init(dev);
+ if (ret)
+ goto failed_mii;
+
+ if (ax->plat->check_irq)
+ ret = request_irq(dev->irq, ax_ei_interrupt_filtered,
+ ax->irqflags, dev->name, dev);
+ else
+ ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
+ dev->name, dev);
if (ret)
goto failed_request_irq;
@@ -421,6 +528,10 @@ static int ax_open(struct net_device *dev)
ax_phy_switch(dev, 0);
free_irq(dev->irq, dev);
failed_request_irq:
+ /* unregister mdiobus */
+ mdiobus_unregister(ax->mii_bus);
+ free_mdio_bitbang(ax->mii_bus);
+ failed_mii:
return ret;
}
@@ -440,6 +551,9 @@ static int ax_close(struct net_device *dev)
phy_disconnect(dev->phydev);
free_irq(dev->irq, dev);
+
+ mdiobus_unregister(ax->mii_bus);
+ free_mdio_bitbang(ax->mii_bus);
return 0;
}
@@ -539,92 +653,8 @@ static const struct net_device_ops ax_netdev_ops = {
#endif
};
-static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
-{
- struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
-
- if (level)
- ax->reg_memr |= AX_MEMR_MDC;
- else
- ax->reg_memr &= ~AX_MEMR_MDC;
-
- ei_outb(ax->reg_memr, ax->addr_memr);
-}
-
-static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
-{
- struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
-
- if (output)
- ax->reg_memr &= ~AX_MEMR_MDIR;
- else
- ax->reg_memr |= AX_MEMR_MDIR;
-
- ei_outb(ax->reg_memr, ax->addr_memr);
-}
-
-static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
-{
- struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
-
- if (value)
- ax->reg_memr |= AX_MEMR_MDO;
- else
- ax->reg_memr &= ~AX_MEMR_MDO;
-
- ei_outb(ax->reg_memr, ax->addr_memr);
-}
-
-static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
-{
- struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
- int reg_memr = ei_inb(ax->addr_memr);
-
- return reg_memr & AX_MEMR_MDI ? 1 : 0;
-}
-
-static const struct mdiobb_ops bb_ops = {
- .owner = THIS_MODULE,
- .set_mdc = ax_bb_mdc,
- .set_mdio_dir = ax_bb_dir,
- .set_mdio_data = ax_bb_set_data,
- .get_mdio_data = ax_bb_get_data,
-};
-
/* setup code */
-static int ax_mii_init(struct net_device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev->dev.parent);
- struct ei_device *ei_local = netdev_priv(dev);
- struct ax_device *ax = to_ax_dev(dev);
- int err;
-
- ax->bb_ctrl.ops = &bb_ops;
- ax->addr_memr = ei_local->mem + AX_MEMR;
- ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
- if (!ax->mii_bus) {
- err = -ENOMEM;
- goto out;
- }
-
- ax->mii_bus->name = "ax88796_mii_bus";
- ax->mii_bus->parent = dev->dev.parent;
- snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- pdev->name, pdev->id);
-
- err = mdiobus_register(ax->mii_bus);
- if (err)
- goto out_free_mdio_bitbang;
-
- return 0;
-
- out_free_mdio_bitbang:
- free_mdio_bitbang(ax->mii_bus);
- out:
- return err;
-}
-
static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
{
void __iomem *ioaddr = ei_local->mem;
@@ -669,10 +699,16 @@ static int ax_init_dev(struct net_device *dev)
if (ax->plat->flags & AXFLG_HAS_EEPROM) {
unsigned char SA_prom[32];
+ ei_outb(6, ioaddr + EN0_RCNTLO);
+ ei_outb(0, ioaddr + EN0_RCNTHI);
+ ei_outb(0, ioaddr + EN0_RSARLO);
+ ei_outb(0, ioaddr + EN0_RSARHI);
+ ei_outb(E8390_RREAD + E8390_START, ioaddr + NE_CMD);
for (i = 0; i < sizeof(SA_prom); i += 2) {
SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
}
+ ei_outb(ENISR_RDC, ioaddr + EN0_ISR); /* Ack intr. */
if (ax->plat->wordlength == 2)
for (i = 0; i < 16; i++)
@@ -741,18 +777,20 @@ static int ax_init_dev(struct net_device *dev)
#endif
ei_local->reset_8390 = &ax_reset_8390;
- ei_local->block_input = &ax_block_input;
- ei_local->block_output = &ax_block_output;
+ if (ax->plat->block_input)
+ ei_local->block_input = ax->plat->block_input;
+ else
+ ei_local->block_input = &ax_block_input;
+ if (ax->plat->block_output)
+ ei_local->block_output = ax->plat->block_output;
+ else
+ ei_local->block_output = &ax_block_output;
ei_local->get_8390_hdr = &ax_get_8390_hdr;
ei_local->priv = 0;
dev->netdev_ops = &ax_netdev_ops;
dev->ethtool_ops = &ax_ethtool_ops;
- ret = ax_mii_init(dev);
- if (ret)
- goto err_out;
-
ax_NS8390_init(dev, 0);
ret = register_netdev(dev);
@@ -777,7 +815,6 @@ static int ax_remove(struct platform_device *pdev)
struct resource *mem;
unregister_netdev(dev);
- free_irq(dev->irq, dev);
iounmap(ei_local->mem);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -789,6 +826,7 @@ static int ax_remove(struct platform_device *pdev)
release_mem_region(mem->start, resource_size(mem));
}
+ platform_set_drvdata(pdev, NULL);
free_netdev(dev);
return 0;
@@ -835,6 +873,9 @@ static int ax_probe(struct platform_device *pdev)
dev->irq = irq->start;
ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
+ if (irq->flags & IORESOURCE_IRQ_SHAREABLE)
+ ax->irqflags |= IRQF_SHARED;
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no MEM specified\n");
@@ -919,6 +960,7 @@ static int ax_probe(struct platform_device *pdev)
release_mem_region(mem->start, mem_size);
exit_mem:
+ platform_set_drvdata(pdev, NULL);
free_netdev(dev);
return ret;
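With the hunks above, ax88796 can share its interrupt line: when the platform supplies a check_irq() callback, the filtered handler returns IRQ_NONE for interrupts raised by other devices. A sketch of such a callback; the platform-data structure, register and bit are hypothetical (the xsurf100 driver added below provides a real instance, is_xsurf100_network_irq()):

static int board_ax88796_check_irq(struct platform_device *pdev)
{
	struct board_eth_pdata *bd = dev_get_platdata(&pdev->dev);	/* hypothetical */

	/* non-zero only if our chip asserted the shared line */
	return (readw(bd->irq_status) & BOARD_IRQ_ETH) != 0;	/* hypothetical bits */
}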
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index ac99d089ac72..1c97e39b478e 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -164,7 +164,9 @@ bad_clone_list[] __initdata = {
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
-#if defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */
+#if defined(CONFIG_MACH_TX49XX)
+# define DCR_VAL 0x48 /* 8-bit mode */
+#elif defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */
# define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49)
#else
# define DCR_VAL 0x49
diff --git a/drivers/net/ethernet/8390/xsurf100.c b/drivers/net/ethernet/8390/xsurf100.c
new file mode 100644
index 000000000000..e2c963821ffe
--- /dev/null
+++ b/drivers/net/ethernet/8390/xsurf100.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/zorro.h>
+#include <net/ax88796.h>
+#include <asm/amigaints.h>
+
+#define ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100 \
+ ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x64, 0)
+
+#define XS100_IRQSTATUS_BASE 0x40
+#define XS100_8390_BASE 0x800
+
+/* Longword-access area. Translated to 2 16-bit access cycles by the
+ * X-Surf 100 FPGA
+ */
+#define XS100_8390_DATA32_BASE 0x8000
+#define XS100_8390_DATA32_SIZE 0x2000
+/* Sub-Areas for fast data register access; addresses relative to area begin */
+#define XS100_8390_DATA_READ32_BASE 0x0880
+#define XS100_8390_DATA_WRITE32_BASE 0x0C80
+#define XS100_8390_DATA_AREA_SIZE 0x80
+
+#define __NS8390_init ax_NS8390_init
+
+/* force unsigned long back to 'void __iomem *' */
+#define ax_convert_addr(_a) ((void __force __iomem *)(_a))
+
+#define ei_inb(_a) z_readb(ax_convert_addr(_a))
+#define ei_outb(_v, _a) z_writeb(_v, ax_convert_addr(_a))
+
+#define ei_inw(_a) z_readw(ax_convert_addr(_a))
+#define ei_outw(_v, _a) z_writew(_v, ax_convert_addr(_a))
+
+#define ei_inb_p(_a) ei_inb(_a)
+#define ei_outb_p(_v, _a) ei_outb(_v, _a)
+
+/* define EI_SHIFT() to take into account our register offsets */
+#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
+
+/* Ensure we have our RCR base value */
+#define AX88796_PLATFORM
+
+static unsigned char version[] =
+ "ax88796.c: Copyright 2005,2007 Simtec Electronics\n";
+
+#include "lib8390.c"
+
+/* from ne.c */
+#define NE_CMD EI_SHIFT(0x00)
+#define NE_RESET EI_SHIFT(0x1f)
+#define NE_DATAPORT EI_SHIFT(0x10)
+
+struct xsurf100_ax_plat_data {
+ struct ax_plat_data ax;
+ void __iomem *base_regs;
+ void __iomem *data_area;
+};
+
+static int is_xsurf100_network_irq(struct platform_device *pdev)
+{
+ struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);
+
+ return (readw(xs100->base_regs + XS100_IRQSTATUS_BASE) & 0xaaaa) != 0;
+}
+
+/* These functions guarantee that the iomem is accessed with 32 bit
+ * cycles only. z_memcpy_fromio / z_memcpy_toio don't guarantee this.
+ */
+static void z_memcpy_fromio32(void *dst, const void __iomem *src, size_t bytes)
+{
+ while (bytes > 32) {
+ asm __volatile__
+ ("movem.l (%0)+,%%d0-%%d7\n"
+ "movem.l %%d0-%%d7,(%1)\n"
+ "adda.l #32,%1" : "=a"(src), "=a"(dst)
+ : "0"(src), "1"(dst) : "d0", "d1", "d2", "d3", "d4",
+ "d5", "d6", "d7", "memory");
+ bytes -= 32;
+ }
+ while (bytes) {
+ *(uint32_t *)dst = z_readl(src);
+ src += 4;
+ dst += 4;
+ bytes -= 4;
+ }
+}
+
+static void z_memcpy_toio32(void __iomem *dst, const void *src, size_t bytes)
+{
+ while (bytes) {
+ z_writel(*(const uint32_t *)src, dst);
+ src += 4;
+ dst += 4;
+ bytes -= 4;
+ }
+}
+
+static void xs100_write(struct net_device *dev, const void *src,
+ unsigned int count)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
+ struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);
+
+ /* copy whole blocks */
+ while (count > XS100_8390_DATA_AREA_SIZE) {
+ z_memcpy_toio32(xs100->data_area +
+ XS100_8390_DATA_WRITE32_BASE, src,
+ XS100_8390_DATA_AREA_SIZE);
+ src += XS100_8390_DATA_AREA_SIZE;
+ count -= XS100_8390_DATA_AREA_SIZE;
+ }
+ /* copy whole dwords */
+ z_memcpy_toio32(xs100->data_area + XS100_8390_DATA_WRITE32_BASE,
+ src, count & ~3);
+ src += count & ~3;
+ if (count & 2) {
+ ei_outw(*(uint16_t *)src, ei_local->mem + NE_DATAPORT);
+ src += 2;
+ }
+ if (count & 1)
+ ei_outb(*(uint8_t *)src, ei_local->mem + NE_DATAPORT);
+}
+
+static void xs100_read(struct net_device *dev, void *dst, unsigned int count)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
+ struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);
+
+ /* copy whole blocks */
+ while (count > XS100_8390_DATA_AREA_SIZE) {
+ z_memcpy_fromio32(dst, xs100->data_area +
+ XS100_8390_DATA_READ32_BASE,
+ XS100_8390_DATA_AREA_SIZE);
+ dst += XS100_8390_DATA_AREA_SIZE;
+ count -= XS100_8390_DATA_AREA_SIZE;
+ }
+ /* copy whole dwords */
+ z_memcpy_fromio32(dst, xs100->data_area + XS100_8390_DATA_READ32_BASE,
+ count & ~3);
+ dst += count & ~3;
+ if (count & 2) {
+ *(uint16_t *)dst = ei_inw(ei_local->mem + NE_DATAPORT);
+ dst += 2;
+ }
+ if (count & 1)
+ *(uint8_t *)dst = ei_inb(ei_local->mem + NE_DATAPORT);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If
+ * you are porting to a new ethercard, look at the packet driver
+ * source for hints. The NEx000 doesn't share the on-board packet
+ * memory -- you have to put the packet out through the "remote DMA"
+ * dataport using ei_outb.
+ */
+static void xs100_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *nic_base = ei_local->mem;
+ char *buf = skb->data;
+
+ if (ei_local->dmaing) {
+ netdev_err(dev,
+ "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
+ ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
+ ei_outb(count >> 8, nic_base + EN0_RCNTHI);
+ ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
+ ei_outb(E8390_RREAD + E8390_START, nic_base + NE_CMD);
+
+ xs100_read(dev, buf, count);
+
+ ei_local->dmaing &= ~1;
+}
+
+static void xs100_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *nic_base = ei_local->mem;
+ unsigned long dma_start;
+
+ /* Round the count up for word writes. Do we need to do this?
+ * What effect will an odd byte count have on the 8390? I
+ * should check someday.
+ */
+ if (ei_local->word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing
+ * you'll see
+ */
+ if (ei_local->dmaing) {
+ netdev_err(dev,
+ "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, nic_base + NE_CMD);
+
+ ei_outb(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
+ ei_outb(count >> 8, nic_base + EN0_RCNTHI);
+ ei_outb(0x00, nic_base + EN0_RSARLO);
+ ei_outb(start_page, nic_base + EN0_RSARHI);
+
+ ei_outb(E8390_RWRITE + E8390_START, nic_base + NE_CMD);
+
+ xs100_write(dev, buf, count);
+
+ dma_start = jiffies;
+
+ while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
+ if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
+ ei_local->reset_8390(dev);
+ ax_NS8390_init(dev, 1);
+ break;
+ }
+ }
+
+ ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_local->dmaing &= ~0x01;
+}
+
+static int xsurf100_probe(struct zorro_dev *zdev,
+ const struct zorro_device_id *ent)
+{
+ struct platform_device *pdev;
+ struct xsurf100_ax_plat_data ax88796_data;
+ struct resource res[2] = {
+ DEFINE_RES_NAMED(IRQ_AMIGA_PORTS, 1, NULL,
+ IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE),
+ DEFINE_RES_MEM(zdev->resource.start + XS100_8390_BASE,
+ 4 * 0x20)
+ };
+ int reg;
+ /* This table is referenced in the device structure, so it must
+ * outlive the scope of xsurf100_probe.
+ */
+ static u32 reg_offsets[32];
+ int ret = 0;
+
+ /* X-Surf 100 control and 32 bit ring buffer data access areas.
+ * These resources are not used by the ax88796 driver, so must
+ * be requested here and passed via platform data.
+ */
+
+ if (!request_mem_region(zdev->resource.start, 0x100, zdev->name)) {
+ dev_err(&zdev->dev, "cannot reserve X-Surf 100 control registers\n");
+ return -ENXIO;
+ }
+
+ if (!request_mem_region(zdev->resource.start +
+ XS100_8390_DATA32_BASE,
+ XS100_8390_DATA32_SIZE,
+ "X-Surf 100 32-bit data access")) {
+ dev_err(&zdev->dev, "cannot reserve 32-bit area\n");
+ ret = -ENXIO;
+ goto exit_req;
+ }
+
+ for (reg = 0; reg < 0x20; reg++)
+ reg_offsets[reg] = 4 * reg;
+
+ memset(&ax88796_data, 0, sizeof(ax88796_data));
+ ax88796_data.ax.flags = AXFLG_HAS_EEPROM;
+ ax88796_data.ax.wordlength = 2;
+ ax88796_data.ax.dcr_val = 0x48;
+ ax88796_data.ax.rcr_val = 0x40;
+ ax88796_data.ax.reg_offsets = reg_offsets;
+ ax88796_data.ax.check_irq = is_xsurf100_network_irq;
+ ax88796_data.base_regs = ioremap(zdev->resource.start, 0x100);
+
+ /* error handling for ioremap regs */
+ if (!ax88796_data.base_regs) {
+ dev_err(&zdev->dev, "Cannot ioremap area %pR (registers)\n",
+ &zdev->resource);
+
+ ret = -ENXIO;
+ goto exit_req2;
+ }
+
+ ax88796_data.data_area = ioremap(zdev->resource.start +
+ XS100_8390_DATA32_BASE, XS100_8390_DATA32_SIZE);
+
+ /* error handling for ioremap data */
+ if (!ax88796_data.data_area) {
+ dev_err(&zdev->dev,
+ "Cannot ioremap area %pR offset %x (32-bit access)\n",
+ &zdev->resource, XS100_8390_DATA32_BASE);
+
+ ret = -ENXIO;
+ goto exit_mem;
+ }
+
+ ax88796_data.ax.block_output = xs100_block_output;
+ ax88796_data.ax.block_input = xs100_block_input;
+
+ pdev = platform_device_register_resndata(&zdev->dev, "ax88796",
+ zdev->slotaddr, res, 2,
+ &ax88796_data,
+ sizeof(ax88796_data));
+
+ if (IS_ERR(pdev)) {
+ dev_err(&zdev->dev, "cannot register platform device\n");
+ ret = -ENXIO;
+ goto exit_mem2;
+ }
+
+ zorro_set_drvdata(zdev, pdev);
+
+ if (!ret)
+ return 0;
+
+ exit_mem2:
+ iounmap(ax88796_data.data_area);
+
+ exit_mem:
+ iounmap(ax88796_data.base_regs);
+
+ exit_req2:
+ release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE,
+ XS100_8390_DATA32_SIZE);
+
+ exit_req:
+ release_mem_region(zdev->resource.start, 0x100);
+
+ return ret;
+}
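/* The unwind labels above mirror the acquisition order in reverse; each
 * label only undoes the steps that had already succeeded (a sketch of the
 * pairing, not additional code):
 *
 *	request_mem_region(control regs)   <->  exit_req:  release control region
 *	request_mem_region(32-bit area)    <->  exit_req2: release 32-bit region
 *	ioremap(control regs)              <->  exit_mem:  iounmap base_regs
 *	ioremap(32-bit area)               <->  exit_mem2: iounmap data_area
 *	platform_device_register_resndata()     success path returns 0
 */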
+
+static void xsurf100_remove(struct zorro_dev *zdev)
+{
+ struct platform_device *pdev = zorro_get_drvdata(zdev);
+ struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);
+
+ platform_device_unregister(pdev);
+
+ iounmap(xs100->base_regs);
+ release_mem_region(zdev->resource.start, 0x100);
+ iounmap(xs100->data_area);
+ release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE,
+ XS100_8390_DATA32_SIZE);
+}
+
+static const struct zorro_device_id xsurf100_zorro_tbl[] = {
+ { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100, },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(zorro, xsurf100_zorro_tbl);
+
+static struct zorro_driver xsurf100_driver = {
+ .name = "xsurf100",
+ .id_table = xsurf100_zorro_tbl,
+ .probe = xsurf100_probe,
+ .remove = xsurf100_remove,
+};
+
+module_driver(xsurf100_driver, zorro_register_driver, zorro_unregister_driver);
+
+MODULE_DESCRIPTION("X-Surf 100 driver");
+MODULE_AUTHOR("Michael Karcher <kernel@mkarcher.dialup.fu-berlin.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 603a5704dab8..af766fd61151 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -33,9 +33,9 @@ source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
source "drivers/net/ethernet/aurora/Kconfig"
-source "drivers/net/ethernet/cadence/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
+source "drivers/net/ethernet/cadence/Kconfig"
source "drivers/net/ethernet/calxeda/Kconfig"
source "drivers/net/ethernet/cavium/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
@@ -72,16 +72,16 @@ source "drivers/net/ethernet/dec/Kconfig"
source "drivers/net/ethernet/dlink/Kconfig"
source "drivers/net/ethernet/emulex/Kconfig"
source "drivers/net/ethernet/ezchip/Kconfig"
-source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/hp/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
+source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/i825xx/Kconfig"
+source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -115,6 +115,7 @@ source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
source "drivers/net/ethernet/moxa/Kconfig"
+source "drivers/net/ethernet/mscc/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
@@ -160,20 +161,21 @@ source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
source "drivers/net/ethernet/qualcomm/Kconfig"
+source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
source "drivers/net/ethernet/renesas/Kconfig"
-source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
-source "drivers/net/ethernet/silan/Kconfig"
-source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
+source "drivers/net/ethernet/silan/Kconfig"
+source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
source "drivers/net/ethernet/toshiba/Kconfig"
@@ -182,6 +184,5 @@ source "drivers/net/ethernet/via/Kconfig"
source "drivers/net/ethernet/wiznet/Kconfig"
source "drivers/net/ethernet/xilinx/Kconfig"
source "drivers/net/ethernet/xircom/Kconfig"
-source "drivers/net/ethernet/synopsys/Kconfig"
endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 2bfd2eea50bf..8fbfe9ce2fa5 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index c99e3e845ac0..a90080f12e67 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1074,16 +1074,12 @@ static int amd8111e_calc_coalesce(struct net_device *dev)
amd8111e_set_coalesce(dev,TX_INTR_COAL);
coal_conf->tx_coal_type = MEDIUM_COALESCE;
}
-
- }
- else if(tx_pkt_size >= 1024){
- if (tx_pkt_size >= 1024){
- if(coal_conf->tx_coal_type != HIGH_COALESCE){
- coal_conf->tx_timeout = 4;
- coal_conf->tx_event_count = 8;
- amd8111e_set_coalesce(dev,TX_INTR_COAL);
- coal_conf->tx_coal_type = HIGH_COALESCE;
- }
+ } else if (tx_pkt_size >= 1024) {
+ if (coal_conf->tx_coal_type != HIGH_COALESCE) {
+ coal_conf->tx_timeout = 4;
+ coal_conf->tx_event_count = 8;
+ amd8111e_set_coalesce(dev, TX_INTR_COAL);
+ coal_conf->tx_coal_type = HIGH_COALESCE;
}
}
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 7ea72ef11a55..d272dc6984ac 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1321,6 +1321,10 @@
#define MDIO_VEND2_AN_STAT 0x8002
#endif
+#ifndef MDIO_VEND2_PMA_CDR_CONTROL
+#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
+#endif
+
#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif
@@ -1369,6 +1373,10 @@
#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100
+#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01
+#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
+#define XGBE_PMA_CDR_TRACK_EN_ON 0x01
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 7d128be61310..b91143947ed2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
"debugfs_create_file failed\n");
}
+ if (pdata->vdata->an_cdr_workaround) {
+ pfile = debugfs_create_bool("an_cdr_workaround", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_workaround);
+ if (!pfile)
+ netdev_err(pdata->netdev,
+ "debugfs_create_bool failed\n");
+
+ pfile = debugfs_create_bool("an_cdr_track_early", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_track_early);
+ if (!pfile)
+ netdev_err(pdata->netdev,
+ "debugfs_create_bool failed\n");
+ }
+
kfree(buf);
}
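The two new debugfs booleans map onto fields that the v2 PHY code reads at
runtime (see the xgbe-phy-v2.c hunks further down); roughly, as a sketch of
the wiring rather than additional code:

/*
 *	an_cdr_workaround   -> pdata->debugfs_an_cdr_workaround
 *	                       gates xgbe_phy_cdr_track()/xgbe_phy_cdr_notrack()
 *	an_cdr_track_early  -> pdata->debugfs_an_cdr_track_early
 *	                       selects whether CDR tracking is re-enabled before
 *	                       (kr_training_pre) or after (kr_training_post)
 *	                       KR training is initiated
 */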
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 795e556d4a3f..441d0973957b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
/* Call MDIO/PHY initialization routine */
+ pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
ret = pdata->phy_if.phy_init(pdata);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 072b9f664597..1b45cd73a258 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
xgbe_an73_set(pdata, false, false);
xgbe_an73_disable_interrupts(pdata);
+ pdata->an_start = 0;
+
netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
}
static void xgbe_an_restart(struct xgbe_prv_data *pdata)
{
+ if (pdata->phy_if.phy_impl.an_pre)
+ pdata->phy_if.phy_impl.an_pre(pdata);
+
switch (pdata->an_mode) {
case XGBE_AN_MODE_CL73:
case XGBE_AN_MODE_CL73_REDRV:
@@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata)
static void xgbe_an_disable(struct xgbe_prv_data *pdata)
{
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
switch (pdata->an_mode) {
case XGBE_AN_MODE_CL73:
case XGBE_AN_MODE_CL73_REDRV:
@@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
reg);
- if (pdata->phy_if.phy_impl.kr_training_post)
- pdata->phy_if.phy_impl.kr_training_post(pdata);
-
netif_dbg(pdata, link, pdata->netdev,
"KR training initiated\n");
+
+ if (pdata->phy_if.phy_impl.kr_training_post)
+ pdata->phy_if.phy_impl.kr_training_post(pdata);
}
return XGBE_AN_PAGE_RECEIVED;
@@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
return XGBE_AN_NO_LINK;
}
- xgbe_an73_disable(pdata);
+ xgbe_an_disable(pdata);
xgbe_switch_mode(pdata);
- xgbe_an73_restart(pdata);
+ xgbe_an_restart(pdata);
return XGBE_AN_INCOMPAT_LINK;
}
@@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
pdata->an_result = pdata->an_state;
pdata->an_state = XGBE_AN_READY;
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
xgbe_state_as_string(pdata->an_result));
}
@@ -903,6 +914,9 @@ again:
pdata->kx_state = XGBE_RX_BPA;
pdata->an_start = 0;
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
xgbe_state_as_string(pdata->an_result));
}
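Taken together, the mdio changes give the PHY implementation two optional
hooks bracketing auto-negotiation; the resulting call order, inferred from
the hunks above, is roughly:

/*
 *	xgbe_an_restart()
 *	    phy_impl.an_pre()     -> v2 PHY disables CDR tracking (KR mode only)
 *	    CL73/CL37 AN is (re)started
 *	    ...
 *	AN completes, fails, or is disabled
 *	    phy_impl.an_post()    -> v2 PHY re-enables CDR tracking and adjusts
 *	                             its retry delay when AN did not complete
 */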
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index eb23f9ba1a9a..82d1f416ee2a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = {
.irq_reissue_support = 1,
.tx_desc_prefetch = 5,
.rx_desc_prefetch = 5,
+ .an_cdr_workaround = 1,
};
static const struct xgbe_version_data xgbe_v2b = {
@@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = {
.irq_reissue_support = 1,
.tx_desc_prefetch = 5,
.rx_desc_prefetch = 5,
+ .an_cdr_workaround = 1,
};
static const struct pci_device_id xgbe_pci_table[] = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 3304a291aa96..aac884314000 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -147,6 +147,14 @@
/* Rate-change complete wait/retry count */
#define XGBE_RATECHANGE_COUNT 500
+/* CDR delay values for KR support (in usec) */
+#define XGBE_CDR_DELAY_INIT 10000
+#define XGBE_CDR_DELAY_INC 10000
+#define XGBE_CDR_DELAY_MAX 100000
+
+/* RRC frequency during link status check */
+#define XGBE_RRC_FREQUENCY 10
+
enum xgbe_port_mode {
XGBE_PORT_MODE_RSVD = 0,
XGBE_PORT_MODE_BACKPLANE,
@@ -245,6 +253,10 @@ enum xgbe_sfp_speed {
#define XGBE_SFP_BASE_VENDOR_SN 4
#define XGBE_SFP_BASE_VENDOR_SN_LEN 16
+#define XGBE_SFP_EXTD_OPT1 1
+#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1)
+#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3)
+
#define XGBE_SFP_EXTD_DIAG 28
#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
@@ -324,6 +336,7 @@ struct xgbe_phy_data {
unsigned int sfp_gpio_address;
unsigned int sfp_gpio_mask;
+ unsigned int sfp_gpio_inputs;
unsigned int sfp_gpio_rx_los;
unsigned int sfp_gpio_tx_fault;
unsigned int sfp_gpio_mod_absent;
@@ -355,6 +368,10 @@ struct xgbe_phy_data {
unsigned int redrv_addr;
unsigned int redrv_lane;
unsigned int redrv_model;
+
+ /* KR AN support */
+ unsigned int phy_cdr_notrack;
+ unsigned int phy_cdr_delay;
};
/* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
@@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
phy_data->sfp_phy_avail = 1;
}
+static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
+{
+ u8 *sfp_extd = phy_data->sfp_eeprom.extd;
+
+ if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
+ return false;
+
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
+ return false;
+
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
+ return true;
+
+ return false;
+}
+
+static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
+{
+ u8 *sfp_extd = phy_data->sfp_eeprom.extd;
+
+ if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
+ return false;
+
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
+ return false;
+
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
+ return true;
+
+ return false;
+}
+
+static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
+{
+ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
+ return false;
+
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
+ return true;
+
+ return false;
+}
+
static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
return;
+ /* Update transceiver signals (eeprom extd/options) */
+ phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
+ phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
+
if (xgbe_phy_sfp_parse_quirks(pdata))
return;
@@ -1184,7 +1248,6 @@ put:
static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int gpio_input;
u8 gpio_reg, gpio_ports[2];
int ret;
@@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
return;
}
- gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
-
- if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) {
- /* No GPIO, just assume the module is present for now */
- phy_data->sfp_mod_absent = 0;
- } else {
- if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
- phy_data->sfp_mod_absent = 0;
- }
-
- if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) &&
- (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
- phy_data->sfp_rx_los = 1;
+ phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
- if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) &&
- (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
- phy_data->sfp_tx_fault = 1;
+ phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);
}
static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
@@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 1;
/* No link, attempt a receiver reset cycle */
- if (phy_data->rrc_count++) {
+ if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
phy_data->rrc_count = 0;
xgbe_phy_rrc(pdata);
}
@@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
return true;
}
+static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->debugfs_an_cdr_workaround)
+ return;
+
+ if (!phy_data->phy_cdr_notrack)
+ return;
+
+ usleep_range(phy_data->phy_cdr_delay,
+ phy_data->phy_cdr_delay + 500);
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ XGBE_PMA_CDR_TRACK_EN_MASK,
+ XGBE_PMA_CDR_TRACK_EN_ON);
+
+ phy_data->phy_cdr_notrack = 0;
+}
+
+static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->debugfs_an_cdr_workaround)
+ return;
+
+ if (phy_data->phy_cdr_notrack)
+ return;
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ XGBE_PMA_CDR_TRACK_EN_MASK,
+ XGBE_PMA_CDR_TRACK_EN_OFF);
+
+ xgbe_phy_rrc(pdata);
+
+ phy_data->phy_cdr_notrack = 1;
+}
+
+static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->debugfs_an_cdr_track_early)
+ xgbe_phy_cdr_track(pdata);
+}
+
+static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
+{
+ if (pdata->debugfs_an_cdr_track_early)
+ xgbe_phy_cdr_track(pdata);
+}
+
+static void xgbe_phy_an_post(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != XGBE_MODE_KR)
+ break;
+
+ xgbe_phy_cdr_track(pdata);
+
+ switch (pdata->an_result) {
+ case XGBE_AN_READY:
+ case XGBE_AN_COMPLETE:
+ break;
+ default:
+ if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
+ phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
+ else
+ phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case XGBE_AN_MODE_CL73:
+ case XGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != XGBE_MODE_KR)
+ break;
+
+ xgbe_phy_cdr_notrack(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
xgbe_phy_sfp_reset(phy_data);
xgbe_phy_sfp_mod_absent(pdata);
+ /* Reset CDR support */
+ xgbe_phy_cdr_track(pdata);
+
/* Power off the PHY */
xgbe_phy_power_off(pdata);
@@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* Start in highest supported mode */
xgbe_phy_set_mode(pdata, phy_data->start_mode);
+ /* Reset CDR support */
+ xgbe_phy_cdr_track(pdata);
+
/* After starting the I2C controller, we can check for an SFP */
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_SFP:
@@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
}
}
+ phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
+
/* Register for driving external PHYs */
mii = devm_mdiobus_alloc(pdata->dev);
if (!mii) {
@@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
phy_impl->an_advertising = xgbe_phy_an_advertising;
phy_impl->an_outcome = xgbe_phy_an_outcome;
+
+ phy_impl->an_pre = xgbe_phy_an_pre;
+ phy_impl->an_post = xgbe_phy_an_post;
+
+ phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
+ phy_impl->kr_training_post = xgbe_phy_kr_training_post;
}
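A small, self-contained sketch of the retry-delay back-off that
xgbe_phy_an_post() applies when CL73 auto-negotiation does not complete in
KR mode, using the constants added above (the helper and the demo program
are illustrative, not driver code):

#include <stdio.h>

#define XGBE_CDR_DELAY_INIT	10000	/* usec */
#define XGBE_CDR_DELAY_INC	10000	/* usec */
#define XGBE_CDR_DELAY_MAX	100000	/* usec */

/* grow the delay by 10 ms per failed attempt, wrap after 100 ms */
static unsigned int next_cdr_delay(unsigned int delay)
{
	if (delay < XGBE_CDR_DELAY_MAX)
		return delay + XGBE_CDR_DELAY_INC;
	return XGBE_CDR_DELAY_INIT;
}

int main(void)
{
	unsigned int delay = XGBE_CDR_DELAY_INIT;
	int i;

	for (i = 0; i < 12; i++) {
		printf("attempt %2d: wait %6u usec before re-enabling CDR tracking\n",
		       i, delay);
		delay = next_cdr_delay(delay);
	}
	return 0;
}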
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index ad102c8bac7b..95d4b56448c6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -833,6 +833,7 @@ struct xgbe_hw_if {
/* This structure represents implementation specific routines for an
* implementation of a PHY. All routines are required unless noted below.
* Optional routines:
+ * an_pre, an_post
* kr_training_pre, kr_training_post
*/
struct xgbe_phy_impl_if {
@@ -875,6 +876,10 @@ struct xgbe_phy_impl_if {
/* Process results of auto-negotiation */
enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
+ /* Pre/Post auto-negotiation support */
+ void (*an_pre)(struct xgbe_prv_data *);
+ void (*an_post)(struct xgbe_prv_data *);
+
/* Pre/Post KR training enablement support */
void (*kr_training_pre)(struct xgbe_prv_data *);
void (*kr_training_post)(struct xgbe_prv_data *);
@@ -989,6 +994,7 @@ struct xgbe_version_data {
unsigned int irq_reissue_support;
unsigned int tx_desc_prefetch;
unsigned int rx_desc_prefetch;
+ unsigned int an_cdr_workaround;
};
struct xgbe_vxlan_data {
@@ -1257,6 +1263,9 @@ struct xgbe_prv_data {
unsigned int debugfs_xprop_reg;
unsigned int debugfs_xi2c_reg;
+
+ bool debugfs_an_cdr_workaround;
+ bool debugfs_an_cdr_track_early;
};
/* Function prototypes*/
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 32f6d2e24d66..1a1a6380c128 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -95,6 +95,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
/*rss rings */
cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
cfg->vecs = min(cfg->vecs, num_online_cpus());
+ cfg->vecs = min(cfg->vecs, self->irqvecs);
/* cfg->vecs should be power of 2 for RSS */
if (cfg->vecs >= 8U)
cfg->vecs = 8U;
@@ -246,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->hw_features |= aq_hw_caps->hw_features;
self->ndev->features = aq_hw_caps->hw_features;
+ self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 219b550d1665..faa533a0ec47 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -80,6 +80,7 @@ struct aq_nic_s {
struct pci_dev *pdev;
unsigned int msix_entry_mask;
+ u32 irqvecs;
};
static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index ecc6306f940f..a50e08bb4748 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -267,16 +267,16 @@ static int aq_pci_probe(struct pci_dev *pdev,
numvecs = min(numvecs, num_online_cpus());
/*enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
- err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs,
- PCI_IRQ_MSIX);
-
- if (err < 0) {
- err = pci_alloc_irq_vectors(self->pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
- if (err < 0)
- goto err_hwinit;
+ numvecs = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI |
+ PCI_IRQ_LEGACY);
+
+ if (numvecs < 0) {
+ err = numvecs;
+ goto err_hwinit;
}
#endif
+ self->irqvecs = numvecs;
/* net device init */
aq_nic_cfg_start(self);
@@ -298,9 +298,9 @@ err_free_aq_hw:
kfree(self->aq_hw);
err_ioremap:
free_netdev(ndev);
-err_pci_func:
- pci_release_regions(pdev);
err_ndev:
+ pci_release_regions(pdev);
+err_pci_func:
pci_disable_device(pdev);
return err;
}
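The probe change collapses the separate MSI-X then MSI/INTx attempts into a
single allocation; the general shape of that pattern, with assumed local
names rather than the driver's exact code, is:

/* Try MSI-X first, then MSI, then legacy INTx.  On success the return
 * value is the number of vectors actually granted (>= 1), which may be
 * smaller than the requested maximum and must be used to size the rings.
 */
int nvec = pci_alloc_irq_vectors(pdev, 1, want,
				 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
if (nvec < 0)
	return nvec;	/* no interrupt scheme could be set up */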
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index f9a3c1a76d5d..d5fca2e5a9bc 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -654,7 +654,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
pkts = priv->rx_max_coalesced_frames;
if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
- moder = net_dim_get_def_profile(priv->dim.dim.mode);
+ moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
usecs = moder.usec;
pkts = moder.pkts;
}
@@ -1064,7 +1064,7 @@ static void bcm_sysport_dim_work(struct work_struct *work)
struct bcm_sysport_priv *priv =
container_of(ndim, struct bcm_sysport_priv, dim);
struct net_dim_cq_moder cur_profile =
- net_dim_get_profile(dim->mode, dim->profile_ix);
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
dim->state = NET_DIM_START_MEASURE;
@@ -1437,7 +1437,7 @@ static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
/* If DIM was enabled, re-apply default parameters */
if (dim->use_dim) {
- moder = net_dim_get_def_profile(dim->dim.mode);
+ moder = net_dim_get_def_rx_moderation(dim->dim.mode);
usecs = moder.usec;
pkts = moder.pkts;
}
@@ -2144,14 +2144,21 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_select_queue = bcm_sysport_select_queue,
};
-static int bcm_sysport_map_queues(struct net_device *dev,
+static int bcm_sysport_map_queues(struct notifier_block *nb,
struct dsa_notifier_register_info *info)
{
- struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *ring;
+ struct bcm_sysport_priv *priv;
struct net_device *slave_dev;
unsigned int num_tx_queues;
unsigned int q, start, port;
+ struct net_device *dev;
+
+ priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
+ if (priv->netdev != info->master)
+ return 0;
+
+ dev = info->master;
/* We can't be setting up queue inspection for non directly attached
* switches
@@ -2174,11 +2181,12 @@ static int bcm_sysport_map_queues(struct net_device *dev,
if (priv->is_lite)
netif_set_real_num_tx_queues(slave_dev,
slave_dev->num_tx_queues / 2);
+
num_tx_queues = slave_dev->real_num_tx_queues;
if (priv->per_port_num_tx_queues &&
priv->per_port_num_tx_queues != num_tx_queues)
- netdev_warn(slave_dev, "asymetric number of per-port queues\n");
+ netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
priv->per_port_num_tx_queues = num_tx_queues;
@@ -2201,7 +2209,7 @@ static int bcm_sysport_map_queues(struct net_device *dev,
return 0;
}
-static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
+static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct dsa_notifier_register_info *info;
@@ -2211,7 +2219,7 @@ static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
info = ptr;
- return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
+ return notifier_from_errno(bcm_sysport_map_queues(nb, info));
}
#define REV_FMT "v%2x.%02x"
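The queue-mapping callback now recovers its private context from the
notifier_block itself and verifies that the event targets its own netdev.
The generic shape of that idiom looks like the sketch below, where
struct my_priv is a placeholder for the driver's private structure:

static int my_dsa_notifier(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	/* 'nb' is embedded in the private struct, so the private data is
	 * recovered with container_of() instead of being taken on trust
	 * from the notifier payload.
	 */
	struct my_priv *priv = container_of(nb, struct my_priv, dsa_notifier);
	struct dsa_notifier_register_info *info = ptr;

	if (priv->netdev != info->master)
		return NOTIFY_DONE;	/* event is for another device */

	/* ... handle the event for our own master device ... */
	return NOTIFY_OK;
}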
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 7c560d545c03..5a779b19d149 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o
bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
+bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f83769d8047b..dfa0839f6656 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -62,6 +62,7 @@
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
+#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -2383,6 +2384,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring;
+ u8 qidx;
ring = &txr->tx_ring_struct;
@@ -2411,7 +2413,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
}
- ring->queue_id = bp->q_info[j].queue_id;
+ qidx = bp->tc_to_qidx[j];
+ ring->queue_id = bp->q_info[qidx].queue_id;
if (i < bp->tx_nr_rings_xdp)
continue;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -3493,15 +3496,29 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
+ /* convert timeout to usec */
+ timeout *= 1000;
i = 0;
- tmo_count = timeout * 40;
+ /* Short timeout for the first few iterations:
+ * number of loops = number of loops for short timeout +
+ * number of loops for standard timeout.
+ */
+ tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
+ timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
+ tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
if (intr_process) {
/* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
i++ < tmo_count) {
- usleep_range(25, 40);
+ /* on first few passes, just barely sleep */
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER)
+ usleep_range(HWRM_SHORT_MIN_TIMEOUT,
+ HWRM_SHORT_MAX_TIMEOUT);
+ else
+ usleep_range(HWRM_MIN_TIMEOUT,
+ HWRM_MAX_TIMEOUT);
}
if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
@@ -3513,25 +3530,34 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
HWRM_RESP_LEN_SFT;
valid = bp->hwrm_cmd_resp_addr + len - 1;
} else {
+ int j;
+
/* Check if response len is updated */
for (i = 0; i < tmo_count; i++) {
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
if (len)
break;
- usleep_range(25, 40);
+ /* on first few passes, just barely sleep */
+			if (i < HWRM_SHORT_TIMEOUT_COUNTER)
+ usleep_range(HWRM_SHORT_MIN_TIMEOUT,
+ HWRM_SHORT_MAX_TIMEOUT);
+ else
+ usleep_range(HWRM_MIN_TIMEOUT,
+ HWRM_MAX_TIMEOUT);
}
if (i >= tmo_count) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
- timeout, le16_to_cpu(req->req_type),
+ HWRM_TOTAL_TIMEOUT(i),
+ le16_to_cpu(req->req_type),
le16_to_cpu(req->seq_id), len);
return -1;
}
/* Last byte of resp contains valid bit */
valid = bp->hwrm_cmd_resp_addr + len - 1;
- for (i = 0; i < 5; i++) {
+ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
/* make sure we read from updated DMA memory */
dma_rmb();
if (*valid)
@@ -3539,9 +3565,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
udelay(1);
}
- if (i >= 5) {
+ if (j >= HWRM_VALID_BIT_DELAY_USEC) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
- timeout, le16_to_cpu(req->req_type),
+ HWRM_TOTAL_TIMEOUT(i),
+ le16_to_cpu(req->req_type),
le16_to_cpu(req->seq_id), len, *valid);
return -1;
}
@@ -4334,26 +4361,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc || err) {
- switch (ring_type) {
- case RING_FREE_REQ_RING_TYPE_L2_CMPL:
- netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
- rc, err);
- return -1;
-
- case RING_FREE_REQ_RING_TYPE_RX:
- netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
- rc, err);
- return -1;
-
- case RING_FREE_REQ_RING_TYPE_TX:
- netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
- rc, err);
- return -1;
-
- default:
- netdev_err(bp->dev, "Invalid ring\n");
- return -1;
- }
+ netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
+ ring_type, rc, err);
+ return -EIO;
}
ring->fw_ring_id = ring_id;
return rc;
@@ -4477,23 +4487,9 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc || error_code) {
- switch (ring_type) {
- case RING_FREE_REQ_RING_TYPE_L2_CMPL:
- netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
- rc);
- return rc;
- case RING_FREE_REQ_RING_TYPE_RX:
- netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
- rc);
- return rc;
- case RING_FREE_REQ_RING_TYPE_TX:
- netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
- rc);
- return rc;
- default:
- netdev_err(bp->dev, "Invalid ring\n");
- return -1;
- }
+ netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
+ ring_type, rc, error_code);
+ return -EIO;
}
return 0;
}
@@ -4721,6 +4717,10 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics);
+ req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS);
+ req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+ req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
@@ -5309,6 +5309,7 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
for (i = 0; i < bp->max_tc; i++) {
bp->q_info[i].queue_id = *qptr++;
bp->q_info[i].queue_profile = *qptr++;
+ bp->tc_to_qidx[i] = i;
}
qportcfg_exit:
@@ -5376,7 +5377,8 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
struct tm tm;
time64_t now = ktime_get_real_seconds();
- if (bp->hwrm_spec_code < 0x10400)
+ if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
+ bp->hwrm_spec_code < 0x10400)
return -EOPNOTSUPP;
time64_to_tm(now, 0, &tm);
@@ -5958,6 +5960,9 @@ static int bnxt_init_msix(struct bnxt *bp)
if (total_vecs > max)
total_vecs = max;
+ if (!total_vecs)
+ return 0;
+
msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
if (!msix_ent)
return -ENOMEM;
@@ -6457,6 +6462,9 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
}
mutex_unlock(&bp->hwrm_cmd_lock);
+ if (!BNXT_SINGLE_PF(bp))
+ return 0;
+
diff = link_info->support_auto_speeds ^ link_info->advertising;
if ((link_info->support_auto_speeds | diff) !=
link_info->support_auto_speeds) {
@@ -6843,6 +6851,8 @@ static void bnxt_preset_reg_win(struct bnxt *bp)
}
}
+static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -6850,6 +6860,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev);
if (irq_re_init) {
+ /* Reserve rings now if none were reserved at driver probe. */
+ rc = bnxt_init_dflt_ring_mode(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Failed to reserve default rings at open\n");
+ return rc;
+ }
rc = bnxt_reserve_rings(bp);
if (rc)
return rc;
@@ -6877,6 +6893,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
bnxt_enable_napi(bp);
+ bnxt_debug_dev_init(bp);
rc = bnxt_init_nic(bp, irq_re_init);
if (rc) {
@@ -6909,6 +6926,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
return 0;
open_err:
+ bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
bnxt_del_napi(bp);
@@ -7002,6 +7020,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
+ bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
del_timer_sync(&bp->timer);
bnxt_free_skbs(bp);
@@ -7279,6 +7298,25 @@ skip_uc:
return rc;
}
+static bool bnxt_can_reserve_rings(struct bnxt *bp)
+{
+#ifdef CONFIG_BNXT_SRIOV
+ if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ /* No minimum rings were provisioned by the PF. Don't
+ * reserve rings by default when device is down.
+ */
+ if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
+ return true;
+
+ if (!netif_running(bp->dev))
+ return false;
+ }
+#endif
+ return true;
+}
+
/* If the chip and firmware supports RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
@@ -7295,7 +7333,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
int vnics, max_vnics, max_rss_ctxs;
- if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
+ if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
return false;
vnics = 1 + bp->rx_nr_rings;
@@ -7729,7 +7767,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->coal_bufs = 30;
coal->coal_ticks_irq = 1;
coal->coal_bufs_irq = 2;
- coal->idle_thresh = 25;
+ coal->idle_thresh = 50;
coal->bufs_per_record = 2;
coal->budget = 64; /* NAPI budget */
@@ -8529,6 +8567,9 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
+ if (!bnxt_can_reserve_rings(bp))
+ return 0;
+
if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS;
dflt_rings = netif_get_num_default_rss_queues();
@@ -8574,6 +8615,29 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
return rc;
}
+static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
+{
+ int rc;
+
+ if (bp->tx_nr_rings)
+ return 0;
+
+ rc = bnxt_set_dflt_rings(bp, true);
+ if (rc) {
+ netdev_err(bp->dev, "Not enough rings available.\n");
+ return rc;
+ }
+ rc = bnxt_init_int_mode(bp);
+ if (rc)
+ return rc;
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
+ bp->flags |= BNXT_FLAG_RFS;
+ bp->dev->features |= NETIF_F_NTUPLE;
+ }
+ return 0;
+}
+
int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
int rc;
@@ -8614,8 +8678,8 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
} else {
eth_hw_addr_random(bp->dev);
- rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
}
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
#endif
}
return rc;
@@ -9078,6 +9142,7 @@ static struct pci_driver bnxt_pci_driver = {
static int __init bnxt_init(void)
{
+ bnxt_debug_init();
return pci_register_driver(&bnxt_pci_driver);
}
@@ -9086,6 +9151,7 @@ static void __exit bnxt_exit(void)
pci_unregister_driver(&bnxt_pci_driver);
if (bnxt_pf_wq)
destroy_workqueue(bnxt_pf_wq);
+ bnxt_debug_exit();
}
module_init(bnxt_init);
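For a VF probed with no pre-provisioned rings, the net effect of the bnxt.c
hunks above is that ring setup moves from probe time to the first open;
roughly, as inferred from the changes:

/*
 *	probe:
 *	    bnxt_set_dflt_rings()          -> returns 0 early, nothing reserved
 *	                                      (bnxt_can_reserve_rings() is false)
 *	first open:
 *	    __bnxt_open_nic()
 *	        bnxt_init_dflt_ring_mode() -> picks default rings, sets up the
 *	                                      interrupt mode and RFS flags
 *	        bnxt_reserve_rings()       -> completes the reservation
 */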
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3d55d3b56865..9b14eb610b9f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -532,6 +532,19 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
+#define HWRM_SHORT_MIN_TIMEOUT 3
+#define HWRM_SHORT_MAX_TIMEOUT 10
+#define HWRM_SHORT_TIMEOUT_COUNTER 5
+
+#define HWRM_MIN_TIMEOUT 25
+#define HWRM_MAX_TIMEOUT 40
+
+#define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? \
+ ((n) * HWRM_SHORT_MIN_TIMEOUT) : \
+ (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
+ ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
+
+#define HWRM_VALID_BIT_DELAY_USEC 20
#define BNXT_RX_EVENT 1
#define BNXT_AGG_EVENT 2
@@ -1242,6 +1255,7 @@ struct bnxt {
u8 max_tc;
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
+ u8 tc_to_qidx[BNXT_MAX_QUEUE];
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -1384,6 +1398,8 @@ struct bnxt {
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
u8 switch_id[8];
struct bnxt_tc_info *tc_info;
+ struct dentry *debugfs_pdev;
+ struct dentry *debugfs_dim;
};
#define BNXT_RX_STATS_OFFSET(counter) \
@@ -1398,8 +1414,7 @@ struct bnxt {
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
-#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
-#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
+#define SFF_DIAG_SUPPORT_OFFSET 0x5c
#define SFF_MODULE_ID_SFP 0x3
#define SFF_MODULE_ID_QSFP 0xc
#define SFF_MODULE_ID_QSFP_PLUS 0xd
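The new constants split HWRM polling into a short phase and a standard
phase, and HWRM_TOTAL_TIMEOUT(n) reconstructs the minimum time slept after
n poll iterations; a few worked values:

/*
 *	n = 3   ->  3 * 3 us                     =   9 us
 *	n = 5   ->  5 * 3 us                     =  15 us
 *	n = 25  ->  5 * 3 us + (25 - 5) * 25 us  = 515 us
 *
 * i.e. the first HWRM_SHORT_TIMEOUT_COUNTER iterations are costed at
 * HWRM_SHORT_MIN_TIMEOUT and the remainder at HWRM_MIN_TIMEOUT.
 */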
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 3c746f2d9ed8..d5bc72cecde3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -21,6 +21,21 @@
#include "bnxt_dcb.h"
#ifdef CONFIG_BNXT_DCB
+static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
+{
+ int i, j;
+
+ for (i = 0; i < bp->max_tc; i++) {
+ if (bp->q_info[i].queue_id == queue_id) {
+ for (j = 0; j < bp->max_tc; j++) {
+ if (bp->tc_to_qidx[j] == i)
+ return j;
+ }
+ }
+ }
+ return -EINVAL;
+}
+
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
struct hwrm_queue_pri2cos_cfg_input req = {0};
@@ -33,10 +48,13 @@ static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
pri2cos = &req.pri0_cos_queue_id;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ u8 qidx;
+
req.enables |= cpu_to_le32(
QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
- pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id;
+ qidx = bp->tc_to_qidx[ets->prio_tc[i]];
+ pri2cos[i] = bp->q_info[qidx].queue_id;
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return rc;
@@ -55,17 +73,15 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
u8 *pri2cos = &resp->pri0_cos_queue_id;
- int i, j;
+ int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
u8 queue_id = pri2cos[i];
+ int tc;
- for (j = 0; j < bp->max_tc; j++) {
- if (bp->q_info[j].queue_id == queue_id) {
- ets->prio_tc[i] = j;
- break;
- }
- }
+ tc = bnxt_queue_to_tc(bp, queue_id);
+ if (tc >= 0)
+ ets->prio_tc[i] = tc;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -81,13 +97,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
void *data;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
- data = &req.unused_0;
- for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) {
+ for (i = 0; i < max_tc; i++) {
+ u8 qidx;
+
req.enables |= cpu_to_le32(
QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
memset(&cos2bw, 0, sizeof(cos2bw));
- cos2bw.queue_id = bp->q_info[i].queue_id;
+ qidx = bp->tc_to_qidx[i];
+ cos2bw.queue_id = bp->q_info[qidx].queue_id;
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
cos2bw.tsa =
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
@@ -103,8 +121,9 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
cpu_to_le32((ets->tc_tx_bw[i] * 100) |
BW_VALUE_UNIT_PERCENT1_100);
}
+ data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
- if (i == 0) {
+ if (qidx == 0) {
req.queue_id0 = cos2bw.queue_id;
req.unused_0 = 0;
}
@@ -132,66 +151,81 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
- int j;
+ int tc;
memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
if (i == 0)
cos2bw.queue_id = resp->queue_id0;
- for (j = 0; j < bp->max_tc; j++) {
- if (bp->q_info[j].queue_id != cos2bw.queue_id)
- continue;
- if (cos2bw.tsa ==
- QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
- ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT;
- } else {
- ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS;
- ets->tc_tx_bw[j] = cos2bw.bw_weight;
- }
+ tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
+ if (tc < 0)
+ continue;
+
+ if (cos2bw.tsa ==
+ QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
+ ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
+ } else {
+ ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
+ ets->tc_tx_bw[tc] = cos2bw.bw_weight;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
return 0;
}
-static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask)
+static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
{
- struct hwrm_queue_cfg_input req = {0};
- int i;
+ unsigned long qmap = 0;
+ int max = bp->max_tc;
+ int i, j, rc;
- if (netif_running(bp->dev))
- bnxt_tx_disable(bp);
+ /* Assign lossless TCs first */
+ for (i = 0, j = 0; i < max; ) {
+ if (lltc_mask & (1 << i)) {
+ if (BNXT_LLQ(bp->q_info[j].queue_profile)) {
+ bp->tc_to_qidx[i] = j;
+ __set_bit(j, &qmap);
+ i++;
+ }
+ j++;
+ continue;
+ }
+ i++;
+ }
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1);
- req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
- req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);
+ for (i = 0, j = 0; i < max; i++) {
+ if (lltc_mask & (1 << i))
+ continue;
+ j = find_next_zero_bit(&qmap, max, j);
+ bp->tc_to_qidx[i] = j;
+ __set_bit(j, &qmap);
+ j++;
+ }
- /* Configure lossless queues to lossy first */
- req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
- for (i = 0; i < bp->max_tc; i++) {
- if (BNXT_LLQ(bp->q_info[i].queue_profile)) {
- req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
- hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
- bp->q_info[i].queue_profile =
- QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
+ if (netif_running(bp->dev)) {
+ bnxt_close_nic(bp, false, false);
+ rc = bnxt_open_nic(bp, false, false);
+ if (rc) {
+ netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc);
+ return rc;
}
}
-
- /* Now configure desired queues to lossless */
- req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
- for (i = 0; i < bp->max_tc; i++) {
- if (lltc_mask & (1 << i)) {
- req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
- hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
- bp->q_info[i].queue_profile =
- QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
+ if (bp->ieee_ets) {
+ int tc = netdev_get_num_tc(bp->dev);
+
+ if (!tc)
+ tc = 1;
+ rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
+ if (rc) {
+ netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc);
+ return rc;
+ }
+ rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets);
+ if (rc) {
+ netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc);
+ return rc;
}
}
- if (netif_running(bp->dev))
- bnxt_tx_enable(bp);
-
return 0;
}
@@ -201,7 +235,7 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
struct ieee_ets *my_ets = bp->ieee_ets;
unsigned int tc_mask = 0, pri_mask = 0;
u8 i, pri, lltc_count = 0;
- bool need_q_recfg = false;
+ bool need_q_remap = false;
int rc;
if (!my_ets)
@@ -221,21 +255,25 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
if (lltc_count > bp->max_lltc)
return -EINVAL;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
- req.flags = cpu_to_le32(pri_mask);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return rc;
-
for (i = 0; i < bp->max_tc; i++) {
if (tc_mask & (1 << i)) {
- if (!BNXT_LLQ(bp->q_info[i].queue_profile))
- need_q_recfg = true;
+ u8 qidx = bp->tc_to_qidx[i];
+
+ if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {
+ need_q_remap = true;
+ break;
+ }
}
}
- if (need_q_recfg)
- rc = bnxt_hwrm_queue_cfg(bp, tc_mask);
+ if (need_q_remap)
+ rc = bnxt_queue_remap(bp, tc_mask);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
+ req.flags = cpu_to_le32(pri_mask);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
return rc;
}
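A worked example of the two-pass remap in bnxt_queue_remap(), assuming
max_tc = 4, hardware queue profiles { lossy, lossless, lossy, lossless }
and lltc_mask = 0x1 (only TC0 asks for a lossless queue):

/*
 *	pass 1 (lossless TCs claim lossless queues first):
 *	    tc_to_qidx[0] = 1        queue 1 is the first lossless queue
 *
 *	pass 2 (remaining TCs take the still-unused queues in order):
 *	    tc_to_qidx[1] = 0
 *	    tc_to_qidx[2] = 2
 *	    tc_to_qidx[3] = 3
 */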
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
new file mode 100644
index 000000000000..94e208e9789f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -0,0 +1,124 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "bnxt_hsi.h"
+#include <linux/net_dim.h>
+#include "bnxt.h"
+#include "bnxt_debugfs.h"
+
+static struct dentry *bnxt_debug_mnt;
+
+static ssize_t debugfs_dim_read(struct file *filep,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct net_dim *dim = filep->private_data;
+ int len;
+ char *buf;
+
+ if (*ppos)
+ return 0;
+ if (!dim)
+ return -ENODEV;
+ buf = kasprintf(GFP_KERNEL,
+ "state = %d\n" \
+ "profile_ix = %d\n" \
+ "mode = %d\n" \
+ "tune_state = %d\n" \
+ "steps_right = %d\n" \
+ "steps_left = %d\n" \
+ "tired = %d\n",
+ dim->state,
+ dim->profile_ix,
+ dim->mode,
+ dim->tune_state,
+ dim->steps_right,
+ dim->steps_left,
+ dim->tired);
+ if (!buf)
+ return -ENOMEM;
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+ return len;
+}
+
+static const struct file_operations debugfs_dim_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = debugfs_dim_read,
+};
+
+static struct dentry *debugfs_dim_ring_init(struct net_dim *dim, int ring_idx,
+ struct dentry *dd)
+{
+ static char qname[16];
+
+ snprintf(qname, 10, "%d", ring_idx);
+ return debugfs_create_file(qname, 0600, dd,
+ dim, &debugfs_dim_fops);
+}
+
+void bnxt_debug_dev_init(struct bnxt *bp)
+{
+ const char *pname = pci_name(bp->pdev);
+ struct dentry *pdevf;
+ int i;
+
+ bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt);
+ if (bp->debugfs_pdev) {
+ pdevf = debugfs_create_dir("dim", bp->debugfs_pdev);
+ if (!pdevf) {
+ pr_err("failed to create debugfs entry %s/dim\n",
+ pname);
+ return;
+ }
+ bp->debugfs_dim = pdevf;
+ /* create files for each rx ring */
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+ if (cpr && bp->bnapi[i]->rx_ring) {
+ pdevf = debugfs_dim_ring_init(&cpr->dim, i,
+ bp->debugfs_dim);
+ if (!pdevf)
+ pr_err("failed to create debugfs entry %s/dim/%d\n",
+ pname, i);
+ }
+ }
+ } else {
+ pr_err("failed to create debugfs entry %s\n", pname);
+ }
+}
+
+void bnxt_debug_dev_exit(struct bnxt *bp)
+{
+ if (bp) {
+ debugfs_remove_recursive(bp->debugfs_pdev);
+ bp->debugfs_pdev = NULL;
+ }
+}
+
+void bnxt_debug_init(void)
+{
+ bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL);
+ if (!bnxt_debug_mnt)
+ pr_err("failed to init bnxt_en debugfs\n");
+}
+
+void bnxt_debug_exit(void)
+{
+ debugfs_remove_recursive(bnxt_debug_mnt);
+}
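With the init/exit and dev_init/dev_exit hooks wired in from bnxt.c, the new
file produces a per-device hierarchy under debugfs; an example layout (the
PCI address is illustrative):

/*
 *	/sys/kernel/debug/bnxt_en/0000:3b:00.0/dim/0
 *	/sys/kernel/debug/bnxt_en/0000:3b:00.0/dim/1
 *	...
 *
 * one file per completion ring that has an RX ring, each dumping the
 * net_dim state fields formatted by debugfs_dim_read() above.
 */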
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
new file mode 100644
index 000000000000..d0bb4887acd0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
@@ -0,0 +1,23 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+
+#ifdef CONFIG_DEBUG_FS
+void bnxt_debug_init(void);
+void bnxt_debug_exit(void);
+void bnxt_debug_dev_init(struct bnxt *bp);
+void bnxt_debug_dev_exit(struct bnxt *bp);
+#else
+static inline void bnxt_debug_init(void) {}
+static inline void bnxt_debug_exit(void) {}
+static inline void bnxt_debug_dev_init(struct bnxt *bp) {}
+static inline void bnxt_debug_dev_exit(struct bnxt *bp) {}
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
index 408dd190331e..afa97c8bb081 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -21,11 +21,11 @@ void bnxt_dim_work(struct work_struct *work)
struct bnxt_napi *bnapi = container_of(cpr,
struct bnxt_napi,
cp_ring);
- struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
- dim->profile_ix);
+ struct net_dim_cq_moder cur_moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
- cpr->rx_ring_coal.coal_ticks = cur_profile.usec;
- cpr->rx_ring_coal.coal_bufs = cur_profile.pkts;
+ cpr->rx_ring_coal.coal_ticks = cur_moder.usec;
+ cpr->rx_ring_coal.coal_bufs = cur_moder.pkts;
bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
dim->state = NET_DIM_START_MEASURE;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1f622ca2a64f..7270c8b0cef3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -140,6 +140,19 @@ reset_coalesce:
#define BNXT_RX_STATS_EXT_ENTRY(counter) \
{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
+enum {
+ RX_TOTAL_DISCARDS,
+ TX_TOTAL_DISCARDS,
+};
+
+static struct {
+ u64 counter;
+ char string[ETH_GSTRING_LEN];
+} bnxt_sw_func_stats[] = {
+ {0, "rx_total_discard_pkts"},
+ {0, "tx_total_discard_pkts"},
+};
+
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
@@ -237,6 +250,7 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
};
+#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
@@ -244,6 +258,8 @@ static int bnxt_get_num_stats(struct bnxt *bp)
{
int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+ num_stats += BNXT_NUM_SW_FUNC_STATS;
+
if (bp->flags & BNXT_FLAG_PORT_STATS)
num_stats += BNXT_NUM_PORT_STATS;
@@ -279,6 +295,9 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
if (!bp->bnapi)
return;
+ for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
+ bnxt_sw_func_stats[i].counter = 0;
+
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -288,7 +307,16 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (k = 0; k < stat_fields; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
buf[j++] = cpr->rx_l4_csum_errors;
+
+ bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
+ le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
+ bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
+ le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
}
+
+ for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
+ buf[j] = bnxt_sw_func_stats[i].counter;
+
if (bp->flags & BNXT_FLAG_PORT_STATS) {
__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
@@ -359,6 +387,11 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
sprintf(buf, "[%d]: rx_l4_csum_errors", i);
buf += ETH_GSTRING_LEN;
}
+ for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
+ strcpy(buf, bnxt_sw_func_stats[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+
if (bp->flags & BNXT_FLAG_PORT_STATS) {
for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
strcpy(buf, bnxt_port_stats_arr[i].string);
@@ -551,6 +584,8 @@ static int bnxt_set_channels(struct net_device *dev,
* to renable
*/
}
+ } else {
+ rc = bnxt_reserve_rings(bp);
}
return rc;
@@ -1785,6 +1820,11 @@ static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
static int bnxt_get_eeprom_len(struct net_device *dev)
{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (BNXT_VF(bp))
+ return 0;
+
/* The -1 return value allows the entire 32-bit range of offsets to be
* passed via the ethtool command-line utility.
*/
@@ -1927,22 +1967,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
return retval;
}
-static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
+static void bnxt_get_pkgver(struct net_device *dev)
{
+ struct bnxt *bp = netdev_priv(dev);
u16 index = 0;
- u32 datalen;
+ char *pkgver;
+ u32 pkglen;
+ u8 *pkgbuf;
+ int len;
if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
- &index, NULL, &datalen) != 0)
- return NULL;
+ &index, NULL, &pkglen) != 0)
+ return;
- memset(buf, 0, buflen);
- if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0)
- return NULL;
+ pkgbuf = kzalloc(pkglen, GFP_KERNEL);
+ if (!pkgbuf) {
+ dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
+ pkglen);
+ return;
+ }
+
+ if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
+ goto err;
- return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
- datalen);
+ pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
+ pkglen);
+ if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
+ len = strlen(bp->fw_ver_str);
+ snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
+ "/pkg %s", pkgver);
+ }
+err:
+ kfree(pkgbuf);
}
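The reworked helper now allocates the package log itself, parses out the version field and, only when it looks like a real version string, appends "/pkg <ver>" to the firmware version string with explicit length accounting. A minimal standalone sketch of just that append step, assuming an illustrative buffer size in place of FW_VER_STR_LEN:

/* Sketch of the append step only: add "/pkg <ver>" to an existing
 * version string without overflowing the fixed-size buffer. The buffer
 * size below is an assumption for illustration, not the driver's value. */
#include <stdio.h>
#include <string.h>
#include <ctype.h>

#define VER_STR_LEN 32	/* stand-in for FW_VER_STR_LEN */

static void append_pkgver(char *ver_str, const char *pkgver)
{
	size_t len = strlen(ver_str);

	/* Only append when the parsed field looks like a real version. */
	if (pkgver && *pkgver != '\0' && isdigit((unsigned char)*pkgver))
		snprintf(ver_str + len, VER_STR_LEN - len - 1, "/pkg %s", pkgver);
}

int main(void)
{
	char fw_ver[VER_STR_LEN] = "218.1.144.0";

	append_pkgver(fw_ver, "20.6.112.0");
	printf("%s\n", fw_ver);		/* 218.1.144.0/pkg 20.6.112.0 */
	return 0;
}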
static int bnxt_get_eeprom(struct net_device *dev,
@@ -2127,9 +2184,8 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
static int bnxt_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
+ u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
struct bnxt *bp = netdev_priv(dev);
- struct hwrm_port_phy_i2c_read_input req = {0};
- struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
int rc;
/* No point in going further if phy status indicates
@@ -2144,21 +2200,19 @@ static int bnxt_get_module_info(struct net_device *dev,
if (bp->hwrm_spec_code < 0x10202)
return -EOPNOTSUPP;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
- req.i2c_slave_addr = I2C_DEV_ADDR_A0;
- req.page_number = 0;
- req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
- req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
- req.port_id = cpu_to_le16(bp->pf.port_id);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
+ SFF_DIAG_SUPPORT_OFFSET + 1,
+ data);
if (!rc) {
- u32 module_id = le32_to_cpu(output->data[0]);
+ u8 module_id = data[0];
+ u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
switch (module_id) {
case SFF_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ if (!diag_supported)
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
break;
case SFF_MODULE_ID_QSFP:
case SFF_MODULE_ID_QSFP_PLUS:
@@ -2174,7 +2228,6 @@ static int bnxt_get_module_info(struct net_device *dev,
break;
}
}
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
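The rewritten bnxt_get_module_info() reads the module's A0h page once through the shared EEPROM helper and inspects two bytes: the module identifier at offset 0 and the SFF-8472 diagnostic-monitoring byte at SFF_DIAG_SUPPORT_OFFSET, reporting the shorter 8436-style length when no diagnostics page is implemented. A hedged standalone sketch of that decision; the offset and length values below follow common SFF-8472/ethtool conventions and are assumptions, not the driver's definitions:

/* Sketch: pick the reported EEPROM length from the A0h page contents. */
#include <stdio.h>
#include <stdint.h>

#define MODULE_ID_SFP        0x03
#define DIAG_SUPPORT_OFFSET  92     /* A0h byte 92: diagnostic monitoring */
#define EEPROM_LEN_8436      256    /* A0h page only */
#define EEPROM_LEN_8472      512    /* A0h + A2h diagnostics page */

static int sfp_eeprom_len(const uint8_t *a0)
{
	if (a0[0] != MODULE_ID_SFP)
		return -1;
	/* Without the diagnostics page there is nothing past the A0h page. */
	return a0[DIAG_SUPPORT_OFFSET] ? EEPROM_LEN_8472 : EEPROM_LEN_8436;
}

int main(void)
{
	uint8_t eeprom[DIAG_SUPPORT_OFFSET + 1] = { [0] = MODULE_ID_SFP };

	printf("no diagnostics: %d bytes\n", sfp_eeprom_len(eeprom));
	eeprom[DIAG_SUPPORT_OFFSET] = 0x40;	/* DDM implemented */
	printf("with diagnostics: %d bytes\n", sfp_eeprom_len(eeprom));
	return 0;
}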
@@ -2615,22 +2668,10 @@ void bnxt_ethtool_init(struct bnxt *bp)
struct hwrm_selftest_qlist_input req = {0};
struct bnxt_test_info *test_info;
struct net_device *dev = bp->dev;
- char *pkglog;
int i, rc;
- pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
- if (pkglog) {
- char *pkgver;
- int len;
+ bnxt_get_pkgver(dev);
- pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
- if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
- len = strlen(bp->fw_ver_str);
- snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
- "/pkg %s", pkgver);
- }
- kfree(pkglog);
- }
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
return;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 73f2249555b5..83444811d3c6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type {
#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
-#define BNX_PKG_LOG_MAX_LENGTH 4096
-
enum bnxnvm_pkglog_field_index {
BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index f952963d594e..a64910892c25 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -462,13 +462,13 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
- req.min_rsscos_ctx = cpu_to_le16(1);
- req.max_rsscos_ctx = cpu_to_le16(1);
+ req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
+ req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
req.min_cmpl_rings = cpu_to_le16(1);
req.min_tx_rings = cpu_to_le16(1);
req.min_rx_rings = cpu_to_le16(1);
- req.min_l2_ctxs = cpu_to_le16(1);
+ req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX);
req.min_vnics = cpu_to_le16(1);
req.min_stat_ctx = cpu_to_le16(1);
req.min_hw_ring_grps = cpu_to_le16(1);
@@ -483,7 +483,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.min_tx_rings = cpu_to_le16(vf_tx_rings);
req.min_rx_rings = cpu_to_le16(vf_rx_rings);
- req.min_l2_ctxs = cpu_to_le16(4);
+ req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req.min_vnics = cpu_to_le16(vf_vnics);
req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
@@ -491,7 +491,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.max_tx_rings = cpu_to_le16(vf_tx_rings);
req.max_rx_rings = cpu_to_le16(vf_rx_rings);
- req.max_l2_ctxs = cpu_to_le16(4);
+ req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req.max_vnics = cpu_to_le16(vf_vnics);
req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
@@ -809,6 +809,9 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_fwd_resp_input req = {0};
struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+ if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
+ return -EINVAL;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
/* Set the new target id */
@@ -845,6 +848,9 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_reject_fwd_resp_input req = {0};
struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+ if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
+ return -EINVAL;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
/* Set the new target id */
req.target_id = cpu_to_le16(vf->fw_fid);
@@ -877,6 +883,9 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_exec_fwd_resp_input req = {0};
struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+ if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
+ return -EINVAL;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
/* Set the new target id */
req.target_id = cpu_to_le16(vf->fw_fid);
@@ -914,7 +923,8 @@ static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
if (is_valid_ether_addr(req->dflt_mac_addr) &&
((vf->flags & BNXT_VF_TRUST) ||
- (!is_valid_ether_addr(vf->mac_addr)))) {
+ !is_valid_ether_addr(vf->mac_addr) ||
+ ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index d10f6f6c7860..e9b20cd19881 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -11,6 +11,23 @@
#ifndef BNXT_SRIOV_H
#define BNXT_SRIOV_H
+#define BNXT_FWD_RESP_SIZE_ERR(n) \
+ ((offsetof(struct hwrm_fwd_resp_input, encap_resp) + n) > \
+ sizeof(struct hwrm_fwd_resp_input))
+
+#define BNXT_EXEC_FWD_RESP_SIZE_ERR(n) \
+ ((offsetof(struct hwrm_exec_fwd_resp_input, encap_request) + n) >\
+ offsetof(struct hwrm_exec_fwd_resp_input, encap_resp_target_id))
+
+#define BNXT_REJ_FWD_RESP_SIZE_ERR(n) \
+ ((offsetof(struct hwrm_reject_fwd_resp_input, encap_request) + n) >\
+ offsetof(struct hwrm_reject_fwd_resp_input, encap_resp_target_id))
+
+#define BNXT_VF_MIN_RSS_CTX 1
+#define BNXT_VF_MAX_RSS_CTX 1
+#define BNXT_VF_MIN_L2_CTX 1
+#define BNXT_VF_MAX_L2_CTX 4
+
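The BNXT_*_SIZE_ERR() checks added above reject a VF-supplied message size that would run the encapsulated payload past the space reserved for it in the host request structure. A standalone sketch of the same offsetof-based bounds check, using an invented stand-in struct rather than the real HWRM layout:

/* Sketch: refuse to copy a caller-supplied payload that would overrun
 * the fixed-size field that follows it. The struct here is an invented
 * stand-in, not the HWRM request layout. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct fwd_req {
	uint16_t target_id;
	uint16_t encap_len;
	uint8_t  encap_msg[96];	/* payload area the macro protects */
	uint64_t resp_addr;	/* must never be overwritten */
};

#define FWD_SIZE_ERR(n) \
	((offsetof(struct fwd_req, encap_msg) + (n)) > \
	  offsetof(struct fwd_req, resp_addr))

int main(void)
{
	printf("64 bytes:  %s\n", FWD_SIZE_ERR(64)  ? "rejected" : "ok");
	printf("200 bytes: %s\n", FWD_SIZE_ERR(200) ? "rejected" : "ok");
	return 0;
}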
int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
int bnxt_set_vf_mac(struct net_device *, int, u8 *);
int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 1389ab5e05df..1f0e872d0667 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -113,10 +113,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail != bp->tx_ring_size)
*event &= ~BNXT_RX_EVENT;
+ *len = xdp.data_end - xdp.data;
if (orig_data != xdp.data) {
offset = xdp.data - xdp.data_hard_start;
*data_ptr = xdp.data_hard_start + offset;
- *len = xdp.data_end - xdp.data;
}
switch (act) {
case XDP_PASS:
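The point of moving the *len assignment out of the if-block is that an XDP program can trim the tail (data_end) without moving the head (data), so keying the length update on a data-pointer change can leave a stale length behind. A standalone sketch of that failure mode, with plain pointers standing in for the xdp_buff fields:

/* Sketch: always recompute the length after the program runs, even when
 * the data pointer did not move. Illustrative only. */
#include <stdio.h>

struct fake_xdp { unsigned char *data, *data_end; };

int main(void)
{
	unsigned char frame[128];
	struct fake_xdp xdp = { frame, frame + 100 };
	unsigned char *orig_data = xdp.data;

	xdp.data_end -= 40;		/* program shrank the tail only */

	unsigned int len = xdp.data_end - xdp.data;	/* always recompute */
	printf("data moved: %d, new len: %u\n", orig_data != xdp.data, len);
	return 0;
}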
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0445f2c0c629..20c1681bb1af 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -652,7 +652,7 @@ static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
pkts = ring->rx_max_coalesced_frames;
if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
- moder = net_dim_get_def_profile(ring->dim.dim.mode);
+ moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
usecs = moder.usec;
pkts = moder.pkts;
}
@@ -1925,7 +1925,7 @@ static void bcmgenet_dim_work(struct work_struct *work)
struct bcmgenet_rx_ring *ring =
container_of(ndim, struct bcmgenet_rx_ring, dim);
struct net_dim_cq_moder cur_profile =
- net_dim_get_profile(dim->mode, dim->profile_ix);
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
dim->state = NET_DIM_START_MEASURE;
@@ -2102,7 +2102,7 @@ static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
/* If DIM was enabled, re-apply default parameters */
if (dim->use_dim) {
- moder = net_dim_get_def_profile(dim->dim.mode);
+ moder = net_dim_get_def_rx_moderation(dim->dim.mode);
usecs = moder.usec;
pkts = moder.pkts;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 08bbb639be1a..9f59b1270a7c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8733,14 +8733,15 @@ static void tg3_free_consistent(struct tg3 *tp)
tg3_mem_rx_release(tp);
tg3_mem_tx_release(tp);
- /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
- tg3_full_lock(tp, 0);
+ /* tp->hw_stats can be referenced safely:
+ * 1. under rtnl_lock
+ * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
+ */
if (tp->hw_stats) {
dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
- tg3_full_unlock(tp);
}
/*
@@ -14178,7 +14179,7 @@ static void tg3_get_stats64(struct net_device *dev,
struct tg3 *tp = netdev_priv(dev);
spin_lock_bh(&tp->lock);
- if (!tp->hw_stats) {
+ if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
*stats = tp->net_stats_prev;
spin_unlock_bh(&tp->lock);
return;
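The tg3 change above replaces the heavyweight lock around freeing hw_stats with a rule for readers: take tp->lock and additionally require INIT_COMPLETE before dereferencing the buffer, falling back to the cached snapshot otherwise. A hedged userspace sketch of that flag-plus-lock idiom (pthreads stand in for tp->lock; this is not the driver's code):

/* Sketch: only dereference the shared stats buffer while holding the
 * lock and seeing the "initialized" flag; otherwise return the cache. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int init_complete;		/* cleared before the buffer is freed */
static unsigned long *hw_stats;		/* NULL or a valid allocation */
static unsigned long stats_prev;	/* last snapshot, used as fallback */

static unsigned long read_stats(void)
{
	unsigned long val;

	pthread_mutex_lock(&lock);
	if (!hw_stats || !init_complete)
		val = stats_prev;	/* buffer gone or going away: use cache */
	else
		val = stats_prev = *hw_stats;
	pthread_mutex_unlock(&lock);
	return val;
}

int main(void)
{
	hw_stats = malloc(sizeof(*hw_stats));
	if (!hw_stats)
		return 1;
	*hw_stats = 42;
	init_complete = 1;
	printf("live:   %lu\n", read_stats());

	pthread_mutex_lock(&lock);	/* teardown: clear flag before free */
	init_complete = 0;
	pthread_mutex_unlock(&lock);
	free(hw_stats);
	hw_stats = NULL;

	printf("cached: %lu\n", read_stats());
	return 0;
}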
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index b4c9268100bb..3e93df5d4e3b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -591,16 +591,10 @@ static int macb_mii_init(struct macb *bp)
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
np = bp->pdev->dev.of_node;
+ if (pdata)
+ bp->mii_bus->phy_mask = pdata->phy_mask;
- if (np) {
- err = of_mdiobus_register(bp->mii_bus, np);
- } else {
- if (pdata)
- bp->mii_bus->phy_mask = pdata->phy_mask;
-
- err = mdiobus_register(bp->mii_bus);
- }
-
+ err = of_mdiobus_register(bp->mii_bus, np);
if (err)
goto err_out_free_mdiobus;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index e8b290473ee2..929d485a3a2f 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1245,7 +1245,7 @@ static void cn23xx_setup_reg_address(struct octeon_device *oct)
CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
}
-static int cn23xx_sriov_config(struct octeon_device *oct)
+int cn23xx_sriov_config(struct octeon_device *oct)
{
struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
u32 max_rings, total_rings, max_vfs, rings_per_vf;
@@ -1269,8 +1269,8 @@ static int cn23xx_sriov_config(struct octeon_device *oct)
break;
}
- if (max_rings <= num_present_cpus())
- num_pf_rings = 1;
+ if (oct->sriov_info.num_pf_rings)
+ num_pf_rings = oct->sriov_info.num_pf_rings;
else
num_pf_rings = num_present_cpus();
@@ -1497,3 +1497,57 @@ void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
octeon_mbox_write(oct, &mbox_cmd);
}
}
+
+static void
+cn23xx_get_vf_stats_callback(struct octeon_device *oct,
+ struct octeon_mbox_cmd *cmd, void *arg)
+{
+ struct oct_vf_stats_ctx *ctx = arg;
+
+ memcpy(ctx->stats, cmd->data, sizeof(struct oct_vf_stats));
+ atomic_set(&ctx->status, 1);
+}
+
+int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
+ struct oct_vf_stats *stats)
+{
+ u32 timeout = HZ; // 1sec
+ struct octeon_mbox_cmd mbox_cmd;
+ struct oct_vf_stats_ctx ctx;
+ u32 count = 0, ret;
+
+ if (!(oct->sriov_info.vf_drv_loaded_mask & (1ULL << vfidx)))
+ return -1;
+
+ if (sizeof(struct oct_vf_stats) > sizeof(mbox_cmd.data))
+ return -1;
+
+ mbox_cmd.msg.u64 = 0;
+ mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 1;
+ mbox_cmd.msg.s.cmd = OCTEON_GET_VF_STATS;
+ mbox_cmd.msg.s.len = 1;
+ mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = (octeon_mbox_callback_t)cn23xx_get_vf_stats_callback;
+ ctx.stats = stats;
+ atomic_set(&ctx.status, 0);
+ mbox_cmd.fn_arg = (void *)&ctx;
+ memset(mbox_cmd.data, 0, sizeof(mbox_cmd.data));
+ octeon_mbox_write(oct, &mbox_cmd);
+
+ do {
+ schedule_timeout_uninterruptible(1);
+ } while ((atomic_read(&ctx.status) == 0) && (count++ < timeout));
+
+ ret = atomic_read(&ctx.status);
+ if (ret == 0) {
+ octeon_mbox_cancel(oct, 0);
+ dev_err(&oct->pci_dev->dev, "Unable to get stats from VF-%d, timedout\n",
+ vfidx);
+ return -1;
+ }
+
+ return 0;
+}
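Callers of cn23xx_get_vf_stats() get a filled oct_vf_stats on success, or -1 after roughly a second of polling once the mailbox request is cancelled. A standalone sketch of the bounded-wait idiom used above, with C11 atomics and nanosleep standing in for the kernel's atomic_t and schedule_timeout_uninterruptible(); none of this is the driver's code:

/* Sketch: sleep in small steps until a callback flips an atomic status
 * flag or ~1 s elapses, then treat the request as timed out. */
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define POLL_STEPS 1000			/* ~1 s at 1 ms per step */

static atomic_int status;

static void mbox_callback(void)		/* normally runs from the mailbox IRQ */
{
	atomic_store(&status, 1);
}

static int wait_for_vf_stats(void)
{
	struct timespec step = { .tv_sec = 0, .tv_nsec = 1000000 };	/* 1 ms */
	int count = 0;

	while (atomic_load(&status) == 0 && count++ < POLL_STEPS)
		nanosleep(&step, NULL);

	return atomic_load(&status) ? 0 : -1;	/* -1: timed out, cancel mbox */
}

int main(void)
{
	mbox_callback();		/* pretend the response arrived */
	printf("wait: %d\n", wait_for_vf_stats());
	return 0;
}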
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index 2aba5247b6d8..e6f31d0d5c0b 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -43,6 +43,15 @@ struct octeon_cn23xx_pf {
#define CN23XX_SLI_DEF_BP 0x40
+struct oct_vf_stats {
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ u64 broadcast;
+ u64 multicast;
+};
+
int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
int validate_cn23xx_pf_config_info(struct octeon_device *oct,
@@ -52,8 +61,13 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
+int cn23xx_sriov_config(struct octeon_device *oct);
+
int cn23xx_fw_loaded(struct octeon_device *oct);
void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
u8 *mac);
+
+int cn23xx_get_vf_stats(struct octeon_device *oct, int ifidx,
+ struct oct_vf_stats *stats);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 2a94eee943b2..8093c5eafea2 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -29,6 +29,162 @@
/* OOM task polling interval */
#define LIO_OOM_POLL_INTERVAL_MS 250
+#define OCTNIC_MAX_SG MAX_SKB_FRAGS
+
+/**
+ * \brief Callback for getting interface configuration
+ * @param status status of request
+ * @param buf pointer to resp structure
+ */
+void lio_if_cfg_callback(struct octeon_device *oct,
+ u32 status __attribute__((unused)), void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+ struct liquidio_if_cfg_context *ctx;
+ struct liquidio_if_cfg_resp *resp;
+
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+ oct = lio_get_device(ctx->octeon_id);
+ if (resp->status)
+ dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
+ CVM_CAST64(resp->status));
+ WRITE_ONCE(ctx->cond, 1);
+
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
+ resp->cfg_info.liquidio_firmware_version);
+
+ /* This barrier is required to be sure that the response has been
+ * written fully before waking up the handler
+ */
+ wmb();
+
+ wake_up_interruptible(&ctx->wc);
+}
+
+/**
+ * \brief Delete gather lists
+ * @param lio per-network private data
+ */
+void lio_delete_glists(struct lio *lio)
+{
+ struct octnic_gather *g;
+ int i;
+
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+
+ if (!lio->glist)
+ return;
+
+ for (i = 0; i < lio->oct_dev->num_iqs; i++) {
+ do {
+ g = (struct octnic_gather *)
+ lio_list_delete_head(&lio->glist[i]);
+ kfree(g);
+ } while (g);
+
+ if (lio->glists_virt_base && lio->glists_virt_base[i] &&
+ lio->glists_dma_base && lio->glists_dma_base[i]) {
+ lio_dma_free(lio->oct_dev,
+ lio->glist_entry_size * lio->tx_qsize,
+ lio->glists_virt_base[i],
+ lio->glists_dma_base[i]);
+ }
+ }
+
+ kfree(lio->glists_virt_base);
+ lio->glists_virt_base = NULL;
+
+ kfree(lio->glists_dma_base);
+ lio->glists_dma_base = NULL;
+
+ kfree(lio->glist);
+ lio->glist = NULL;
+}
+
+/**
+ * \brief Setup gather lists
+ * @param lio per-network private data
+ */
+int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
+{
+ struct octnic_gather *g;
+ int i, j;
+
+ lio->glist_lock =
+ kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
+ if (!lio->glist_lock)
+ return -ENOMEM;
+
+ lio->glist =
+ kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
+ if (!lio->glist) {
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
+ lio->glist_entry_size =
+ ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+ /* allocate memory to store virtual and dma base address of
+ * per glist consistent memory
+ */
+ lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+ GFP_KERNEL);
+ lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+ GFP_KERNEL);
+
+ if (!lio->glists_virt_base || !lio->glists_dma_base) {
+ lio_delete_glists(lio);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_iqs; i++) {
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
+
+ spin_lock_init(&lio->glist_lock[i]);
+
+ INIT_LIST_HEAD(&lio->glist[i]);
+
+ lio->glists_virt_base[i] =
+ lio_dma_alloc(oct,
+ lio->glist_entry_size * lio->tx_qsize,
+ &lio->glists_dma_base[i]);
+
+ if (!lio->glists_virt_base[i]) {
+ lio_delete_glists(lio);
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < lio->tx_qsize; j++) {
+ g = kzalloc_node(sizeof(*g), GFP_KERNEL,
+ numa_node);
+ if (!g)
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ break;
+
+ g->sg = lio->glists_virt_base[i] +
+ (j * lio->glist_entry_size);
+
+ g->sg_dma_ptr = lio->glists_dma_base[i] +
+ (j * lio->glist_entry_size);
+
+ list_add_tail(&g->list, &lio->glist[i]);
+ }
+
+ if (j != lio->tx_qsize) {
+ lio_delete_glists(lio);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
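lio_setup_glists() carves one DMA-coherent block per input queue into fixed-size gather entries, so each entry's virtual pointer and bus address are simply the block bases plus j * glist_entry_size. A standalone sketch of that carving, with malloc and a made-up bus address standing in for lio_dma_alloc(); sizes are illustrative:

/* Sketch: slice one block into per-entry sg areas, keeping the virtual
 * pointer and the (simulated) DMA address in lockstep. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define ENTRY_SIZE 64
#define QUEUE_DEPTH 4

int main(void)
{
	uint8_t *virt_base = malloc((size_t)ENTRY_SIZE * QUEUE_DEPTH);
	uintptr_t dma_base = 0x10000000;	/* pretend bus address */
	int j;

	if (!virt_base)
		return 1;

	for (j = 0; j < QUEUE_DEPTH; j++) {
		uint8_t  *sg     = virt_base + (size_t)j * ENTRY_SIZE;
		uintptr_t sg_dma = dma_base + (uintptr_t)j * ENTRY_SIZE;

		printf("entry %d: virt offset %3td, dma 0x%lx\n",
		       j, sg - virt_base, (unsigned long)sg_dma);
	}

	free(virt_base);
	return 0;
}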
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
struct lio *lio = GET_LIO(netdev);
@@ -880,8 +1036,8 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
int num_ioq_vectors;
int irqret, err;
- oct->num_msix_irqs = num_ioqs;
if (oct->msix_on) {
+ oct->num_msix_irqs = num_ioqs;
if (OCTEON_CN23XX_PF(oct)) {
num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
@@ -1169,3 +1325,355 @@ int lio_wait_for_clean_oq(struct octeon_device *oct)
return pending_pkts;
}
+
+static void
+octnet_nic_stats_callback(struct octeon_device *oct_dev,
+ u32 status, void *ptr)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct oct_nic_stats_resp *resp =
+ (struct oct_nic_stats_resp *)sc->virtrptr;
+ struct oct_nic_stats_ctrl *ctrl =
+ (struct oct_nic_stats_ctrl *)sc->ctxptr;
+ struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
+ struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
+ struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
+ struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
+
+ if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
+ octeon_swap_8B_data((u64 *)&resp->stats,
+ (sizeof(struct oct_link_stats)) >> 3);
+
+ /* RX link-level stats */
+ rstats->total_rcvd = rsp_rstats->total_rcvd;
+ rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
+ rstats->total_bcst = rsp_rstats->total_bcst;
+ rstats->total_mcst = rsp_rstats->total_mcst;
+ rstats->runts = rsp_rstats->runts;
+ rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
+ /* Accounts for over/under-run of buffers */
+ rstats->fifo_err = rsp_rstats->fifo_err;
+ rstats->dmac_drop = rsp_rstats->dmac_drop;
+ rstats->fcs_err = rsp_rstats->fcs_err;
+ rstats->jabber_err = rsp_rstats->jabber_err;
+ rstats->l2_err = rsp_rstats->l2_err;
+ rstats->frame_err = rsp_rstats->frame_err;
+ rstats->red_drops = rsp_rstats->red_drops;
+
+ /* RX firmware stats */
+ rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
+ rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
+ rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
+ rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
+ rstats->fw_err_pko = rsp_rstats->fw_err_pko;
+ rstats->fw_err_link = rsp_rstats->fw_err_link;
+ rstats->fw_err_drop = rsp_rstats->fw_err_drop;
+ rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
+ rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
+
+ /* Number of packets that are LROed */
+ rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
+ /* Number of octets that are LROed */
+ rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
+ /* Number of LRO packets formed */
+ rstats->fw_total_lro = rsp_rstats->fw_total_lro;
+ /* Number of times LRO of packet aborted */
+ rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
+ rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
+ rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
+ rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
+ rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
+ /* intrmod: packet forward rate */
+ rstats->fwd_rate = rsp_rstats->fwd_rate;
+
+ /* TX link-level stats */
+ tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
+ tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
+ tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
+ tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
+ tstats->ctl_sent = rsp_tstats->ctl_sent;
+ /* Packets sent after one collision*/
+ tstats->one_collision_sent = rsp_tstats->one_collision_sent;
+ /* Packets sent after multiple collision*/
+ tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
+ /* Packets not sent due to max collisions */
+ tstats->max_collision_fail = rsp_tstats->max_collision_fail;
+ /* Packets not sent due to max deferrals */
+ tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
+ /* Accounts for over/under-run of buffers */
+ tstats->fifo_err = rsp_tstats->fifo_err;
+ tstats->runts = rsp_tstats->runts;
+ /* Total number of collisions detected */
+ tstats->total_collisions = rsp_tstats->total_collisions;
+
+ /* firmware stats */
+ tstats->fw_total_sent = rsp_tstats->fw_total_sent;
+ tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
+ tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
+ tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
+ tstats->fw_err_pko = rsp_tstats->fw_err_pko;
+ tstats->fw_err_pki = rsp_tstats->fw_err_pki;
+ tstats->fw_err_link = rsp_tstats->fw_err_link;
+ tstats->fw_err_drop = rsp_tstats->fw_err_drop;
+ tstats->fw_tso = rsp_tstats->fw_tso;
+ tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
+ tstats->fw_err_tso = rsp_tstats->fw_err_tso;
+ tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
+
+ resp->status = 1;
+ } else {
+ resp->status = -1;
+ }
+ complete(&ctrl->complete);
+}
+
+int octnet_get_link_stats(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ struct oct_nic_stats_ctrl *ctrl;
+ struct oct_nic_stats_resp *resp;
+ int retval;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ 0,
+ sizeof(struct oct_nic_stats_resp),
+ sizeof(struct octnic_ctrl_pkt));
+
+ if (!sc)
+ return -ENOMEM;
+
+ resp = (struct oct_nic_stats_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_stats_resp));
+
+ ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
+ memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
+ ctrl->netdev = netdev;
+ init_completion(&ctrl->complete);
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_PORT_STATS, 0, 0, 0);
+
+ sc->callback = octnet_nic_stats_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 500; /* in milliseconds */
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
+
+ if (resp->status != 1) {
+ octeon_free_soft_command(oct_dev, sc);
+
+ return -EINVAL;
+ }
+
+ octeon_free_soft_command(oct_dev, sc);
+
+ return 0;
+}
+
+static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct liquidio_nic_seapi_ctl_context *ctx;
+ struct octeon_soft_command *sc = buf;
+
+ ctx = sc->ctxptr;
+
+ oct = lio_get_device(ctx->octeon_id);
+ if (status) {
+ dev_err(&oct->pci_dev->dev, "%s: instruction failed. Status: %llx\n",
+ __func__,
+ CVM_CAST64(status));
+ }
+ ctx->status = status;
+ complete(&ctx->complete);
+}
+
+int liquidio_set_speed(struct lio *lio, int speed)
+{
+ struct liquidio_nic_seapi_ctl_context *ctx;
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_nic_seapi_resp *resp;
+ struct octeon_soft_command *sc;
+ union octnet_cmd *ncmd;
+ u32 ctx_size;
+ int retval;
+ u32 var;
+
+ if (oct->speed_setting == speed)
+ return 0;
+
+ if (!OCTEON_CN23XX_PF(oct)) {
+ dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
+ sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ sizeof(struct oct_nic_seapi_resp),
+ ctx_size);
+ if (!sc)
+ return -ENOMEM;
+
+ ncmd = sc->virtdptr;
+ ctx = sc->ctxptr;
+ resp = sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
+
+ ctx->octeon_id = lio_get_device_id(oct);
+ ctx->status = 0;
+ init_completion(&ctx->complete);
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
+ ncmd->s.param1 = speed;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
+
+ sc->callback = liquidio_nic_seapi_ctl_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 5000;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
+ retval = -EBUSY;
+ } else {
+ /* Wait for response or timeout */
+ if (wait_for_completion_timeout(&ctx->complete,
+ msecs_to_jiffies(10000)) == 0) {
+ dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
+ __func__);
+ octeon_free_soft_command(oct, sc);
+ return -EINTR;
+ }
+
+ retval = resp->status;
+
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
+ __func__, retval);
+ octeon_free_soft_command(oct, sc);
+ return -EIO;
+ }
+
+ var = be32_to_cpu((__force __be32)resp->speed);
+ if (var != speed) {
+ dev_err(&oct->pci_dev->dev,
+ "%s: setting failed speed= %x, expect %x\n",
+ __func__, var, speed);
+ }
+
+ oct->speed_setting = var;
+ }
+
+ octeon_free_soft_command(oct, sc);
+
+ return retval;
+}
+
+int liquidio_get_speed(struct lio *lio)
+{
+ struct liquidio_nic_seapi_ctl_context *ctx;
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_nic_seapi_resp *resp;
+ struct octeon_soft_command *sc;
+ union octnet_cmd *ncmd;
+ u32 ctx_size;
+ int retval;
+
+ ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
+ sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ sizeof(struct oct_nic_seapi_resp),
+ ctx_size);
+ if (!sc)
+ return -ENOMEM;
+
+ ncmd = sc->virtdptr;
+ ctx = sc->ctxptr;
+ resp = sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
+
+ ctx->octeon_id = lio_get_device_id(oct);
+ ctx->status = 0;
+ init_completion(&ctx->complete);
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
+
+ sc->callback = liquidio_nic_seapi_ctl_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 5000;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
+ oct->no_speed_setting = 1;
+ oct->speed_setting = 25;
+
+ retval = -EBUSY;
+ } else {
+ if (wait_for_completion_timeout(&ctx->complete,
+ msecs_to_jiffies(10000)) == 0) {
+ dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
+ __func__);
+
+ oct->speed_setting = 25;
+ oct->no_speed_setting = 1;
+
+ octeon_free_soft_command(oct, sc);
+
+ return -EINTR;
+ }
+ retval = resp->status;
+ if (retval) {
+ dev_err(&oct->pci_dev->dev,
+ "%s failed retval=%d\n", __func__, retval);
+ oct->no_speed_setting = 1;
+ oct->speed_setting = 25;
+ octeon_free_soft_command(oct, sc);
+ retval = -EIO;
+ } else {
+ u32 var;
+
+ var = be32_to_cpu((__force __be32)resp->speed);
+ oct->speed_setting = var;
+ if (var == 0xffff) {
+ oct->no_speed_setting = 1;
+ /* Unable to access the boot variables;
+ * fall back to the default value for this NIC type.
+ */
+ oct->speed_setting = 25;
+ }
+ }
+ }
+
+ octeon_free_soft_command(oct, sc);
+
+ return retval;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 9926a12dd805..06f7449c569d 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -32,7 +32,6 @@
#include "cn23xx_vf_device.h"
static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
-static int octnet_get_link_stats(struct net_device *netdev);
struct oct_intrmod_context {
int octeon_id;
@@ -113,6 +112,9 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"tx_tso_err",
"tx_vxlan",
+ "tx_mcast",
+ "tx_bcast",
+
"mac_tx_total_pkts",
"mac_tx_total_bytes",
"mac_tx_mcast_pkts",
@@ -120,7 +122,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"mac_tx_ctl_packets",
"mac_tx_total_collisions",
"mac_tx_one_collision",
- "mac_tx_multi_collison",
+ "mac_tx_multi_collision",
"mac_tx_max_collision_fail",
"mac_tx_max_deferal_fail",
"mac_tx_fifo_err",
@@ -128,6 +130,8 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"rx_total_rcvd",
"rx_total_fwd",
+ "rx_mcast",
+ "rx_bcast",
"rx_jabber_err",
"rx_l2_err",
"rx_frame_err",
@@ -172,6 +176,10 @@ static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
"tx_errors",
"rx_dropped",
"tx_dropped",
+ "rx_mcast",
+ "tx_mcast",
+ "rx_bcast",
+ "tx_bcast",
"link_state_changes",
};
@@ -222,46 +230,147 @@ static int lio_get_link_ksettings(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct oct_link_info *linfo;
- u32 supported = 0, advertising = 0;
linfo = &lio->linfo;
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+
switch (linfo->link.s.phy_type) {
case LIO_PHY_PORT_TP:
ecmd->base.port = PORT_TP;
- supported = (SUPPORTED_10000baseT_Full |
- SUPPORTED_TP | SUPPORTED_Pause);
- advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
ecmd->base.autoneg = AUTONEG_DISABLE;
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported,
+ 10000baseT_Full);
+
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ 10000baseT_Full);
+
break;
case LIO_PHY_PORT_FIBRE:
- ecmd->base.port = PORT_FIBRE;
-
- if (linfo->link.s.speed == SPEED_10000) {
- supported = SUPPORTED_10000baseT_Full;
- advertising = ADVERTISED_10000baseT_Full;
+ if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
+ dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
+ } else {
+ dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
+ linfo->link.s.if_mode);
}
- supported |= SUPPORTED_FIBRE | SUPPORTED_Pause;
- advertising |= ADVERTISED_Pause;
+ ecmd->base.port = PORT_FIBRE;
ecmd->base.autoneg = AUTONEG_DISABLE;
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
+
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
+ if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
+ if (OCTEON_CN23XX_PF(oct)) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, 25000baseCR_Full);
+
+ if (oct->no_speed_setting == 0) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseCR_Full);
+ }
+
+ if (oct->no_speed_setting == 0)
+ liquidio_get_speed(lio);
+ else
+ oct->speed_setting = 25;
+
+ if (oct->speed_setting == 10) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseCR_Full);
+ }
+ if (oct->speed_setting == 25) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseCR_Full);
+ }
+ } else { /* VF */
+ if (linfo->link.s.speed == 10000) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseCR_Full);
+
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseCR_Full);
+ }
+
+ if (linfo->link.s.speed == 25000) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 25000baseCR_Full);
+
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseCR_Full);
+ }
+ }
+ } else {
+ ethtool_link_ksettings_add_link_mode(ecmd, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ 10000baseT_Full);
+ }
break;
}
- if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
- linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
- linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
- linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
- ethtool_convert_legacy_u32_to_link_mode(
- ecmd->link_modes.supported, supported);
- ethtool_convert_legacy_u32_to_link_mode(
- ecmd->link_modes.advertising, advertising);
- } else {
- dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
- linfo->link.s.if_mode);
- }
-
if (linfo->link.s.link_up) {
ecmd->base.speed = linfo->link.s.speed;
ecmd->base.duplex = linfo->link.s.duplex;
@@ -273,6 +382,51 @@ static int lio_get_link_ksettings(struct net_device *netdev,
return 0;
}
+static int lio_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ const int speed = ecmd->base.speed;
+ struct lio *lio = GET_LIO(netdev);
+ struct oct_link_info *linfo;
+ struct octeon_device *oct;
+ u32 is25G = 0;
+
+ oct = lio->oct_dev;
+
+ linfo = &lio->linfo;
+
+ if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
+ is25G = 1;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (oct->no_speed_setting) {
+ dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if ((ecmd->base.duplex != DUPLEX_UNKNOWN &&
+ ecmd->base.duplex != linfo->link.s.duplex) ||
+ ecmd->base.autoneg != AUTONEG_DISABLE ||
+ (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 &&
+ ecmd->base.speed != SPEED_UNKNOWN))
+ return -EOPNOTSUPP;
+
+ if ((oct->speed_boot == speed / 1000) &&
+ oct->speed_boot == oct->speed_setting)
+ return 0;
+
+ liquidio_set_speed(lio, speed / 1000);
+
+ dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n",
+ oct->speed_setting);
+
+ return 0;
+}
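The new set handler only accepts a fixed speed of 10000 or 25000 Mb/s with autonegotiation disabled on the 25G subsystems, and hands liquidio_set_speed() the ethtool value divided by 1000, keeping the driver's speed_setting in Gb/s. A standalone sketch of that validation and conversion (constants here are illustrative stand-ins, not the driver's):

/* Sketch: mirror the checks applied before calling liquidio_set_speed(). */
#include <stdio.h>

#define AUTONEG_DISABLE 0

static int validate_and_convert(int speed_mbps, int autoneg, int is_25g_board)
{
	if (!is_25g_board)
		return -1;			/* -EOPNOTSUPP in the driver */
	if (autoneg != AUTONEG_DISABLE)
		return -1;
	if (speed_mbps != 10000 && speed_mbps != 25000)
		return -1;
	return speed_mbps / 1000;		/* what liquidio_set_speed() gets */
}

int main(void)
{
	printf("%d\n", validate_and_convert(25000, AUTONEG_DISABLE, 1));	/* 25 */
	printf("%d\n", validate_and_convert(40000, AUTONEG_DISABLE, 1));	/* -1 */
	return 0;
}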
+
static void
lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
@@ -353,7 +507,14 @@ lio_ethtool_get_channels(struct net_device *dev,
rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
} else if (OCTEON_CN23XX_PF(oct)) {
- max_combined = lio->linfo.num_txpciq;
+ if (oct->sriov_info.sriov_enabled) {
+ max_combined = lio->linfo.num_txpciq;
+ } else {
+ struct octeon_config *conf23_pf =
+ CHIP_CONF(oct, cn23xx_pf);
+
+ max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
+ }
combined_count = oct->num_iqs;
} else if (OCTEON_CN23XX_VF(oct)) {
u64 reg_val = 0ULL;
@@ -417,9 +578,15 @@ lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
kfree(oct->irq_name_storage);
oct->irq_name_storage = NULL;
+
+ if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
+ dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
+ return -1;
+ }
+
if (octeon_setup_interrupt(oct, num_ioqs)) {
dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
- return 1;
+ return -1;
}
/* Enable Octeon device interrupts */
@@ -449,7 +616,16 @@ lio_ethtool_set_channels(struct net_device *dev,
combined_count = channel->combined_count;
if (OCTEON_CN23XX_PF(oct)) {
- max_combined = channel->max_combined;
+ if (oct->sriov_info.sriov_enabled) {
+ max_combined = lio->linfo.num_txpciq;
+ } else {
+ struct octeon_config *conf23_pf =
+ CHIP_CONF(oct,
+ cn23xx_pf);
+
+ max_combined =
+ CFG_GET_IQ_MAX_Q(conf23_pf);
+ }
} else if (OCTEON_CN23XX_VF(oct)) {
u64 reg_val = 0ULL;
u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
@@ -477,7 +653,6 @@ lio_ethtool_set_channels(struct net_device *dev,
if (lio_reset_queues(dev, combined_count))
return -EINVAL;
- lio_irq_reallocate_irqs(oct, combined_count);
if (stopped)
dev->netdev_ops->ndo_open(dev);
@@ -816,12 +991,120 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
ering->rx_jumbo_max_pending = 0;
}
+static int lio_23xx_reconfigure_queue_count(struct lio *lio)
+{
+ struct octeon_device *oct = lio->oct_dev;
+ struct liquidio_if_cfg_context *ctx;
+ u32 resp_size, ctx_size, data_size;
+ struct liquidio_if_cfg_resp *resp;
+ struct octeon_soft_command *sc;
+ union oct_nic_if_cfg if_cfg;
+ struct lio_version *vdata;
+ u32 ifidx_or_pfnum;
+ int retval;
+ int j;
+
+ resp_size = sizeof(struct liquidio_if_cfg_resp);
+ ctx_size = sizeof(struct liquidio_if_cfg_context);
+ data_size = sizeof(struct lio_version);
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, data_size,
+ resp_size, ctx_size);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
+ __func__);
+ return -1;
+ }
+
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+ vdata = (struct lio_version *)sc->virtdptr;
+
+ vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
+ vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
+ vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
+
+ ifidx_or_pfnum = oct->pf_num;
+ WRITE_ONCE(ctx->cond, 0);
+ ctx->octeon_id = lio_get_device_id(oct);
+ init_waitqueue_head(&ctx->wc);
+
+ if_cfg.u64 = 0;
+ if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
+ if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
+ if_cfg.s.base_queue = oct->sriov_info.pf_srn;
+ if_cfg.s.gmx_port_id = oct->pf_num;
+
+ sc->iq_no = 0;
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_QCOUNT_UPDATE, 0,
+ if_cfg.u64, 0);
+ sc->callback = lio_if_cfg_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = LIO_IFCFG_WAIT_TIME;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_err(&oct->pci_dev->dev,
+ "iq/oq config failed status: %x\n",
+ retval);
+ goto qcount_update_fail;
+ }
+
+ if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
+ dev_err(&oct->pci_dev->dev, "Wait interrupted\n");
+ return -1;
+ }
+
+ retval = resp->status;
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "iq/oq config failed\n");
+ goto qcount_update_fail;
+ }
+
+ octeon_swap_8B_data((u64 *)(&resp->cfg_info),
+ (sizeof(struct liquidio_if_cfg_info)) >> 3);
+
+ lio->ifidx = ifidx_or_pfnum;
+ lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
+ lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
+ for (j = 0; j < lio->linfo.num_rxpciq; j++) {
+ lio->linfo.rxpciq[j].u64 =
+ resp->cfg_info.linfo.rxpciq[j].u64;
+ }
+
+ for (j = 0; j < lio->linfo.num_txpciq; j++) {
+ lio->linfo.txpciq[j].u64 =
+ resp->cfg_info.linfo.txpciq[j].u64;
+ }
+
+ lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+ lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+ lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
+ lio->txq = lio->linfo.txpciq[0].s.q_no;
+ lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+
+ octeon_free_soft_command(oct, sc);
+ dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
+ lio->linfo.num_rxpciq);
+
+ return 0;
+
+qcount_update_fail:
+ octeon_free_soft_command(oct, sc);
+
+ return -1;
+}
+
static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ int i, queue_count_update = 0;
struct napi_struct *napi, *n;
- int i, update = 0;
+ int ret;
+
+ schedule_timeout_uninterruptible(msecs_to_jiffies(100));
if (wait_for_pending_requests(oct))
dev_err(&oct->pci_dev->dev, "There were pending requests\n");
@@ -830,7 +1113,7 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
if (octeon_set_io_queues_off(oct)) {
- dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
+ dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
return -1;
}
@@ -843,9 +1126,40 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
netif_napi_del(napi);
if (num_qs != oct->num_iqs) {
- netif_set_real_num_rx_queues(netdev, num_qs);
- netif_set_real_num_tx_queues(netdev, num_qs);
- update = 1;
+ ret = netif_set_real_num_rx_queues(netdev, num_qs);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Setting real number rx failed\n");
+ return ret;
+ }
+
+ ret = netif_set_real_num_tx_queues(netdev, num_qs);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Setting real number tx failed\n");
+ return ret;
+ }
+
+ /* The value of queue_count_update decides whether it is the
+ * queue count or the descriptor count that is being
+ * re-configured.
+ */
+ queue_count_update = 1;
+ }
+
+ /* Re-configuration of queues can happen in two scenarios: SRIOV enabled
+ * and SRIOV disabled. A few steps, such as recreating queue zero and
+ * resetting glists and IRQs, are required for both. For the latter, some
+ * extra steps, such as updating sriov_info for the octeon device, are
+ * also needed.
+ */
+ if (queue_count_update) {
+ lio_delete_glists(lio);
+
+ /* Delete the mbox for a PF with SRIOV disabled, because sriov_info
+ * is about to change.
+ */
+ if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
+ oct->fn_list.free_mbox(oct);
}
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
@@ -860,24 +1174,91 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
octeon_delete_instr_queue(oct, i);
}
+ if (queue_count_update) {
+ /* For PF re-configure sriov related information */
+ if ((OCTEON_CN23XX_PF(oct)) &&
+ !oct->sriov_info.sriov_enabled) {
+ oct->sriov_info.num_pf_rings = num_qs;
+ if (cn23xx_sriov_config(oct)) {
+ dev_err(&oct->pci_dev->dev,
+ "Queue reset aborted: SRIOV config failed\n");
+ return -1;
+ }
+
+ num_qs = oct->sriov_info.num_pf_rings;
+ }
+ }
+
if (oct->fn_list.setup_device_regs(oct)) {
dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
return -1;
}
- if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
- dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n");
- return -1;
+ /* The following are needed in case of queue count re-configuration and
+ * not for descriptor count re-configuration.
+ */
+ if (queue_count_update) {
+ if (octeon_setup_instr_queues(oct))
+ return -1;
+
+ if (octeon_setup_output_queues(oct))
+ return -1;
+
+ /* Recreating mbox for PF that is SRIOV disabled */
+ if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
+ if (oct->fn_list.setup_mbox(oct)) {
+ dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
+ return -1;
+ }
+ }
+
+ /* Deleting and recreating IRQs whether the interface is SRIOV
+ * enabled or disabled.
+ */
+ if (lio_irq_reallocate_irqs(oct, num_qs)) {
+ dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
+ return -1;
+ }
+
+ /* Enable the input and output queues for this Octeon device */
+ if (oct->fn_list.enable_io_queues(oct)) {
+ dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
+ return -1;
+ }
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->droq[i]->max_count,
+ oct->droq[i]->pkts_credit_reg);
+
+ /* Inform the firmware of the new queue count. This is required so
+ * that the firmware can allocate more queues than it did at load
+ * time.
+ */
+ if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
+ if (lio_23xx_reconfigure_queue_count(lio))
+ return -1;
+ }
}
- /* Enable the input and output queues for this Octeon device */
- if (oct->fn_list.enable_io_queues(oct)) {
- dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues");
+ /* Once firmware is aware of the new value, queues can be recreated */
+ if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
+ dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
return -1;
}
- if (update && lio_send_queue_count_update(netdev, num_qs))
- return -1;
+ if (queue_count_update) {
+ if (lio_setup_glists(oct, lio, num_qs)) {
+ dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
+ return -1;
+ }
+
+ /* Send firmware the information about new number of queues
+ * if the interface is a VF or a PF that is SRIOV enabled.
+ */
+ if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
+ if (lio_send_queue_count_update(netdev, num_qs))
+ return -1;
+ }
return 0;
}
@@ -922,7 +1303,7 @@ static int lio_ethtool_set_ringparam(struct net_device *netdev,
CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
rx_count);
- if (lio_reset_queues(netdev, lio->linfo.num_txpciq))
+ if (lio_reset_queues(netdev, oct->num_iqs))
goto err_lio_reset_queues;
if (stopped)
@@ -1057,50 +1438,48 @@ lio_get_ethtool_stats(struct net_device *netdev,
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
- struct net_device_stats *netstats = &netdev->stats;
+ struct rtnl_link_stats64 lstats;
int i = 0, j;
if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
return;
- netdev->netdev_ops->ndo_get_stats(netdev);
- octnet_get_link_stats(netdev);
-
+ netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
- data[i++] = CVM_CAST64(netstats->rx_packets);
+ data[i++] = lstats.rx_packets;
/*sum of oct->instr_queue[iq_no]->stats.tx_done */
- data[i++] = CVM_CAST64(netstats->tx_packets);
+ data[i++] = lstats.tx_packets;
/*sum of oct->droq[oq_no]->stats->rx_bytes_received */
- data[i++] = CVM_CAST64(netstats->rx_bytes);
+ data[i++] = lstats.rx_bytes;
/*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
- data[i++] = CVM_CAST64(netstats->tx_bytes);
- data[i++] = CVM_CAST64(netstats->rx_errors +
- oct_dev->link_stats.fromwire.fcs_err +
- oct_dev->link_stats.fromwire.jabber_err +
- oct_dev->link_stats.fromwire.l2_err +
- oct_dev->link_stats.fromwire.frame_err);
- data[i++] = CVM_CAST64(netstats->tx_errors);
+ data[i++] = lstats.tx_bytes;
+ data[i++] = lstats.rx_errors +
+ oct_dev->link_stats.fromwire.fcs_err +
+ oct_dev->link_stats.fromwire.jabber_err +
+ oct_dev->link_stats.fromwire.l2_err +
+ oct_dev->link_stats.fromwire.frame_err;
+ data[i++] = lstats.tx_errors;
/*sum of oct->droq[oq_no]->stats->rx_dropped +
*oct->droq[oq_no]->stats->dropped_nodispatch +
*oct->droq[oq_no]->stats->dropped_toomany +
*oct->droq[oq_no]->stats->dropped_nomem
*/
- data[i++] = CVM_CAST64(netstats->rx_dropped +
- oct_dev->link_stats.fromwire.fifo_err +
- oct_dev->link_stats.fromwire.dmac_drop +
- oct_dev->link_stats.fromwire.red_drops +
- oct_dev->link_stats.fromwire.fw_err_pko +
- oct_dev->link_stats.fromwire.fw_err_link +
- oct_dev->link_stats.fromwire.fw_err_drop);
+ data[i++] = lstats.rx_dropped +
+ oct_dev->link_stats.fromwire.fifo_err +
+ oct_dev->link_stats.fromwire.dmac_drop +
+ oct_dev->link_stats.fromwire.red_drops +
+ oct_dev->link_stats.fromwire.fw_err_pko +
+ oct_dev->link_stats.fromwire.fw_err_link +
+ oct_dev->link_stats.fromwire.fw_err_drop;
/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
- data[i++] = CVM_CAST64(netstats->tx_dropped +
- oct_dev->link_stats.fromhost.max_collision_fail +
- oct_dev->link_stats.fromhost.max_deferral_fail +
- oct_dev->link_stats.fromhost.total_collisions +
- oct_dev->link_stats.fromhost.fw_err_pko +
- oct_dev->link_stats.fromhost.fw_err_link +
- oct_dev->link_stats.fromhost.fw_err_drop +
- oct_dev->link_stats.fromhost.fw_err_pki);
+ data[i++] = lstats.tx_dropped +
+ oct_dev->link_stats.fromhost.max_collision_fail +
+ oct_dev->link_stats.fromhost.max_deferral_fail +
+ oct_dev->link_stats.fromhost.total_collisions +
+ oct_dev->link_stats.fromhost.fw_err_pko +
+ oct_dev->link_stats.fromhost.fw_err_link +
+ oct_dev->link_stats.fromhost.fw_err_drop +
+ oct_dev->link_stats.fromhost.fw_err_pki;
/* firmware tx stats */
/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
@@ -1135,6 +1514,10 @@ lio_get_ethtool_stats(struct net_device *netdev,
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
+ /* Multicast packets sent by this port */
+ data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
+ data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
+
/* mac tx statistics */
/*CVMX_BGXX_CMRX_TX_STAT5 */
data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
@@ -1171,6 +1554,9 @@ lio_get_ethtool_stats(struct net_device *netdev,
*fw_total_fwd
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
+ /* Multicast packets received on this port */
+ data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
+ data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
/*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
/*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
@@ -1339,7 +1725,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
__attribute__((unused)),
u64 *data)
{
- struct net_device_stats *netstats = &netdev->stats;
+ struct rtnl_link_stats64 lstats;
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
int i = 0, j, vj;
@@ -1347,25 +1733,31 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
return;
- netdev->netdev_ops->ndo_get_stats(netdev);
+ netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
- data[i++] = CVM_CAST64(netstats->rx_packets);
+ data[i++] = lstats.rx_packets;
/* sum of oct->instr_queue[iq_no]->stats.tx_done */
- data[i++] = CVM_CAST64(netstats->tx_packets);
+ data[i++] = lstats.tx_packets;
/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
- data[i++] = CVM_CAST64(netstats->rx_bytes);
+ data[i++] = lstats.rx_bytes;
/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
- data[i++] = CVM_CAST64(netstats->tx_bytes);
- data[i++] = CVM_CAST64(netstats->rx_errors);
- data[i++] = CVM_CAST64(netstats->tx_errors);
+ data[i++] = lstats.tx_bytes;
+ data[i++] = lstats.rx_errors;
+ data[i++] = lstats.tx_errors;
/* sum of oct->droq[oq_no]->stats->rx_dropped +
* oct->droq[oq_no]->stats->dropped_nodispatch +
* oct->droq[oq_no]->stats->dropped_toomany +
* oct->droq[oq_no]->stats->dropped_nomem
*/
- data[i++] = CVM_CAST64(netstats->rx_dropped);
+ data[i++] = lstats.rx_dropped;
/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
- data[i++] = CVM_CAST64(netstats->tx_dropped);
+ data[i++] = lstats.tx_dropped;
+
+ data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
+ data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
+ data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
+ data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
+
/* lio->link_changes */
data[i++] = CVM_CAST64(lio->link_changes);
@@ -1776,162 +2168,6 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
return -EINTR;
}
-static void
-octnet_nic_stats_callback(struct octeon_device *oct_dev,
- u32 status, void *ptr)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
- struct oct_nic_stats_resp *resp =
- (struct oct_nic_stats_resp *)sc->virtrptr;
- struct oct_nic_stats_ctrl *ctrl =
- (struct oct_nic_stats_ctrl *)sc->ctxptr;
- struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
- struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
-
- struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
- struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
-
- if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
- octeon_swap_8B_data((u64 *)&resp->stats,
- (sizeof(struct oct_link_stats)) >> 3);
-
- /* RX link-level stats */
- rstats->total_rcvd = rsp_rstats->total_rcvd;
- rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
- rstats->total_bcst = rsp_rstats->total_bcst;
- rstats->total_mcst = rsp_rstats->total_mcst;
- rstats->runts = rsp_rstats->runts;
- rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
- /* Accounts for over/under-run of buffers */
- rstats->fifo_err = rsp_rstats->fifo_err;
- rstats->dmac_drop = rsp_rstats->dmac_drop;
- rstats->fcs_err = rsp_rstats->fcs_err;
- rstats->jabber_err = rsp_rstats->jabber_err;
- rstats->l2_err = rsp_rstats->l2_err;
- rstats->frame_err = rsp_rstats->frame_err;
- rstats->red_drops = rsp_rstats->red_drops;
-
- /* RX firmware stats */
- rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
- rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
- rstats->fw_err_pko = rsp_rstats->fw_err_pko;
- rstats->fw_err_link = rsp_rstats->fw_err_link;
- rstats->fw_err_drop = rsp_rstats->fw_err_drop;
- rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
- rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
-
- /* Number of packets that are LROed */
- rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
- /* Number of octets that are LROed */
- rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
- /* Number of LRO packets formed */
- rstats->fw_total_lro = rsp_rstats->fw_total_lro;
- /* Number of times lRO of packet aborted */
- rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
- rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
- rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
- rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
- rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
- /* intrmod: packet forward rate */
- rstats->fwd_rate = rsp_rstats->fwd_rate;
-
- /* TX link-level stats */
- tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
- tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
- tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
- tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
- tstats->ctl_sent = rsp_tstats->ctl_sent;
- /* Packets sent after one collision*/
- tstats->one_collision_sent = rsp_tstats->one_collision_sent;
- /* Packets sent after multiple collision*/
- tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
- /* Packets not sent due to max collisions */
- tstats->max_collision_fail = rsp_tstats->max_collision_fail;
- /* Packets not sent due to max deferrals */
- tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
- /* Accounts for over/under-run of buffers */
- tstats->fifo_err = rsp_tstats->fifo_err;
- tstats->runts = rsp_tstats->runts;
- /* Total number of collisions detected */
- tstats->total_collisions = rsp_tstats->total_collisions;
-
- /* firmware stats */
- tstats->fw_total_sent = rsp_tstats->fw_total_sent;
- tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
- tstats->fw_err_pko = rsp_tstats->fw_err_pko;
- tstats->fw_err_pki = rsp_tstats->fw_err_pki;
- tstats->fw_err_link = rsp_tstats->fw_err_link;
- tstats->fw_err_drop = rsp_tstats->fw_err_drop;
- tstats->fw_tso = rsp_tstats->fw_tso;
- tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
- tstats->fw_err_tso = rsp_tstats->fw_err_tso;
- tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
-
- resp->status = 1;
- } else {
- resp->status = -1;
- }
- complete(&ctrl->complete);
-}
-
-/* Configure interrupt moderation parameters */
-static int octnet_get_link_stats(struct net_device *netdev)
-{
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct_dev = lio->oct_dev;
-
- struct octeon_soft_command *sc;
- struct oct_nic_stats_ctrl *ctrl;
- struct oct_nic_stats_resp *resp;
-
- int retval;
-
- /* Alloc soft command */
- sc = (struct octeon_soft_command *)
- octeon_alloc_soft_command(oct_dev,
- 0,
- sizeof(struct oct_nic_stats_resp),
- sizeof(struct octnic_ctrl_pkt));
-
- if (!sc)
- return -ENOMEM;
-
- resp = (struct oct_nic_stats_resp *)sc->virtrptr;
- memset(resp, 0, sizeof(struct oct_nic_stats_resp));
-
- ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
- memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
- ctrl->netdev = netdev;
- init_completion(&ctrl->complete);
-
- sc->iq_no = lio->linfo.txpciq[0].s.q_no;
-
- octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
- OPCODE_NIC_PORT_STATS, 0, 0, 0);
-
- sc->callback = octnet_nic_stats_callback;
- sc->callback_arg = sc;
- sc->wait_time = 500; /*in milli seconds*/
-
- retval = octeon_send_soft_command(oct_dev, sc);
- if (retval == IQ_SEND_FAILED) {
- octeon_free_soft_command(oct_dev, sc);
- return -EINVAL;
- }
-
- wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
-
- if (resp->status != 1) {
- octeon_free_soft_command(oct_dev, sc);
-
- return -EINVAL;
- }
-
- octeon_free_soft_command(oct_dev, sc);
-
- return 0;
-}
-
static int lio_get_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *intr_coal)
{
@@ -2876,6 +3112,7 @@ static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
static const struct ethtool_ops lio_ethtool_ops = {
.get_link_ksettings = lio_get_link_ksettings,
+ .set_link_ksettings = lio_set_link_ksettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = lio_get_drvinfo,
.get_ringparam = lio_ethtool_get_ringparam,
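
The ethtool table above gains .set_link_ksettings alongside the existing .get_link_ksettings. The handler itself is not part of this hunk; a minimal sketch of what such a handler can look like for a 10G/25G-capable part is shown below, assuming liquidio_set_speed() (declared later in this patch) takes the speed in Gbps. The function name and the validation logic are illustrative only, not the driver's actual lio_set_link_ksettings().

static int lio_set_link_ksettings_sketch(struct net_device *netdev,
					 const struct ethtool_link_ksettings *ks)
{
	struct lio *lio = GET_LIO(netdev);

	/* Only forced 10G or 25G makes sense for these parts. */
	if (ks->base.autoneg != AUTONEG_DISABLE)
		return -EOPNOTSUPP;
	if (ks->base.speed != SPEED_10000 && ks->base.speed != SPEED_25000)
		return -EOPNOTSUPP;

	/* Assumed interface: speed is passed in Gbps (10 or 25). */
	return liquidio_set_speed(lio, ks->base.speed == SPEED_25000 ? 25 : 10);
}
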
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 44c2a0c1bbdd..e500528ad751 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -138,33 +138,10 @@ union tx_info {
* by this structure in the NIC module.
*/
-#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
-
#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
-/** Structure of a node in list of gather components maintained by
- * NIC driver for each network device.
- */
-struct octnic_gather {
- /** List manipulation. Next and prev pointers. */
- struct list_head list;
-
- /** Size of the gather component at sg in bytes. */
- int sg_size;
-
- /** Number of bytes that sg was adjusted to make it 8B-aligned. */
- int adjust;
-
- /** Gather component that can accommodate max sized fragment list
- * received from the IP layer.
- */
- struct octeon_sg_entry *sg;
-
- dma_addr_t sg_dma_ptr;
-};
-
struct handshake {
struct completion init;
struct completion started;
@@ -520,7 +497,7 @@ static void liquidio_deinit_pci(void)
*/
static inline int check_txq_status(struct lio *lio)
{
- int numqs = lio->netdev->num_tx_queues;
+ int numqs = lio->netdev->real_num_tx_queues;
int ret_val = 0;
int q, iq;
@@ -542,148 +519,6 @@ static inline int check_txq_status(struct lio *lio)
}
/**
- * Remove the node at the head of the list. The list would be empty at
- * the end of this call if there are no more nodes in the list.
- */
-static inline struct list_head *list_delete_head(struct list_head *root)
-{
- struct list_head *node;
-
- if ((root->prev == root) && (root->next == root))
- node = NULL;
- else
- node = root->next;
-
- if (node)
- list_del(node);
-
- return node;
-}
-
-/**
- * \brief Delete gather lists
- * @param lio per-network private data
- */
-static void delete_glists(struct lio *lio)
-{
- struct octnic_gather *g;
- int i;
-
- kfree(lio->glist_lock);
- lio->glist_lock = NULL;
-
- if (!lio->glist)
- return;
-
- for (i = 0; i < lio->linfo.num_txpciq; i++) {
- do {
- g = (struct octnic_gather *)
- list_delete_head(&lio->glist[i]);
- if (g)
- kfree(g);
- } while (g);
-
- if (lio->glists_virt_base && lio->glists_virt_base[i] &&
- lio->glists_dma_base && lio->glists_dma_base[i]) {
- lio_dma_free(lio->oct_dev,
- lio->glist_entry_size * lio->tx_qsize,
- lio->glists_virt_base[i],
- lio->glists_dma_base[i]);
- }
- }
-
- kfree(lio->glists_virt_base);
- lio->glists_virt_base = NULL;
-
- kfree(lio->glists_dma_base);
- lio->glists_dma_base = NULL;
-
- kfree(lio->glist);
- lio->glist = NULL;
-}
-
-/**
- * \brief Setup gather lists
- * @param lio per-network private data
- */
-static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
-{
- int i, j;
- struct octnic_gather *g;
-
- lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
- GFP_KERNEL);
- if (!lio->glist_lock)
- return -ENOMEM;
-
- lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
- GFP_KERNEL);
- if (!lio->glist) {
- kfree(lio->glist_lock);
- lio->glist_lock = NULL;
- return -ENOMEM;
- }
-
- lio->glist_entry_size =
- ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
-
- /* allocate memory to store virtual and dma base address of
- * per glist consistent memory
- */
- lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
- GFP_KERNEL);
- lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
- GFP_KERNEL);
-
- if (!lio->glists_virt_base || !lio->glists_dma_base) {
- delete_glists(lio);
- return -ENOMEM;
- }
-
- for (i = 0; i < num_iqs; i++) {
- int numa_node = dev_to_node(&oct->pci_dev->dev);
-
- spin_lock_init(&lio->glist_lock[i]);
-
- INIT_LIST_HEAD(&lio->glist[i]);
-
- lio->glists_virt_base[i] =
- lio_dma_alloc(oct,
- lio->glist_entry_size * lio->tx_qsize,
- &lio->glists_dma_base[i]);
-
- if (!lio->glists_virt_base[i]) {
- delete_glists(lio);
- return -ENOMEM;
- }
-
- for (j = 0; j < lio->tx_qsize; j++) {
- g = kzalloc_node(sizeof(*g), GFP_KERNEL,
- numa_node);
- if (!g)
- g = kzalloc(sizeof(*g), GFP_KERNEL);
- if (!g)
- break;
-
- g->sg = lio->glists_virt_base[i] +
- (j * lio->glist_entry_size);
-
- g->sg_dma_ptr = lio->glists_dma_base[i] +
- (j * lio->glist_entry_size);
-
- list_add_tail(&g->list, &lio->glist[i]);
- }
-
- if (j != lio->tx_qsize) {
- delete_glists(lio);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-/**
* \brief Print link information
* @param netdev network device
*/
@@ -1077,6 +912,9 @@ liquidio_probe(struct pci_dev *pdev,
/* set linux specific device pointer */
oct_dev->pci_dev = (void *)pdev;
+ oct_dev->subsystem_id = pdev->subsystem_vendor |
+ (pdev->subsystem_device << 16);
+
hs = &handshake[oct_dev->octeon_id];
init_completion(&hs->init);
init_completion(&hs->started);
@@ -1471,7 +1309,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
cleanup_rx_oom_poll_fn(netdev);
- delete_glists(lio);
+ lio_delete_glists(lio);
free_netdev(netdev);
@@ -1686,7 +1524,7 @@ static void free_netsgbuf(void *buf)
i++;
}
- iq = skb_iq(lio, skb);
+ iq = skb_iq(lio->oct_dev, skb);
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
spin_unlock(&lio->glist_lock[iq]);
@@ -1729,7 +1567,7 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- iq = skb_iq(lio, skb);
+ iq = skb_iq(lio->oct_dev, skb);
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
@@ -1942,39 +1780,6 @@ static int load_firmware(struct octeon_device *oct)
}
/**
- * \brief Callback for getting interface configuration
- * @param status status of request
- * @param buf pointer to resp structure
- */
-static void if_cfg_callback(struct octeon_device *oct,
- u32 status __attribute__((unused)),
- void *buf)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- struct liquidio_if_cfg_resp *resp;
- struct liquidio_if_cfg_context *ctx;
-
- resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
-
- oct = lio_get_device(ctx->octeon_id);
- if (resp->status)
- dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
- CVM_CAST64(resp->status), status);
- WRITE_ONCE(ctx->cond, 1);
-
- snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
- resp->cfg_info.liquidio_firmware_version);
-
- /* This barrier is required to be sure that the response has been
- * written fully before waking up the handler
- */
- wmb();
-
- wake_up_interruptible(&ctx->wc);
-}
-
-/**
* \brief Poll routine for checking transmit queue status
* @param work work_struct data structure
*/
@@ -2049,11 +1854,6 @@ static int liquidio_open(struct net_device *netdev)
ifstate_set(lio, LIO_IFSTATE_RUNNING);
- /* Ready for link status updates */
- lio->intf_open = 1;
-
- netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
-
if (OCTEON_CN23XX_PF(oct)) {
if (!oct->msix_on)
if (setup_tx_poll_fn(netdev))
@@ -2063,7 +1863,12 @@ static int liquidio_open(struct net_device *netdev)
return -1;
}
- start_txqs(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ /* Ready for link status updates */
+ lio->intf_open = 1;
+
+ netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
/* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1);
@@ -2086,11 +1891,15 @@ static int liquidio_stop(struct net_device *netdev)
ifstate_reset(lio, LIO_IFSTATE_RUNNING);
- netif_tx_disable(netdev);
+ /* Stop any link updates */
+ lio->intf_open = 0;
+
+ stop_txqs(netdev);
/* Inform that netif carrier is down */
netif_carrier_off(netdev);
- lio->intf_open = 0;
+ netif_tx_disable(netdev);
+
lio->linfo.link.s.link_up = 0;
lio->link_changes++;
@@ -2252,14 +2061,11 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return 0;
}
-/**
- * \brief Net device get_stats
- * @param netdev network device
- */
-static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
+static void
+liquidio_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *lstats)
{
struct lio *lio = GET_LIO(netdev);
- struct net_device_stats *stats = &netdev->stats;
struct octeon_device *oct;
u64 pkts = 0, drop = 0, bytes = 0;
struct oct_droq_stats *oq_stats;
@@ -2269,7 +2075,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
oct = lio->oct_dev;
if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
- return stats;
+ return;
for (i = 0; i < oct->num_iqs; i++) {
iq_no = lio->linfo.txpciq[i].s.q_no;
@@ -2279,9 +2085,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
bytes += iq_stats->tx_tot_bytes;
}
- stats->tx_packets = pkts;
- stats->tx_bytes = bytes;
- stats->tx_dropped = drop;
+ lstats->tx_packets = pkts;
+ lstats->tx_bytes = bytes;
+ lstats->tx_dropped = drop;
pkts = 0;
drop = 0;
@@ -2298,11 +2104,34 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
bytes += oq_stats->rx_bytes_received;
}
- stats->rx_bytes = bytes;
- stats->rx_packets = pkts;
- stats->rx_dropped = drop;
-
- return stats;
+ lstats->rx_bytes = bytes;
+ lstats->rx_packets = pkts;
+ lstats->rx_dropped = drop;
+
+ octnet_get_link_stats(netdev);
+ lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
+ lstats->collisions = oct->link_stats.fromhost.total_collisions;
+
+ /* detailed rx_errors: */
+ lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
+ /* recved pkt with crc error */
+ lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
+ /* recv'd frame alignment error */
+ lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
+ /* recv'r fifo overrun */
+ lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
+
+ lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
+ lstats->rx_frame_errors + lstats->rx_fifo_errors;
+
+ /* detailed tx_errors */
+ lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
+ lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
+ lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
+
+ lstats->tx_errors = lstats->tx_aborted_errors +
+ lstats->tx_carrier_errors +
+ lstats->tx_fifo_errors;
}
/**
@@ -2510,7 +2339,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
lio = GET_LIO(netdev);
oct = lio->oct_dev;
- q_idx = skb_iq(lio, skb);
+ q_idx = skb_iq(oct, skb);
tag = q_idx;
iq_no = lio->linfo.txpciq[q_idx].s.q_no;
@@ -2603,7 +2432,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
spin_lock(&lio->glist_lock[q_idx]);
g = (struct octnic_gather *)
- list_delete_head(&lio->glist[q_idx]);
+ lio_list_delete_head(&lio->glist[q_idx]);
spin_unlock(&lio->glist_lock[q_idx]);
if (!g) {
@@ -3326,11 +3155,36 @@ static const struct switchdev_ops lio_pf_switchdev_ops = {
.switchdev_port_attr_get = lio_pf_switchdev_attr_get,
};
+static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_vf_stats stats;
+ int ret;
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
+ memset(&stats, 0, sizeof(struct oct_vf_stats));
+ ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
+ if (!ret) {
+ vf_stats->rx_packets = stats.rx_packets;
+ vf_stats->tx_packets = stats.tx_packets;
+ vf_stats->rx_bytes = stats.rx_bytes;
+ vf_stats->tx_bytes = stats.tx_bytes;
+ vf_stats->broadcast = stats.broadcast;
+ vf_stats->multicast = stats.multicast;
+ }
+
+ return ret;
+}
+
static const struct net_device_ops lionetdevops = {
.ndo_open = liquidio_open,
.ndo_stop = liquidio_stop,
.ndo_start_xmit = liquidio_xmit,
- .ndo_get_stats = liquidio_get_stats,
+ .ndo_get_stats64 = liquidio_get_stats64,
.ndo_set_mac_address = liquidio_set_mac,
.ndo_set_rx_mode = liquidio_set_mcast_list,
.ndo_tx_timeout = liquidio_tx_timeout,
@@ -3348,6 +3202,7 @@ static const struct net_device_ops lionetdevops = {
.ndo_get_vf_config = liquidio_get_vf_config,
.ndo_set_vf_trust = liquidio_set_vf_trust,
.ndo_set_vf_link_state = liquidio_set_vf_link_state,
+ .ndo_get_vf_stats = liquidio_get_vf_stats,
};
/** \brief Entry point for the liquidio module
@@ -3450,6 +3305,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
struct liquidio_if_cfg_resp *resp;
struct octdev_props *props;
int retval, num_iqueues, num_oqueues;
+ int max_num_queues = 0;
union oct_nic_if_cfg if_cfg;
unsigned int base_queue;
unsigned int gmx_port_id;
@@ -3530,9 +3386,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
OPCODE_NIC_IF_CFG, 0,
if_cfg.u64, 0);
- sc->callback = if_cfg_callback;
+ sc->callback = lio_if_cfg_callback;
sc->callback_arg = sc;
- sc->wait_time = 3000;
+ sc->wait_time = LIO_IFCFG_WAIT_TIME;
retval = octeon_send_soft_command(octeon_dev, sc);
if (retval == IQ_SEND_FAILED) {
@@ -3586,11 +3442,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
resp->cfg_info.oqmask);
goto setup_nic_dev_fail;
}
+
+ if (OCTEON_CN6XXX(octeon_dev)) {
+ max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
+ cn6xxx));
+ } else if (OCTEON_CN23XX_PF(octeon_dev)) {
+ max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
+ cn23xx_pf));
+ }
+
dev_dbg(&octeon_dev->pci_dev->dev,
- "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
+ "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
- num_iqueues, num_oqueues);
- netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
+ num_iqueues, num_oqueues, max_num_queues);
+ netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
if (!netdev) {
dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
@@ -3605,6 +3470,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
netdev->netdev_ops = &lionetdevops;
SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
+ retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "setting real number rx failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "setting real number tx failed\n");
+ goto setup_nic_dev_fail;
+ }
+
lio = GET_LIO(netdev);
memset(lio, 0, sizeof(struct lio));
@@ -3726,7 +3605,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
- if (setup_glists(octeon_dev, lio, num_iqueues)) {
+ if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
dev_err(&octeon_dev->pci_dev->dev,
"Gather list allocation failed\n");
goto setup_nic_dev_fail;
@@ -3788,6 +3667,23 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
"NIC ifidx:%d Setup successful\n", i);
octeon_free_soft_command(octeon_dev, sc);
+
+ if (octeon_dev->subsystem_id ==
+ OCTEON_CN2350_25GB_SUBSYS_ID ||
+ octeon_dev->subsystem_id ==
+ OCTEON_CN2360_25GB_SUBSYS_ID) {
+ liquidio_get_speed(lio);
+
+ if (octeon_dev->speed_setting == 0) {
+ octeon_dev->speed_setting = 25;
+ octeon_dev->no_speed_setting = 1;
+ }
+ } else {
+ octeon_dev->no_speed_setting = 1;
+ octeon_dev->speed_setting = 10;
+ }
+ octeon_dev->speed_boot = octeon_dev->speed_setting;
+
}
devlink = devlink_alloc(&liquidio_devlink_ops,
@@ -4225,7 +4121,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
}
atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
- if (octeon_allocate_ioq_vector(octeon_dev)) {
+ if (octeon_allocate_ioq_vector
+ (octeon_dev,
+ octeon_dev->sriov_info.num_pf_rings)) {
dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
return 1;
}
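
The hunks above convert the PF from the legacy .ndo_get_stats (which returned a pointer into netdev->stats) to .ndo_get_stats64: the stack passes in a zeroed struct rtnl_link_stats64 and the driver fills it, so a device that is mid-reset can simply return without reporting anything. A condensed sketch of the resulting pattern, mirroring the loops above (the function name is hypothetical; the fields are the driver's own):

static void sketch_get_stats64(struct net_device *netdev,
			       struct rtnl_link_stats64 *lstats)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u64 pkts = 0, bytes = 0, drop = 0;
	int i;

	/* void return: while resetting, leave the caller-zeroed stats alone */
	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	for (i = 0; i < oct->num_iqs; i++) {
		struct oct_iq_stats *s =
			&oct->instr_queue[lio->linfo.txpciq[i].s.q_no]->stats;

		pkts += s->tx_done;
		bytes += s->tx_tot_bytes;
		drop += s->tx_dropped;
	}
	lstats->tx_packets = pkts;
	lstats->tx_bytes = bytes;
	lstats->tx_dropped = drop;
	/* the rx side and the detailed error counters follow the same shape */
}
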
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index f92dfa411de6..7fa0212873ac 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -69,30 +69,10 @@ union tx_info {
} s;
};
-#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
-
#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
-struct octnic_gather {
- /* List manipulation. Next and prev pointers. */
- struct list_head list;
-
- /* Size of the gather component at sg in bytes. */
- int sg_size;
-
- /* Number of bytes that sg was adjusted to make it 8B-aligned. */
- int adjust;
-
- /* Gather component that can accommodate max sized fragment list
- * received from the IP layer.
- */
- struct octeon_sg_entry *sg;
-
- dma_addr_t sg_dma_ptr;
-};
-
static int
liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void liquidio_vf_remove(struct pci_dev *pdev);
@@ -285,142 +265,6 @@ static struct pci_driver liquidio_vf_pci_driver = {
};
/**
- * Remove the node at the head of the list. The list would be empty at
- * the end of this call if there are no more nodes in the list.
- */
-static struct list_head *list_delete_head(struct list_head *root)
-{
- struct list_head *node;
-
- if ((root->prev == root) && (root->next == root))
- node = NULL;
- else
- node = root->next;
-
- if (node)
- list_del(node);
-
- return node;
-}
-
-/**
- * \brief Delete gather lists
- * @param lio per-network private data
- */
-static void delete_glists(struct lio *lio)
-{
- struct octnic_gather *g;
- int i;
-
- kfree(lio->glist_lock);
- lio->glist_lock = NULL;
-
- if (!lio->glist)
- return;
-
- for (i = 0; i < lio->linfo.num_txpciq; i++) {
- do {
- g = (struct octnic_gather *)
- list_delete_head(&lio->glist[i]);
- kfree(g);
- } while (g);
-
- if (lio->glists_virt_base && lio->glists_virt_base[i] &&
- lio->glists_dma_base && lio->glists_dma_base[i]) {
- lio_dma_free(lio->oct_dev,
- lio->glist_entry_size * lio->tx_qsize,
- lio->glists_virt_base[i],
- lio->glists_dma_base[i]);
- }
- }
-
- kfree(lio->glists_virt_base);
- lio->glists_virt_base = NULL;
-
- kfree(lio->glists_dma_base);
- lio->glists_dma_base = NULL;
-
- kfree(lio->glist);
- lio->glist = NULL;
-}
-
-/**
- * \brief Setup gather lists
- * @param lio per-network private data
- */
-static int setup_glists(struct lio *lio, int num_iqs)
-{
- struct octnic_gather *g;
- int i, j;
-
- lio->glist_lock =
- kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
- if (!lio->glist_lock)
- return -ENOMEM;
-
- lio->glist =
- kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
- if (!lio->glist) {
- kfree(lio->glist_lock);
- lio->glist_lock = NULL;
- return -ENOMEM;
- }
-
- lio->glist_entry_size =
- ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
-
- /* allocate memory to store virtual and dma base address of
- * per glist consistent memory
- */
- lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
- GFP_KERNEL);
- lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
- GFP_KERNEL);
-
- if (!lio->glists_virt_base || !lio->glists_dma_base) {
- delete_glists(lio);
- return -ENOMEM;
- }
-
- for (i = 0; i < num_iqs; i++) {
- spin_lock_init(&lio->glist_lock[i]);
-
- INIT_LIST_HEAD(&lio->glist[i]);
-
- lio->glists_virt_base[i] =
- lio_dma_alloc(lio->oct_dev,
- lio->glist_entry_size * lio->tx_qsize,
- &lio->glists_dma_base[i]);
-
- if (!lio->glists_virt_base[i]) {
- delete_glists(lio);
- return -ENOMEM;
- }
-
- for (j = 0; j < lio->tx_qsize; j++) {
- g = kzalloc(sizeof(*g), GFP_KERNEL);
- if (!g)
- break;
-
- g->sg = lio->glists_virt_base[i] +
- (j * lio->glist_entry_size);
-
- g->sg_dma_ptr = lio->glists_dma_base[i] +
- (j * lio->glist_entry_size);
-
- list_add_tail(&g->list, &lio->glist[i]);
- }
-
- if (j != lio->tx_qsize) {
- delete_glists(lio);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-/**
* \brief Print link information
* @param netdev network device
*/
@@ -567,6 +411,9 @@ liquidio_vf_probe(struct pci_dev *pdev,
/* set linux specific device pointer */
oct_dev->pci_dev = pdev;
+ oct_dev->subsystem_id = pdev->subsystem_vendor |
+ (pdev->subsystem_device << 16);
+
if (octeon_device_init(oct_dev)) {
liquidio_vf_remove(pdev);
return -ENOMEM;
@@ -856,7 +703,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
cleanup_link_status_change_wq(netdev);
- delete_glists(lio);
+ lio_delete_glists(lio);
free_netdev(netdev);
@@ -1005,7 +852,7 @@ static void free_netsgbuf(void *buf)
i++;
}
- iq = skb_iq(lio, skb);
+ iq = skb_iq(lio->oct_dev, skb);
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
@@ -1049,7 +896,7 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- iq = skb_iq(lio, skb);
+ iq = skb_iq(lio->oct_dev, skb);
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
@@ -1059,38 +906,6 @@ static void free_netsgbuf_with_resp(void *buf)
}
/**
- * \brief Callback for getting interface configuration
- * @param status status of request
- * @param buf pointer to resp structure
- */
-static void if_cfg_callback(struct octeon_device *oct,
- u32 status __attribute__((unused)), void *buf)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- struct liquidio_if_cfg_context *ctx;
- struct liquidio_if_cfg_resp *resp;
-
- resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
-
- oct = lio_get_device(ctx->octeon_id);
- if (resp->status)
- dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
- CVM_CAST64(resp->status));
- WRITE_ONCE(ctx->cond, 1);
-
- snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
- resp->cfg_info.liquidio_firmware_version);
-
- /* This barrier is required to be sure that the response has been
- * written fully before waking up the handler
- */
- wmb();
-
- wake_up_interruptible(&ctx->wc);
-}
-
-/**
* \brief Net device open for LiquidIO
* @param netdev network device
*/
@@ -1336,24 +1151,21 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return 0;
}
-/**
- * \brief Net device get_stats
- * @param netdev network device
- */
-static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
+static void
+liquidio_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *lstats)
{
struct lio *lio = GET_LIO(netdev);
- struct net_device_stats *stats = &netdev->stats;
+ struct octeon_device *oct;
u64 pkts = 0, drop = 0, bytes = 0;
struct oct_droq_stats *oq_stats;
struct oct_iq_stats *iq_stats;
- struct octeon_device *oct;
int i, iq_no, oq_no;
oct = lio->oct_dev;
if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
- return stats;
+ return;
for (i = 0; i < oct->num_iqs; i++) {
iq_no = lio->linfo.txpciq[i].s.q_no;
@@ -1363,9 +1175,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
bytes += iq_stats->tx_tot_bytes;
}
- stats->tx_packets = pkts;
- stats->tx_bytes = bytes;
- stats->tx_dropped = drop;
+ lstats->tx_packets = pkts;
+ lstats->tx_bytes = bytes;
+ lstats->tx_dropped = drop;
pkts = 0;
drop = 0;
@@ -1382,11 +1194,29 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
bytes += oq_stats->rx_bytes_received;
}
- stats->rx_bytes = bytes;
- stats->rx_packets = pkts;
- stats->rx_dropped = drop;
+ lstats->rx_bytes = bytes;
+ lstats->rx_packets = pkts;
+ lstats->rx_dropped = drop;
+
+ octnet_get_link_stats(netdev);
+ lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
+
+ /* detailed rx_errors: */
+ lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
+ /* recved pkt with crc error */
+ lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
+ /* recv'd frame alignment error */
+ lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
- return stats;
+ lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
+ lstats->rx_frame_errors;
+
+ /* detailed tx_errors */
+ lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
+ lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
+
+ lstats->tx_errors = lstats->tx_aborted_errors +
+ lstats->tx_carrier_errors;
}
/**
@@ -1580,7 +1410,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
lio = GET_LIO(netdev);
oct = lio->oct_dev;
- q_idx = skb_iq(lio, skb);
+ q_idx = skb_iq(lio->oct_dev, skb);
tag = q_idx;
iq_no = lio->linfo.txpciq[q_idx].s.q_no;
@@ -1661,8 +1491,8 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
int i, frags;
spin_lock(&lio->glist_lock[q_idx]);
- g = (struct octnic_gather *)list_delete_head(
- &lio->glist[q_idx]);
+ g = (struct octnic_gather *)
+ lio_list_delete_head(&lio->glist[q_idx]);
spin_unlock(&lio->glist_lock[q_idx]);
if (!g) {
@@ -2034,7 +1864,7 @@ static const struct net_device_ops lionetdevops = {
.ndo_open = liquidio_open,
.ndo_stop = liquidio_stop,
.ndo_start_xmit = liquidio_xmit,
- .ndo_get_stats = liquidio_get_stats,
+ .ndo_get_stats64 = liquidio_get_stats64,
.ndo_set_mac_address = liquidio_set_mac,
.ndo_set_rx_mode = liquidio_set_mcast_list,
.ndo_tx_timeout = liquidio_tx_timeout,
@@ -2156,7 +1986,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
0);
- sc->callback = if_cfg_callback;
+ sc->callback = lio_if_cfg_callback;
sc->callback_arg = sc;
sc->wait_time = 5000;
@@ -2273,6 +2103,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
netdev->hw_features = lio->dev_capability;
+ netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
/* MTU range: 68 - 16000 */
netdev->min_mtu = LIO_MIN_MTU_SIZE;
@@ -2321,7 +2152,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
- if (setup_glists(lio, num_iqueues)) {
+ if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
dev_err(&octeon_dev->pci_dev->dev,
"Gather list allocation failed\n");
goto setup_nic_dev_fail;
@@ -2371,6 +2202,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
"NIC ifidx:%d Setup successful\n", i);
octeon_free_soft_command(octeon_dev, sc);
+
+ octeon_dev->no_speed_setting = 1;
}
return 0;
@@ -2512,7 +2345,7 @@ static int octeon_device_init(struct octeon_device *oct)
}
atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
- if (octeon_allocate_ioq_vector(oct)) {
+ if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
return 1;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index 2adafa366d3f..ddd7431579f4 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -201,13 +201,14 @@ lio_vf_rep_get_stats64(struct net_device *dev,
{
struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
- stats64->tx_packets = vf_rep->stats.tx_packets;
- stats64->tx_bytes = vf_rep->stats.tx_bytes;
- stats64->tx_dropped = vf_rep->stats.tx_dropped;
-
- stats64->rx_packets = vf_rep->stats.rx_packets;
- stats64->rx_bytes = vf_rep->stats.rx_bytes;
- stats64->rx_dropped = vf_rep->stats.rx_dropped;
+ /* Swap tx and rx stats as VF rep is a switch port */
+ stats64->tx_packets = vf_rep->stats.rx_packets;
+ stats64->tx_bytes = vf_rep->stats.rx_bytes;
+ stats64->tx_dropped = vf_rep->stats.rx_dropped;
+
+ stats64->rx_packets = vf_rep->stats.tx_packets;
+ stats64->rx_bytes = vf_rep->stats.tx_bytes;
+ stats64->rx_dropped = vf_rep->stats.tx_dropped;
}
static int
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 34a94daca590..690424b6781a 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -28,7 +28,7 @@
#define LIQUIDIO_PACKAGE ""
#define LIQUIDIO_BASE_MAJOR_VERSION 1
#define LIQUIDIO_BASE_MINOR_VERSION 7
-#define LIQUIDIO_BASE_MICRO_VERSION 0
+#define LIQUIDIO_BASE_MICRO_VERSION 2
#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
__stringify(LIQUIDIO_BASE_MINOR_VERSION)
#define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
@@ -84,6 +84,7 @@ enum octeon_tag_type {
#define OPCODE_NIC_IF_CFG 0x09
#define OPCODE_NIC_VF_DRV_NOTICE 0x0A
#define OPCODE_NIC_INTRMOD_PARAMS 0x0B
+#define OPCODE_NIC_QCOUNT_UPDATE 0x12
#define OPCODE_NIC_SET_TRUSTED_VF 0x13
#define OPCODE_NIC_SYNC_OCTEON_TIME 0x14
#define VF_DRV_LOADED 1
@@ -92,6 +93,7 @@ enum octeon_tag_type {
#define OPCODE_NIC_VF_REP_PKT 0x15
#define OPCODE_NIC_VF_REP_CMD 0x16
+#define OPCODE_NIC_UBOOT_CTL 0x17
#define CORE_DRV_TEST_SCATTER_OP 0xFFF5
@@ -248,6 +250,9 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1
#define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0
+#define SEAPI_CMD_SPEED_SET 0x2
+#define SEAPI_CMD_SPEED_GET 0x3
+
#define LIO_CMD_WAIT_TM 100
/* RX(packets coming from wire) Checksum verification flags */
@@ -802,6 +807,9 @@ struct nic_rx_stats {
u64 fw_total_rcvd;
u64 fw_total_fwd;
u64 fw_total_fwd_bytes;
+ u64 fw_total_mcast;
+ u64 fw_total_bcast;
+
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
@@ -858,6 +866,8 @@ struct nic_tx_stats {
u64 fw_total_sent;
u64 fw_total_fwd;
u64 fw_total_fwd_bytes;
+ u64 fw_total_mcast_sent;
+ u64 fw_total_bcast_sent;
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index f38abf626412..f878a552fef3 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -824,23 +824,18 @@ int octeon_deregister_device(struct octeon_device *oct)
}
int
-octeon_allocate_ioq_vector(struct octeon_device *oct)
+octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs)
{
- int i, num_ioqs = 0;
struct octeon_ioq_vector *ioq_vector;
int cpu_num;
int size;
-
- if (OCTEON_CN23XX_PF(oct))
- num_ioqs = oct->sriov_info.num_pf_rings;
- else if (OCTEON_CN23XX_VF(oct))
- num_ioqs = oct->sriov_info.rings_per_vf;
+ int i;
size = sizeof(struct octeon_ioq_vector) * num_ioqs;
oct->ioq_vector = vzalloc(size);
if (!oct->ioq_vector)
- return 1;
+ return -1;
for (i = 0; i < num_ioqs; i++) {
ioq_vector = &oct->ioq_vector[i];
ioq_vector->oct_dev = oct;
@@ -856,6 +851,7 @@ octeon_allocate_ioq_vector(struct octeon_device *oct)
else
ioq_vector->ioq_num = i;
}
+
return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 91937cc5c1d7..94a4ed88d618 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -43,6 +43,13 @@
#define OCTEON_CN23XX_REV_1_1 0x01
#define OCTEON_CN23XX_REV_2_0 0x80
+/**SubsystemId for the chips */
+#define OCTEON_CN2350_10GB_SUBSYS_ID_1 0X3177d
+#define OCTEON_CN2350_10GB_SUBSYS_ID_2 0X4177d
+#define OCTEON_CN2360_10GB_SUBSYS_ID 0X5177d
+#define OCTEON_CN2350_25GB_SUBSYS_ID 0X7177d
+#define OCTEON_CN2360_25GB_SUBSYS_ID 0X6177d
+
/** Endian-swap modes supported by Octeon. */
enum octeon_pci_swap_mode {
OCTEON_PCI_PASSTHROUGH = 0,
@@ -430,6 +437,8 @@ struct octeon_device {
u16 rev_id;
+ u32 subsystem_id;
+
u16 pf_num;
u16 vf_num;
@@ -584,6 +593,11 @@ struct octeon_device {
struct lio_vf_rep_list vf_rep_list;
struct devlink *devlink;
enum devlink_eswitch_mode eswitch_mode;
+
+ /* for 25G NIC speed change */
+ u8 speed_boot;
+ u8 speed_setting;
+ u8 no_speed_setting;
};
#define OCT_DRV_ONLINE 1
@@ -867,7 +881,7 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
struct octeon_config *octeon_get_conf(struct octeon_device *oct);
void octeon_free_ioq_vector(struct octeon_device *oct);
-int octeon_allocate_ioq_vector(struct octeon_device *oct);
+int octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs);
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);
/* LiquidIO driver pivate flags */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 28e74ee23ff8..021d99cd1665 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -24,6 +24,7 @@
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_mailbox.h"
+#include "cn23xx_pf_device.h"
/**
* octeon_mbox_read:
@@ -205,6 +206,26 @@ int octeon_mbox_write(struct octeon_device *oct,
return ret;
}
+static void get_vf_stats(struct octeon_device *oct,
+ struct oct_vf_stats *stats)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ if (!oct->instr_queue[i])
+ continue;
+ stats->tx_packets += oct->instr_queue[i]->stats.tx_done;
+ stats->tx_bytes += oct->instr_queue[i]->stats.tx_tot_bytes;
+ }
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (!oct->droq[i])
+ continue;
+ stats->rx_packets += oct->droq[i]->stats.rx_pkts_received;
+ stats->rx_bytes += oct->droq[i]->stats.rx_bytes_received;
+ }
+}
+
/**
* octeon_mbox_process_cmd:
* @mbox: Pointer mailbox
@@ -250,6 +271,15 @@ static int octeon_mbox_process_cmd(struct octeon_mbox *mbox,
mbox_cmd->msg.s.params);
break;
+ case OCTEON_GET_VF_STATS:
+ dev_dbg(&oct->pci_dev->dev, "Got VF stats request. Sending data back\n");
+ mbox_cmd->msg.s.type = OCTEON_MBOX_RESPONSE;
+ mbox_cmd->msg.s.resp_needed = 1;
+ mbox_cmd->msg.s.len = 1 +
+ sizeof(struct oct_vf_stats) / sizeof(u64);
+ get_vf_stats(oct, (struct oct_vf_stats *)mbox_cmd->data);
+ octeon_mbox_write(oct, mbox_cmd);
+ break;
default:
break;
}
@@ -322,3 +352,25 @@ int octeon_mbox_process_message(struct octeon_mbox *mbox)
return 0;
}
+
+int octeon_mbox_cancel(struct octeon_device *oct, int q_no)
+{
+ struct octeon_mbox *mbox = oct->mbox[q_no];
+ struct octeon_mbox_cmd *mbox_cmd;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&mbox->lock, flags);
+ mbox_cmd = &mbox->mbox_resp;
+
+ if (!(mbox->state & OCTEON_MBOX_STATE_RESPONSE_PENDING)) {
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return 1;
+ }
+
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ memset(mbox_cmd, 0, sizeof(*mbox_cmd));
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ spin_unlock_irqrestore(&mbox->lock, flags);
+
+ return 0;
+}
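
octeon_mbox_cancel() above gives a requester that timed out a way to clear the RESPONSE_PENDING state and re-arm the read register so the mailbox can be reused. A hedged sketch of the caller-side pattern this enables; the completion, the 500 ms timeout, and the queue index are placeholders rather than the driver's actual code:

static int sketch_mbox_request_wait(struct octeon_device *oct,
				    struct completion *req_done)
{
	/* req_done would be completed by the mailbox response handler */
	if (!wait_for_completion_timeout(req_done, msecs_to_jiffies(500))) {
		octeon_mbox_cancel(oct, 0);	/* q_no 0: first mailbox */
		return -ETIMEDOUT;
	}
	return 0;
}
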
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
index 1def22afeff1..d92bd7e16477 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -25,6 +25,7 @@
#define OCTEON_VF_ACTIVE 0x1
#define OCTEON_VF_FLR_REQUEST 0x2
#define OCTEON_PF_CHANGED_VF_MACADDR 0x4
+#define OCTEON_GET_VF_STATS 0x8
/*Macro for Read acknowldgement*/
#define OCTEON_PFVFACK 0xffffffffffffffffULL
@@ -107,9 +108,15 @@ struct octeon_mbox {
};
+struct oct_vf_stats_ctx {
+ atomic_t status;
+ struct oct_vf_stats *stats;
+};
+
int octeon_mbox_read(struct octeon_mbox *mbox);
int octeon_mbox_write(struct octeon_device *oct,
struct octeon_mbox_cmd *mbox_cmd);
int octeon_mbox_process_message(struct octeon_mbox *mbox);
+int octeon_mbox_cancel(struct octeon_device *oct, int q_no);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 4069710796a8..d7a3916fe877 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -47,6 +47,29 @@ struct liquidio_if_cfg_resp {
u64 status;
};
+#define LIO_IFCFG_WAIT_TIME 3000 /* In milliseconds */

+
+/* Structure of a node in list of gather components maintained by
+ * NIC driver for each network device.
+ */
+struct octnic_gather {
+ /* List manipulation. Next and prev pointers. */
+ struct list_head list;
+
+ /* Size of the gather component at sg in bytes. */
+ int sg_size;
+
+ /* Number of bytes that sg was adjusted to make it 8B-aligned. */
+ int adjust;
+
+ /* Gather component that can accommodate max sized fragment list
+ * received from the IP layer.
+ */
+ struct octeon_sg_entry *sg;
+
+ dma_addr_t sg_dma_ptr;
+};
+
struct oct_nic_stats_resp {
u64 rh;
struct oct_link_stats stats;
@@ -58,6 +81,18 @@ struct oct_nic_stats_ctrl {
struct net_device *netdev;
};
+struct oct_nic_seapi_resp {
+ u64 rh;
+ u32 speed;
+ u64 status;
+};
+
+struct liquidio_nic_seapi_ctl_context {
+ int octeon_id;
+ u32 status;
+ struct completion complete;
+};
+
/** LiquidIO per-interface network private data */
struct lio {
/** State of the interface. Rx/Tx happens only in the RUNNING state. */
@@ -157,7 +192,7 @@ struct lio {
#define LIO_SIZE (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
-#define LIO_MAX_CORES 12
+#define LIO_MAX_CORES 16
/**
* \brief Enable or disable feature
@@ -190,6 +225,8 @@ irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);
+int octnet_get_link_stats(struct net_device *netdev);
+
int lio_wait_for_clean_oq(struct octeon_device *oct);
/**
* \brief Register ethtool operations
@@ -197,6 +234,17 @@ int lio_wait_for_clean_oq(struct octeon_device *oct);
*/
void liquidio_set_ethtool_ops(struct net_device *netdev);
+void lio_if_cfg_callback(struct octeon_device *oct,
+ u32 status __attribute__((unused)),
+ void *buf);
+
+void lio_delete_glists(struct lio *lio);
+
+int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);
+
+int liquidio_get_speed(struct lio *lio);
+int liquidio_set_speed(struct lio *lio, int speed);
+
/**
* \brief Net device change_mtu
* @param netdev network device
@@ -515,7 +563,7 @@ static inline void stop_txqs(struct net_device *netdev)
{
int i;
- for (i = 0; i < netdev->num_tx_queues; i++)
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
netif_stop_subqueue(netdev, i);
}
@@ -528,7 +576,7 @@ static inline void wake_txqs(struct net_device *netdev)
struct lio *lio = GET_LIO(netdev);
int i, qno;
- for (i = 0; i < netdev->num_tx_queues; i++) {
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;
if (__netif_subqueue_stopped(netdev, i)) {
@@ -549,14 +597,33 @@ static inline void start_txqs(struct net_device *netdev)
int i;
if (lio->linfo.link.s.link_up) {
- for (i = 0; i < netdev->num_tx_queues; i++)
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
netif_start_subqueue(netdev, i);
}
}
-static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
+static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb)
{
- return skb->queue_mapping % lio->linfo.num_txpciq;
+ return skb->queue_mapping % oct->num_iqs;
+}
+
+/**
+ * Remove the node at the head of the list. The list would be empty at
+ * the end of this call if there are no more nodes in the list.
+ */
+static inline struct list_head *lio_list_delete_head(struct list_head *root)
+{
+ struct list_head *node;
+
+ if (root->prev == root && root->next == root)
+ node = NULL;
+ else
+ node = root->next;
+
+ if (node)
+ list_del(node);
+
+ return node;
}
#endif
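
The queue helpers above now iterate over real_num_tx_queues rather than num_tx_queues; that is safe because the netdev is allocated for the chip's maximum queue count and then trimmed to what the firmware actually configured (see the lio_main.c hunks earlier in this patch). Condensed, with error handling elided:

	netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
	if (!netdev)
		goto setup_nic_dev_fail;

	/* limit the stack to the queues this interface really got */
	if (netif_set_real_num_tx_queues(netdev, num_iqueues) ||
	    netif_set_real_num_rx_queues(netdev, num_oqueues))
		goto setup_nic_dev_fail;
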
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 707db3304396..7135db45927e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -538,9 +538,9 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
action = bpf_prog_run_xdp(prog, &xdp);
rcu_read_unlock();
+ len = xdp.data_end - xdp.data;
/* Check if XDP program has changed headers */
if (orig_data != xdp.data) {
- len = xdp.data_end - xdp.data;
offset = orig_data - xdp.data;
dma_addr -= offset;
}
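
The nicvf hunk above moves the length computation out of the header-adjustment branch: an XDP program can change xdp.data_end (bpf_xdp_adjust_tail()) without moving xdp.data, so the frame length has to be recomputed unconditionally after the program runs. The resulting pattern:

	action = bpf_prog_run_xdp(prog, &xdp);

	/* data_end may have moved even when data did not */
	len = xdp.data_end - xdp.data;

	if (orig_data != xdp.data) {
		/* headroom was consumed or released; fix the DMA address */
		offset = orig_data - xdp.data;
		dma_addr -= offset;
	}
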
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index b57acb8dc35b..3c5057868ab3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -62,6 +62,18 @@ struct cudbg_hw_sched {
u32 map;
};
+#define SGE_QBASE_DATA_REG_NUM 4
+
+struct sge_qbase_reg_field {
+ u32 reg_addr;
+ u32 reg_data[SGE_QBASE_DATA_REG_NUM];
+ /* Max supported PFs */
+ u32 pf_data_value[PCIE_FW_MASTER_M + 1][SGE_QBASE_DATA_REG_NUM];
+ /* Max supported VFs */
+ u32 vf_data_value[T6_VF_M + 1][SGE_QBASE_DATA_REG_NUM];
+ u32 vfcount; /* Actual number of max vfs in current configuration */
+};
+
struct ireg_field {
u32 ireg_addr;
u32 ireg_data;
@@ -235,6 +247,9 @@ struct cudbg_vpd_data {
};
#define CUDBG_MAX_TCAM_TID 0x800
+#define CUDBG_T6_CLIP 1536
+#define CUDBG_MAX_TID_COMP_EN 6144
+#define CUDBG_MAX_TID_COMP_DIS 3072
enum cudbg_le_entry_types {
LE_ET_UNKNOWN = 0,
@@ -354,6 +369,11 @@ static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = {
{0x10cc, 0x10d4, 0x0, 16},
};
+static const u32 t6_sge_qbase_index_array[] = {
+ /* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */
+ 0x1250, 0x1240, 0x1244, 0x1248, 0x124c,
+};
+
static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = {
{0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */
{0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */
@@ -419,15 +439,15 @@ static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
- {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
- {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
- {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
- {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
- {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
- {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
- {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
- {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
- {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
+ {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
+ {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
+ {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
+ {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
+ {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
+ {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
+ {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
+ {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
+ {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
};
static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
@@ -444,16 +464,6 @@ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
- {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
- {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
- {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
- {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
- {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
- {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
- {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
- {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
- {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
- {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
};
static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
index 8568a51f6414..215fe6260fd7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
@@ -24,6 +24,7 @@
#define CUDBG_STATUS_NOT_IMPLEMENTED -28
#define CUDBG_SYSTEM_ERROR -29
#define CUDBG_STATUS_CCLK_NOT_DEFINED -32
+#define CUDBG_STATUS_PARTIAL_DATA -41
#define CUDBG_MAJOR_VERSION 1
#define CUDBG_MINOR_VERSION 14
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 9da6f57901a9..0afcfe99bff3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -1339,16 +1339,39 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
+static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap,
+ struct sge_qbase_reg_field *qbase,
+ u32 func, bool is_pf)
+{
+ u32 *buff, i;
+
+ if (is_pf) {
+ buff = qbase->pf_data_value[func];
+ } else {
+ buff = qbase->vf_data_value[func];
+ /* In SGE_QBASE_INDEX,
+ * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256.
+ */
+ func += 8;
+ }
+
+ t4_write_reg(padap, qbase->reg_addr, func);
+ for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++)
+ *buff = t4_read_reg(padap, qbase->reg_data[i]);
+}
+
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
+ struct sge_qbase_reg_field *sge_qbase;
struct ireg_buf *ch_sge_dbg;
int i, rc;
- rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(*ch_sge_dbg) * 2,
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
&temp_buff);
if (rc)
return rc;
@@ -1370,6 +1393,28 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
sge_pio->ireg_local_offset);
ch_sge_dbg++;
}
+
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
+ sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
+ /* 1 addr reg SGE_QBASE_INDEX and 4 data reg
+ * SGE_QBASE_MAP[0-3]
+ */
+ sge_qbase->reg_addr = t6_sge_qbase_index_array[0];
+ for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++)
+ sge_qbase->reg_data[i] =
+ t6_sge_qbase_index_array[i + 1];
+
+ for (i = 0; i <= PCIE_FW_MASTER_M; i++)
+ cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
+ i, true);
+
+ for (i = 0; i < padap->params.arch.vfcount; i++)
+ cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
+ i, false);
+
+ sge_qbase->vfcount = padap->params.arch.vfcount;
+ }
+
return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
@@ -2366,8 +2411,11 @@ void cudbg_fill_le_tcam_info(struct adapter *padap,
value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
tcam_region->routing_start = value;
- /*Get clip table index */
- value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
+ /* Get clip table index. For T6 there is separate CLIP TCAM */
+ if (is_t6(padap->params.chip))
+ value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A);
+ else
+ value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
tcam_region->clip_start = value;
/* Get filter table index */
@@ -2392,8 +2440,16 @@ void cudbg_fill_le_tcam_info(struct adapter *padap,
tcam_region->tid_hash_base;
}
} else { /* hash not enabled */
- tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
+ if (is_t6(padap->params.chip))
+ tcam_region->max_tid = (value & ASLIPCOMPEN_F) ?
+ CUDBG_MAX_TID_COMP_EN :
+ CUDBG_MAX_TID_COMP_DIS;
+ else
+ tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
}
+
+ if (is_t6(padap->params.chip))
+ tcam_region->max_tid += CUDBG_T6_CLIP;
}
int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
@@ -2423,18 +2479,31 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
for (i = 0; i < tcam_region.max_tid; ) {
rc = cudbg_read_tid(pdbg_init, i, tid_data);
if (rc) {
- cudbg_err->sys_err = rc;
- cudbg_put_buff(pdbg_init, &temp_buff);
- return rc;
+ cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
+ /* Update tcam header and exit */
+ tcam_region.max_tid = i;
+ memcpy(temp_buff.data, &tcam_region,
+ sizeof(struct cudbg_tcam));
+ goto out;
}
- /* ipv6 takes two tids */
- cudbg_is_ipv6_entry(tid_data, tcam_region) ? i += 2 : i++;
+ if (cudbg_is_ipv6_entry(tid_data, tcam_region)) {
+ /* T6 CLIP TCAM: ipv6 takes 4 entries */
+ if (is_t6(padap->params.chip) &&
+ i >= tcam_region.clip_start &&
+ i < tcam_region.clip_start + CUDBG_T6_CLIP)
+ i += 4;
+ else /* Main TCAM: ipv6 takes two tids */
+ i += 2;
+ } else {
+ i++;
+ }
tid_data++;
bytes += sizeof(struct cudbg_tid_data);
}
+out:
return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 688f95440af2..211086bdbe20 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -50,6 +50,7 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
+#include <linux/crash_dump.h>
#include <asm/io.h>
#include "t4_chip_type.h"
#include "cxgb4_uld.h"
@@ -964,6 +965,9 @@ struct adapter {
struct hma_data hma;
struct srq_data *srq;
+
+ /* Dump buffer for collecting logs in kdump kernel */
+ struct vmcoredd_data vmcoredd;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1034,6 +1038,7 @@ struct ch_sched_queue {
#define VF_BITWIDTH 8
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16
+#define ENCAP_VNI_BITWIDTH 24
/* Filter matching rules. These consist of a set of ingress packet field
* (value, mask) tuples. The associated ingress packet field matches the
@@ -1064,6 +1069,7 @@ struct ch_filter_tuple {
uint32_t ivlan_vld:1; /* inner VLAN valid */
uint32_t ovlan_vld:1; /* outer VLAN valid */
uint32_t pfvf_vld:1; /* PF/VF valid */
+ uint32_t encap_vld:1; /* Encapsulation valid */
uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
uint32_t iport:IPORT_BITWIDTH; /* ingress port */
@@ -1074,6 +1080,7 @@ struct ch_filter_tuple {
uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
+ uint32_t vni:ENCAP_VNI_BITWIDTH; /* VNI of tunnel */
/* Uncompressed header matching field rules. These are always
* available for field rules.
@@ -1690,6 +1697,12 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
const u8 *addr, const u8 *mask, unsigned int idx,
u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, int idx,
+ bool sleep_ok);
+int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int vni,
+ unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
+ bool sleep_ok);
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
const u8 *addr, const u8 *mask, unsigned int idx,
u8 lookup_type, u8 port_id, bool sleep_ok);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 143686c60234..8d751efcb90e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -214,7 +214,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct ireg_buf) * n;
break;
case CUDBG_SGE_INDIRECT:
- len = sizeof(struct ireg_buf) * 2;
+ len = sizeof(struct ireg_buf) * 2 +
+ sizeof(struct sge_qbase_reg_field);
break;
case CUDBG_ULPRX_LA:
len = sizeof(struct cudbg_ulprx_la);
@@ -488,3 +489,28 @@ void cxgb4_init_ethtool_dump(struct adapter *adapter)
adapter->eth_dump.version = adapter->params.fw_vers;
adapter->eth_dump.len = 0;
}
+
+static int cxgb4_cudbg_vmcoredd_collect(struct vmcoredd_data *data, void *buf)
+{
+ struct adapter *adap = container_of(data, struct adapter, vmcoredd);
+ u32 len = data->size;
+
+ return cxgb4_cudbg_collect(adap, buf, &len, CXGB4_ETH_DUMP_ALL);
+}
+
+int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap)
+{
+ struct vmcoredd_data *data = &adap->vmcoredd;
+ u32 len;
+
+ len = sizeof(struct cudbg_hdr) +
+ sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
+ len += CUDBG_DUMP_BUFF_SIZE;
+
+ data->size = len;
+ snprintf(data->dump_name, sizeof(data->dump_name), "%s_%s",
+ cxgb4_driver_name, adap->name);
+ data->vmcoredd_callback = cxgb4_cudbg_vmcoredd_collect;
+
+ return vmcore_add_device_dump(data);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
index ce1ac9a1c878..ef59ba1ed968 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
@@ -41,8 +41,11 @@ enum CXGB4_ETHTOOL_DUMP_FLAGS {
CXGB4_ETH_DUMP_HW = (1 << 1), /* various FW and HW dumps */
};
+#define CXGB4_ETH_DUMP_ALL (CXGB4_ETH_DUMP_MEM | CXGB4_ETH_DUMP_HW)
+
u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag);
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
u32 flag);
void cxgb4_init_ethtool_dump(struct adapter *adapter);
+int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap);
#endif /* __CXGB4_CUDBG_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index db92f1858060..00fc5f1afb1d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -64,8 +64,7 @@ static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
if (!skb)
return -ENOMEM;
- req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
req->reply_ctrl = htons(REPLY_CHAN_V(0) |
QUEUENO_V(adap->sge.fw_evtq.abs_id) |
@@ -266,6 +265,8 @@ static int validate_filter(struct net_device *dev,
fs->mask.pfvf_vld) ||
unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
fs->mask.ovlan_vld) ||
+ unsupported(fconf, VNIC_ID_F, fs->val.encap_vld,
+ fs->mask.encap_vld) ||
unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
return -EOPNOTSUPP;
@@ -276,8 +277,12 @@ static int validate_filter(struct net_device *dev,
* carries that overlap, we need to translate any PF/VF
* specification into that internal format below.
*/
- if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
- is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld))
+ if ((is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
+ is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) ||
+ (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
+ is_field_set(fs->val.encap_vld, fs->mask.encap_vld)) ||
+ (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
+ is_field_set(fs->val.encap_vld, fs->mask.encap_vld)))
return -EOPNOTSUPP;
if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
(is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
@@ -307,6 +312,9 @@ static int validate_filter(struct net_device *dev,
fs->newvlan == VLAN_REWRITE))
return -EOPNOTSUPP;
+ if (fs->val.encap_vld &&
+ CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+ return -EOPNOTSUPP;
return 0;
}
@@ -706,6 +714,8 @@ int delete_filter(struct adapter *adapter, unsigned int fidx)
*/
void clear_filter(struct adapter *adap, struct filter_entry *f)
{
+ struct port_info *pi = netdev_priv(f->dev);
+
/* If the new or old filter has loopback rewriting rules then we'll
* need to free any existing L2T, SMT, CLIP entries of the filter
* rule.
@@ -716,6 +726,12 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
if (f->smt)
cxgb4_smt_release(f->smt);
+ if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
+ if (atomic_dec_and_test(&adap->mps_encap[f->fs.val.ovlan &
+ 0x1ff].refcnt))
+ t4_free_encap_mac_filt(adap, pi->viid,
+ f->fs.val.ovlan & 0x1ff, 0);
+
if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
@@ -836,11 +852,15 @@ bool is_filter_exact_match(struct adapter *adap,
{
struct tp_params *tp = &adap->params.tp;
u64 hash_filter_mask = tp->hash_filter_mask;
- u32 mask;
+ u64 ntuple_mask = 0;
if (!is_hashfilter(adap))
return false;
+ /* Keep tunnel VNI match disabled for hash-filters for now */
+ if (fs->mask.encap_vld)
+ return false;
+
if (fs->type) {
if (is_inaddr_any(fs->val.fip, AF_INET6) ||
!is_addr_all_mask(fs->mask.fip, AF_INET6))
@@ -865,73 +885,45 @@ bool is_filter_exact_match(struct adapter *adap,
if (!fs->val.fport || fs->mask.fport != 0xffff)
return false;
- if (tp->fcoe_shift >= 0) {
- mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W;
- if (mask && !fs->mask.fcoe)
- return false;
- }
+ /* calculate tuple mask and compare with mask configured in hw */
+ if (tp->fcoe_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
- if (tp->port_shift >= 0) {
- mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W;
- if (mask && !fs->mask.iport)
- return false;
- }
+ if (tp->port_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
if (tp->vnic_shift >= 0) {
- mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W;
-
- if ((adap->params.tp.ingress_config & VNIC_F)) {
- if (mask && !fs->mask.pfvf_vld)
- return false;
- } else {
- if (mask && !fs->mask.ovlan_vld)
- return false;
- }
+ if ((adap->params.tp.ingress_config & VNIC_F))
+ ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
+ else
+ ntuple_mask |= (u64)fs->mask.ovlan_vld <<
+ tp->vnic_shift;
}
- if (tp->vlan_shift >= 0) {
- mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W;
- if (mask && !fs->mask.ivlan)
- return false;
- }
+ if (tp->vlan_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
- if (tp->tos_shift >= 0) {
- mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W;
- if (mask && !fs->mask.tos)
- return false;
- }
+ if (tp->tos_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
- if (tp->protocol_shift >= 0) {
- mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W;
- if (mask && !fs->mask.proto)
- return false;
- }
+ if (tp->protocol_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
- if (tp->ethertype_shift >= 0) {
- mask = (hash_filter_mask >> tp->ethertype_shift) &
- FT_ETHERTYPE_W;
- if (mask && !fs->mask.ethtype)
- return false;
- }
+ if (tp->ethertype_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
- if (tp->macmatch_shift >= 0) {
- mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W;
- if (mask && !fs->mask.macidx)
- return false;
- }
+ if (tp->macmatch_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
+
+ if (tp->matchtype_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
+
+ if (tp->frag_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
+
+ if (ntuple_mask != hash_filter_mask)
+ return false;
- if (tp->matchtype_shift >= 0) {
- mask = (hash_filter_mask >> tp->matchtype_shift) &
- FT_MPSHITTYPE_W;
- if (mask && !fs->mask.matchtype)
- return false;
- }
- if (tp->frag_shift >= 0) {
- mask = (hash_filter_mask >> tp->frag_shift) &
- FT_FRAGMENTATION_W;
- if (mask && !fs->mask.frag)
- return false;
- }
return true;
}
@@ -962,8 +954,12 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
ntuple |= (u64)(fs->val.tos) << tp->tos_shift;
if (tp->vnic_shift >= 0) {
- if ((adap->params.tp.ingress_config & VNIC_F) &&
- fs->mask.pfvf_vld)
+ if ((adap->params.tp.ingress_config & USE_ENC_IDX_F) &&
+ fs->mask.encap_vld)
+ ntuple |= (u64)((fs->val.encap_vld << 16) |
+ (fs->val.ovlan)) << tp->vnic_shift;
+ else if ((adap->params.tp.ingress_config & VNIC_F) &&
+ fs->mask.pfvf_vld)
ntuple |= (u64)((fs->val.pfvf_vld << 16) |
(fs->val.pf << 13) |
(fs->val.vf)) << tp->vnic_shift;
@@ -1077,6 +1073,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
+ struct port_info *pi = netdev_priv(dev);
struct tid_info *t = &adapter->tids;
struct filter_entry *f;
struct sk_buff *skb;
@@ -1143,13 +1140,34 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
f->fs.val.ovlan_vld = fs->val.pfvf_vld;
f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+ } else if (iconf & USE_ENC_IDX_F) {
+ if (f->fs.val.encap_vld) {
+ struct port_info *pi = netdev_priv(f->dev);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+
+ /* allocate MPS TCAM entry */
+ ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
+ match_all_mac,
+ match_all_mac,
+ f->fs.val.vni,
+ f->fs.mask.vni,
+ 0, 1, 1);
+ if (ret < 0)
+ goto free_atid;
+
+ atomic_inc(&adapter->mps_encap[ret].refcnt);
+ f->fs.val.ovlan = ret;
+ f->fs.mask.ovlan = 0xffff;
+ f->fs.val.ovlan_vld = 1;
+ f->fs.mask.ovlan_vld = 1;
+ }
}
size = sizeof(struct cpl_t6_act_open_req);
if (f->fs.type) {
ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
if (ret)
- goto free_atid;
+ goto free_mps;
skb = alloc_skb(size, GFP_KERNEL);
if (!skb) {
@@ -1164,7 +1182,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
skb = alloc_skb(size, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
- goto free_atid;
+ goto free_mps;
}
mk_act_open_req(f, skb,
@@ -1180,6 +1198,10 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
free_clip:
cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
+free_mps:
+ if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
+ t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1);
+
free_atid:
cxgb4_free_atid(t, atid);
@@ -1361,6 +1383,27 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
f->fs.val.ovlan_vld = fs->val.pfvf_vld;
f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+ } else if (iconf & USE_ENC_IDX_F) {
+ if (f->fs.val.encap_vld) {
+ struct port_info *pi = netdev_priv(f->dev);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+
+ /* allocate MPS TCAM entry */
+ ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
+ match_all_mac,
+ match_all_mac,
+ f->fs.val.vni,
+ f->fs.mask.vni,
+ 0, 1, 1);
+ if (ret < 0)
+ goto free_clip;
+
+ atomic_inc(&adapter->mps_encap[ret].refcnt);
+ f->fs.val.ovlan = ret;
+ f->fs.mask.ovlan = 0x1ff;
+ f->fs.val.ovlan_vld = 1;
+ f->fs.mask.ovlan_vld = 1;
+ }
}
/* Attempt to set the filter. If we don't succeed, we clear
@@ -1377,6 +1420,13 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
}
return ret;
+
+free_clip:
+ if (is_t6(adapter->params.chip) && f->fs.type)
+ cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET, chip_ver);
+ return ret;
}
static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
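
The reworked is_filter_exact_match() no longer checks each tuple field in isolation; it folds every per-field mask into a single ntuple mask at the shifts programmed in TP and requires it to equal the hardware hash-filter mask. A self-contained sketch of that idea, with made-up shift positions and field widths standing in for the TP configuration:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field shifts; the driver reads the real ones from TP config. */
struct tp_shifts { int port, vlan, proto, ethertype; };

static int exact_match(const struct tp_shifts *tp, uint64_t hw_mask,
		       uint8_t iport_mask, uint16_t ivlan_mask,
		       uint8_t proto_mask, uint16_t ethtype_mask)
{
	uint64_t ntuple_mask = 0;

	/* Fold every per-field mask into its position in the ntuple. */
	if (tp->port >= 0)
		ntuple_mask |= (uint64_t)iport_mask << tp->port;
	if (tp->vlan >= 0)
		ntuple_mask |= (uint64_t)ivlan_mask << tp->vlan;
	if (tp->proto >= 0)
		ntuple_mask |= (uint64_t)proto_mask << tp->proto;
	if (tp->ethertype >= 0)
		ntuple_mask |= (uint64_t)ethtype_mask << tp->ethertype;

	/* Exact match only if the request covers exactly the hw mask. */
	return ntuple_mask == hw_mask;
}

int main(void)
{
	struct tp_shifts tp = { .port = 0, .vlan = 3, .proto = 15, .ethertype = 23 };
	uint64_t hw_mask = ((uint64_t)0x7 << 0)  | ((uint64_t)0xfff << 3) |
			   ((uint64_t)0xff << 15) | ((uint64_t)0xffff << 23);

	printf("exact: %d\n", exact_match(&tp, hw_mask, 0x7, 0xfff, 0xff, 0xffff));
	return 0;
}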
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 24d2865b8806..130d1eed7993 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2886,13 +2886,13 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
}
/* Convert from Mbps to Kbps */
- req_rate = rate << 10;
+ req_rate = rate * 1000;
/* Max rate is 100 Gbps */
- if (req_rate >= SCHED_MAX_RATE_KBPS) {
+ if (req_rate > SCHED_MAX_RATE_KBPS) {
dev_err(adap->pdev_dev,
"Invalid rate %u Mbps, Max rate is %u Mbps\n",
- rate, SCHED_MAX_RATE_KBPS >> 10);
+ rate, SCHED_MAX_RATE_KBPS / 1000);
return -ERANGE;
}
@@ -3081,7 +3081,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev,
match_all_mac, match_all_mac,
adapter->rawf_start +
pi->port_id,
- 1, pi->port_id, true);
+ 1, pi->port_id, false);
if (ret < 0) {
netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
i);
@@ -3169,7 +3169,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
match_all_mac,
adapter->rawf_start +
pi->port_id,
- 1, pi->port_id, true);
+ 1, pi->port_id, false);
if (ret < 0) {
netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
be16_to_cpu(ti->port));
@@ -3433,8 +3433,8 @@ static int adap_config_hma(struct adapter *adapter)
sgl = adapter->hma.sgt->sgl;
node = dev_to_node(adapter->pdev_dev);
for_each_sg(sgl, iter, sgt->orig_nents, i) {
- newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL,
- page_order);
+ newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
+ __GFP_ZERO, page_order);
if (!newpage) {
dev_err(adapter->pdev_dev,
"Not enough memory for HMA page allocation\n");
@@ -4276,6 +4276,20 @@ static int adap_init0(struct adapter *adap)
adap->tids.nftids = val[4] - val[3] + 1;
adap->sge.ingr_start = val[5];
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ /* Read the raw mps entries. In T6, the last 2 tcam entries
+ * are reserved for raw mac addresses (rawf = 2, one per port).
+ */
+ params[0] = FW_PARAM_PFVF(RAWF_START);
+ params[1] = FW_PARAM_PFVF(RAWF_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+ params, val);
+ if (ret == 0) {
+ adap->rawf_start = val[0];
+ adap->rawf_cnt = val[1] - val[0] + 1;
+ }
+ }
+
/* qids (ingress/egress) returned from firmware can be anywhere
* in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
* Hence driver needs to allocate memory for this range to
@@ -5181,6 +5195,7 @@ static void free_some_resources(struct adapter *adapter)
{
unsigned int i;
+ kvfree(adapter->mps_encap);
kvfree(adapter->smt);
kvfree(adapter->l2t);
kvfree(adapter->srq);
@@ -5261,13 +5276,9 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
u32 pcie_fw;
pcie_fw = readl(adap->regs + PCIE_FW_A);
- /* Check if cxgb4 is the MASTER and fw is initialized */
- if (num_vfs &&
- (!(pcie_fw & PCIE_FW_INIT_F) ||
- !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
- PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) {
- dev_warn(&pdev->dev,
- "cxgb4 driver needs to be MASTER to support SRIOV\n");
+ /* Check if fw is initialized */
+ if (!(pcie_fw & PCIE_FW_INIT_F)) {
+ dev_warn(&pdev->dev, "Device not initialized\n");
return -EOPNOTSUPP;
}
@@ -5474,6 +5485,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
spin_lock_init(&adapter->mbox_lock);
INIT_LIST_HEAD(&adapter->mlist.list);
+ adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
pci_set_drvdata(pdev, adapter);
if (func != ent->driver_data) {
@@ -5508,8 +5520,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_adapter;
}
- adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
-
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
@@ -5544,6 +5554,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_free_adapter;
+ if (is_kdump_kernel()) {
+ /* Collect hardware state and append to /proc/vmcore */
+ err = cxgb4_cudbg_vmcore_add_dump(adapter);
+ if (err) {
+ dev_warn(adapter->pdev_dev,
+ "Fail collecting vmcore device dump, err: %d. Continuing\n",
+ err);
+ err = 0;
+ }
+ }
if (!is_t4(adapter->params.chip)) {
s_qpp = (QUEUESPERPAGEPF0_S +
@@ -5611,8 +5631,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_TC;
- if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)
+ if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5) {
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
@@ -5677,6 +5704,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->params.offload = 0;
}
+ adapter->mps_encap = kvzalloc(sizeof(struct mps_encap_entry) *
+ adapter->params.arch.mps_tcam_size,
+ GFP_KERNEL);
+ if (!adapter->mps_encap)
+ dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");
+
#if IS_ENABLED(CONFIG_IPV6)
if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
(!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
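
The cxgb_set_tx_maxrate() change replaces the power-of-two shift with a decimal conversion: rate << 10 multiplies by 1024 and overshoots the Kbps-based scheduler limit, while rate * 1000 is the intended Mbps-to-Kbps scaling. A small stand-alone comparison; SCHED_MAX_RATE_KBPS is an illustrative value here:

#include <stdio.h>

#define SCHED_MAX_RATE_KBPS	100000000U	/* 100 Gbps, illustrative */

int main(void)
{
	unsigned int rate_mbps = 100000;		/* 100 Gbps requested */
	unsigned int shifted = rate_mbps << 10;		/* *1024: 102400000 */
	unsigned int scaled  = rate_mbps * 1000;	/* *1000: 100000000 */

	printf("<<10 : %u (rejected: %d)\n", shifted, shifted > SCHED_MAX_RATE_KBPS);
	printf("*1000: %u (rejected: %d)\n", scaled,  scaled  > SCHED_MAX_RATE_KBPS);
	return 0;
}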
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 36563364bae7..3ddd2c4acf68 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -194,6 +194,23 @@ static void cxgb4_process_flow_match(struct net_device *dev,
fs->mask.tos = mask->tos;
}
+ if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_dissector_key_keyid *key, *mask;
+
+ key = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ cls->key);
+ mask = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ cls->mask);
+ fs->val.vni = be32_to_cpu(key->keyid);
+ fs->mask.vni = be32_to_cpu(mask->keyid);
+ if (fs->mask.vni) {
+ fs->val.encap_vld = 1;
+ fs->mask.encap_vld = 1;
+ }
+ }
+
if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_dissector_key_vlan *key, *mask;
u16 vlan_tci, vlan_tci_mask;
@@ -247,6 +264,7 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IP))) {
netdev_warn(dev, "Unsupported key used: 0x%x\n",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 1817a0307d26..77c2c538b1fd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -491,7 +491,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
if (tp->protocol_shift >= 0)
ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
- if (tp->vnic_shift >= 0) {
+ if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) {
u32 viid = cxgb4_port_viid(dev);
u32 vf = FW_VIID_VIN_G(viid);
u32 pf = FW_VIID_PFN_G(viid);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1a28df137e1f..276f22357f81 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1072,12 +1072,27 @@ static void *inline_tx_skb_header(const struct sk_buff *skb,
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
int csum_type;
- const struct iphdr *iph = ip_hdr(skb);
+ bool inner_hdr_csum = false;
+ u16 proto, ver;
- if (iph->version == 4) {
- if (iph->protocol == IPPROTO_TCP)
+ if (skb->encapsulation &&
+ (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
+ inner_hdr_csum = true;
+
+ if (inner_hdr_csum) {
+ ver = inner_ip_hdr(skb)->version;
+ proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
+ inner_ipv6_hdr(skb)->nexthdr;
+ } else {
+ ver = ip_hdr(skb)->version;
+ proto = (ver == 4) ? ip_hdr(skb)->protocol :
+ ipv6_hdr(skb)->nexthdr;
+ }
+
+ if (ver == 4) {
+ if (proto == IPPROTO_TCP)
csum_type = TX_CSUM_TCPIP;
- else if (iph->protocol == IPPROTO_UDP)
+ else if (proto == IPPROTO_UDP)
csum_type = TX_CSUM_UDPIP;
else {
nocsum: /*
@@ -1090,19 +1105,29 @@ nocsum: /*
/*
* this doesn't work with extension headers
*/
- const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
-
- if (ip6h->nexthdr == IPPROTO_TCP)
+ if (proto == IPPROTO_TCP)
csum_type = TX_CSUM_TCPIP6;
- else if (ip6h->nexthdr == IPPROTO_UDP)
+ else if (proto == IPPROTO_UDP)
csum_type = TX_CSUM_UDPIP6;
else
goto nocsum;
}
if (likely(csum_type >= TX_CSUM_TCPIP)) {
- u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
- int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+ int eth_hdr_len, l4_len;
+ u64 hdr_len;
+
+ if (inner_hdr_csum) {
+ /* This allows checksum offload for all encapsulated
+ * packets like GRE, etc.
+ */
+ l4_len = skb_inner_network_header_len(skb);
+ eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
+ } else {
+ l4_len = skb_network_header_len(skb);
+ eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+ }
+ hdr_len = TXPKT_IPHDR_LEN_V(l4_len);
if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
@@ -1273,7 +1298,7 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 wr_mid, ctrl0, op;
- u64 cntrl, *end;
+ u64 cntrl, *end, *sgl;
int qidx, credits;
unsigned int flits, ndesc;
struct adapter *adap;
@@ -1386,8 +1411,9 @@ out_free: dev_kfree_skb_any(skb);
end = (u64 *)wr + flits;
len = immediate ? skb->len : 0;
+ len += sizeof(*cpl);
if (ssi->gso_size) {
- struct cpl_tx_pkt_lso *lso = (void *)wr;
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
@@ -1417,20 +1443,19 @@ out_free: dev_kfree_skb_any(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL)
cntrl = hwcsum(adap->params.chip, skb);
} else {
- lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
- LSO_IPV6_V(v6) |
- LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
- LSO_IPHDR_LEN_V(l3hdr_len / 4) |
- LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
- lso->c.ipid_ofst = htons(0);
- lso->c.mss = htons(ssi->gso_size);
- lso->c.seqno_offset = htonl(0);
+ lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = htons(0);
+ lso->mss = htons(ssi->gso_size);
+ lso->seqno_offset = htonl(0);
if (is_t4(adap->params.chip))
- lso->c.len = htonl(skb->len);
+ lso->len = htonl(skb->len);
else
- lso->c.len =
- htonl(LSO_T5_XFER_SIZE_V(skb->len));
+ lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
cpl = (void *)(lso + 1);
if (CHELSIO_CHIP_VERSION(adap->params.chip)
@@ -1443,10 +1468,22 @@ out_free: dev_kfree_skb_any(skb);
TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
TXPKT_IPHDR_LEN_V(l3hdr_len);
}
+ sgl = (u64 *)(cpl + 1); /* sgl start here */
+ if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
+ /* If the current position is already at the end of the
+ * txq, reset it to point to the start of the queue and
+ * update the end pointer as well.
+ */
+ if (sgl == (u64 *)q->q.stat) {
+ int left = (u8 *)end - (u8 *)q->q.stat;
+
+ end = (void *)q->q.desc + left;
+ sgl = (void *)q->q.desc;
+ }
+ }
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
- len += sizeof(*cpl);
if (ptp_enabled)
op = FW_PTP_TX_PKT_WR;
else
@@ -1454,6 +1491,7 @@ out_free: dev_kfree_skb_any(skb);
wr->op_immdlen = htonl(FW_WR_OP_V(op) |
FW_WR_IMMDLEN_V(len));
cpl = (void *)(wr + 1);
+ sgl = (u64 *)(cpl + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
cntrl = hwcsum(adap->params.chip, skb) |
TXPKT_IPCSUM_DIS_F;
@@ -1487,20 +1525,19 @@ out_free: dev_kfree_skb_any(skb);
cpl->ctrl1 = cpu_to_be64(cntrl);
if (immediate) {
- cxgb4_inline_tx_skb(skb, &q->q, cpl + 1);
+ cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
} else {
int last_desc;
- cxgb4_write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1),
- end, 0, addr);
+ cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);
skb_orphan(skb);
last_desc = q->q.pidx + ndesc - 1;
if (last_desc >= q->q.size)
last_desc -= q->q.size;
q->q.sdesc[last_desc].skb = skb;
- q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
+ q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
}
txq_advance(&q->q, ndesc);
@@ -2259,7 +2296,7 @@ static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
}
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
- const struct cpl_rx_pkt *pkt)
+ const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
{
struct adapter *adapter = rxq->rspq.adap;
struct sge *s = &adapter->sge;
@@ -2275,6 +2312,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
}
copy_frags(skb, gl, s->pktshift);
+ if (tnl_hdr_len)
+ skb->csum_level = 1;
skb->len = gl->tot_len - s->pktshift;
skb->data_len = skb->len;
skb->truesize += skb->data_len;
@@ -2406,7 +2445,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
struct sge *s = &q->adap->sge;
int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
- u16 err_vec;
+ u16 err_vec, tnl_hdr_len = 0;
struct port_info *pi;
int ret = 0;
@@ -2415,16 +2454,19 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
pkt = (const struct cpl_rx_pkt *)rsp;
/* Compressed error vector is enabled for T6 only */
- if (q->adap->params.tp.rx_pkt_encap)
+ if (q->adap->params.tp.rx_pkt_encap) {
err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
- else
+ tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec));
+ } else {
err_vec = be16_to_cpu(pkt->err_vec);
+ }
csum_ok = pkt->csum_calc && !err_vec &&
(q->netdev->features & NETIF_F_RXCSUM);
- if ((pkt->l2info & htonl(RXF_TCP_F)) &&
+ if (((pkt->l2info & htonl(RXF_TCP_F)) ||
+ tnl_hdr_len) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
- do_gro(rxq, si, pkt);
+ do_gro(rxq, si, pkt, tnl_hdr_len);
return 0;
}
@@ -2471,7 +2513,13 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
} else if (pkt->l2info & htonl(RXF_IP_F)) {
__sum16 c = (__force __sum16)pkt->csum;
skb->csum = csum_unfold(c);
- skb->ip_summed = CHECKSUM_COMPLETE;
+
+ if (tnl_hdr_len) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = 1;
+ } else {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
rxq->stats.rx_cso++;
}
} else {
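
In the sge.c hunk, hwcsum() now derives the IP version and L4 protocol from the inner headers when the skb is encapsulated and the chip can offload inner checksums, falling back to the outer headers otherwise. A toy version of that selection logic, using a plain struct in place of the skb accessors:

#include <stdio.h>
#include <stdint.h>

enum csum_type { CSUM_TCPIP, CSUM_UDPIP, CSUM_TCPIP6, CSUM_UDPIP6, CSUM_NONE };

/* Toy packet descriptor; the driver reads these from skb headers instead. */
struct pkt {
	int encapsulated;		/* tunnel header present */
	int chip_supports_inner;	/* T6+ in the driver */
	uint8_t outer_ver, outer_proto;
	uint8_t inner_ver, inner_proto;
};

static enum csum_type pick_csum(const struct pkt *p)
{
	int inner = p->encapsulated && p->chip_supports_inner;
	uint8_t ver   = inner ? p->inner_ver   : p->outer_ver;
	uint8_t proto = inner ? p->inner_proto : p->outer_proto;

	if (ver == 4)
		return proto == 6 ? CSUM_TCPIP : proto == 17 ? CSUM_UDPIP : CSUM_NONE;
	return proto == 6 ? CSUM_TCPIP6 : proto == 17 ? CSUM_UDPIP6 : CSUM_NONE;
}

int main(void)
{
	struct pkt vxlan_tcp = { 1, 1, 4, 17 /* outer UDP */, 4, 6 /* inner TCP */ };

	printf("csum type: %d\n", pick_csum(&vxlan_tcp));	/* CSUM_TCPIP */
	return 0;
}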
diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c
index 6228a5708307..82b70a565e24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/srq.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c
@@ -84,8 +84,7 @@ int cxgb4_get_srq_entry(struct net_device *dev,
if (!skb)
return -ENOMEM;
req = (struct cpl_srq_table_req *)
- __skb_put(skb, sizeof(*req));
- memset(req, 0, sizeof(*req));
+ __skb_put_zero(skb, sizeof(*req));
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ,
TID_TID_V(srq_idx) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 7cb3ef466cc7..df5e7c79223b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -7513,6 +7513,43 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_free_encap_mac_filt - frees MPS entry at given index
+ * @adap: the adapter
+ * @viid: the VI id
+ * @idx: index of MPS entry to be freed
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Frees the MPS entry at the supplied index
+ *
+ * Returns a negative error number or zero on success
+ */
+int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
+ int idx, bool sleep_ok)
+{
+ struct fw_vi_mac_exact *p;
+ u8 addr[] = {0, 0, 0, 0, 0, 0};
+ struct fw_vi_mac_cmd c;
+ int ret = 0;
+ u32 exact;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC);
+ c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+ exact |
+ FW_CMD_LEN16_V(1));
+ p = c.u.exact;
+ p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(idx));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+ return ret;
+}
+
+/**
* t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
* @adap: the adapter
* @viid: the VI id
@@ -7563,6 +7600,55 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
}
/**
+ * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @vni: the VNI id for the tunnel protocol
+ * @vni_mask: mask for the VNI id
+ * @dip_hit: to enable DIP match for the MPS entry
+ * @lookup_type: match the MAC address on the inner (1) or outer (0) header
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Allocates an MPS entry with the specified MAC address and VNI value.
+ *
+ * Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int vni,
+ unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
+ bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_vni *p = c.u.exact_vni;
+ int ret = 0;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI);
+ c.freemacs_to_len16 = cpu_to_be32(val);
+ p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+ memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
+
+ p->lookup_type_to_vni =
+ cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) |
+ FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) |
+ FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type));
+ p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask));
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret == 0)
+ ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
+ return ret;
+}
+
+/**
* t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
* @adap: the adapter
* @viid: the VI id
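
t4_alloc_encap_mac_filt() returns the allocated MPS TCAM index, and the filter code earlier in this series pairs it with a per-index reference count (mps_encap[idx].refcnt) so the hardware entry is only freed when the last filter using that VNI drops it. A compact sketch of that refcounting pattern using C11 atomics; the array size and the free hook are placeholders:

#include <stdio.h>
#include <stdatomic.h>

/* Toy per-index reference count; the driver keeps one per MPS TCAM entry. */
static atomic_int refcnt[8];

static void get_entry(int idx)
{
	atomic_fetch_add(&refcnt[idx], 1);
}

static void put_entry(int idx)
{
	/* Free the hardware entry only when the last user drops it. */
	if (atomic_fetch_sub(&refcnt[idx], 1) == 1)
		printf("free MPS entry %d\n", idx);	/* t4_free_encap_mac_filt() here */
}

int main(void)
{
	get_entry(3);
	get_entry(3);	/* two filters share the same VNI entry */
	put_entry(3);	/* still in use */
	put_entry(3);	/* last user: entry released */
	return 0;
}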
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index fe2029e993a2..09e38f0733bd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -1233,6 +1233,11 @@ struct cpl_rx_pkt {
#define T6_COMPR_RXERR_SUM_V(x) ((x) << T6_COMPR_RXERR_SUM_S)
#define T6_COMPR_RXERR_SUM_F T6_COMPR_RXERR_SUM_V(1U)
+#define T6_RX_TNLHDR_LEN_S 8
+#define T6_RX_TNLHDR_LEN_M 0xFF
+#define T6_RX_TNLHDR_LEN_V(x) ((x) << T6_RX_TNLHDR_LEN_S)
+#define T6_RX_TNLHDR_LEN_G(x) (((x) >> T6_RX_TNLHDR_LEN_S) & T6_RX_TNLHDR_LEN_M)
+
struct cpl_trace_pkt {
u8 opcode;
u8 intf;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 51b18035d691..adacc6399131 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -145,6 +145,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x5019), /* T540-LP-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x501a), /* T540-SO-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x501b), /* T540-SO-CR */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
@@ -184,6 +187,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 276fdf214b75..6b55aa2eb2a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1598,6 +1598,10 @@
#define VNIC_V(x) ((x) << VNIC_S)
#define VNIC_F VNIC_V(1U)
+#define USE_ENC_IDX_S 13
+#define USE_ENC_IDX_V(x) ((x) << USE_ENC_IDX_S)
+#define USE_ENC_IDX_F USE_ENC_IDX_V(1U)
+
#define CSUM_HAS_PSEUDO_HDR_S 10
#define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S)
#define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U)
@@ -2995,6 +2999,7 @@
#define LE_DB_HASH_TID_BASE_A 0x19c30
#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
#define LE_DB_INT_CAUSE_A 0x19c3c
+#define LE_DB_CLCAM_TID_BASE_A 0x19df4
#define LE_DB_TID_HASHBASE_A 0x19df8
#define T6_LE_DB_HASH_TID_BASE_A 0x19df8
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index e3d4751f21ac..e6b2e9549d56 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1305,6 +1305,8 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33,
FW_PARAMS_PARAM_PFVF_TLS_START = 0x34,
FW_PARAMS_PARAM_PFVF_TLS_END = 0x35,
+ FW_PARAMS_PARAM_PFVF_RAWF_START = 0x36,
+ FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37,
FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39,
FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A,
};
@@ -2156,6 +2158,14 @@ struct fw_vi_mac_cmd {
__be64 data0m_pkd;
__be32 data1m[2];
} raw;
+ struct fw_vi_mac_vni {
+ __be16 valid_to_idx;
+ __u8 macaddr[6];
+ __be16 r7;
+ __u8 macaddr_mask[6];
+ __be32 lookup_type_to_vni;
+ __be32 vni_mask_pkd;
+ } exact_vni[2];
} u;
};
@@ -2203,6 +2213,32 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_RAW_IDX_G(x) \
(((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M)
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_S 31
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_M 0x1
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_V(x) ((x) << FW_VI_MAC_CMD_LOOKUP_TYPE_S)
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_G(x) \
+ (((x) >> FW_VI_MAC_CMD_LOOKUP_TYPE_S) & FW_VI_MAC_CMD_LOOKUP_TYPE_M)
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_F FW_VI_MAC_CMD_LOOKUP_TYPE_V(1U)
+
+#define FW_VI_MAC_CMD_DIP_HIT_S 30
+#define FW_VI_MAC_CMD_DIP_HIT_M 0x1
+#define FW_VI_MAC_CMD_DIP_HIT_V(x) ((x) << FW_VI_MAC_CMD_DIP_HIT_S)
+#define FW_VI_MAC_CMD_DIP_HIT_G(x) \
+ (((x) >> FW_VI_MAC_CMD_DIP_HIT_S) & FW_VI_MAC_CMD_DIP_HIT_M)
+#define FW_VI_MAC_CMD_DIP_HIT_F FW_VI_MAC_CMD_DIP_HIT_V(1U)
+
+#define FW_VI_MAC_CMD_VNI_S 0
+#define FW_VI_MAC_CMD_VNI_M 0xffffff
+#define FW_VI_MAC_CMD_VNI_V(x) ((x) << FW_VI_MAC_CMD_VNI_S)
+#define FW_VI_MAC_CMD_VNI_G(x) \
+ (((x) >> FW_VI_MAC_CMD_VNI_S) & FW_VI_MAC_CMD_VNI_M)
+
+#define FW_VI_MAC_CMD_VNI_MASK_S 0
+#define FW_VI_MAC_CMD_VNI_MASK_M 0xffffff
+#define FW_VI_MAC_CMD_VNI_MASK_V(x) ((x) << FW_VI_MAC_CMD_VNI_MASK_S)
+#define FW_VI_MAC_CMD_VNI_MASK_G(x) \
+ (((x) >> FW_VI_MAC_CMD_VNI_MASK_S) & FW_VI_MAC_CMD_VNI_MASK_M)
+
#define FW_RXMODE_MTU_NO_CHG 65535
struct fw_vi_rxmode_cmd {
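
The new FW_VI_MAC_CMD_VNI/DIP_HIT/LOOKUP_TYPE definitions follow the driver's usual _S/_M/_V/_G convention: a shift, a width mask, a pack helper, and an extract helper. A self-contained illustration of the same idiom on a 32-bit command word; the macro names here are shortened stand-ins:

#include <stdio.h>
#include <stdint.h>

#define VNI_S		0
#define VNI_M		0xffffffU
#define VNI_V(x)	((uint32_t)(x) << VNI_S)
#define VNI_G(x)	(((x) >> VNI_S) & VNI_M)

#define DIP_HIT_S	30
#define DIP_HIT_V(x)	((uint32_t)(x) << DIP_HIT_S)

#define LOOKUP_TYPE_S	31
#define LOOKUP_TYPE_V(x) ((uint32_t)(x) << LOOKUP_TYPE_S)

int main(void)
{
	/* Pack lookup type, DIP hit and a 24-bit VNI into one word. */
	uint32_t word = LOOKUP_TYPE_V(1) | DIP_HIT_V(1) | VNI_V(0x123456U);

	/* Extract the VNI and the DIP-hit bit again. */
	printf("vni=0x%x dip_hit=%u\n", (unsigned)VNI_G(word),
	       (unsigned)((word >> DIP_HIT_S) & 1U));
	return 0;
}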
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 123e2c1b65f5..4eb15ceddca3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x3F
+#define T4FW_VERSION_MINOR 0x13
+#define T4FW_VERSION_MICRO 0x01
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x3F
+#define T5FW_VERSION_MINOR 0x13
+#define T5FW_VERSION_MICRO 0x01
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x3F
+#define T6FW_VERSION_MINOR 0x13
+#define T6FW_VERSION_MICRO 0x01
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
index 4b5aacc09cab..240ba9d4c399 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
@@ -90,8 +90,7 @@ cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
{
struct cpl_tid_release *req;
- req = __skb_put(skb, len);
- memset(req, 0, len);
+ req = __skb_put_zero(skb, len);
INIT_TP_WR(req, tid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
@@ -104,8 +103,7 @@ cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
{
struct cpl_close_con_req *req;
- req = __skb_put(skb, len);
- memset(req, 0, len);
+ req = __skb_put_zero(skb, len);
INIT_TP_WR(req, tid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
@@ -119,8 +117,7 @@ cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
{
struct cpl_abort_req *req;
- req = __skb_put(skb, len);
- memset(req, 0, len);
+ req = __skb_put_zero(skb, len);
INIT_TP_WR(req, tid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
@@ -134,8 +131,7 @@ cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
{
struct cpl_abort_rpl *rpl;
- rpl = __skb_put(skb, len);
- memset(rpl, 0, len);
+ rpl = __skb_put_zero(skb, len);
INIT_TP_WR(rpl, tid);
OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
@@ -149,8 +145,7 @@ cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
{
struct cpl_rx_data_ack *req;
- req = __skb_put(skb, len);
- memset(req, 0, len);
+ req = __skb_put_zero(skb, len);
INIT_TP_WR(req, tid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid));
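
The repeated __skb_put() + memset() pairs in this header collapse into __skb_put_zero(), which reserves tail room and zeroes it in one call. A toy buffer shows the shape of that consolidation; struct buf and its helpers are stand-ins for the sk_buff API:

#include <stdio.h>
#include <string.h>

/* Toy append buffer standing in for an sk_buff tail. */
struct buf { unsigned char data[256]; size_t len; };

/* Reserve len bytes at the tail (like __skb_put). */
static void *buf_put(struct buf *b, size_t len)
{
	void *p = b->data + b->len;

	b->len += len;
	return p;
}

/* Reserve and zero in one step (what __skb_put_zero folds together). */
static void *buf_put_zero(struct buf *b, size_t len)
{
	return memset(buf_put(b, len), 0, len);
}

int main(void)
{
	struct buf b = { .len = 0 };
	unsigned char *req = buf_put_zero(&b, 16);

	printf("len=%zu first=%u\n", b.len, req[0]);
	return 0;
}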
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8bb0db990c8f..00a57273b753 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1246,8 +1246,7 @@ error:
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
free2:
- if (priv->clk)
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
free:
free_netdev(netdev);
out:
@@ -1271,8 +1270,7 @@ static int ethoc_remove(struct platform_device *pdev)
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
}
- if (priv->clk)
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
unregister_netdev(netdev);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 6e490fd2345d..a580a3dcbe59 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -22,7 +22,7 @@ if NET_VENDOR_FREESCALE
config FEC
tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || SOC_IMX28)
+ ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
select PHYLIB
imply PTP_1588_CLOCK
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index e7381f8ef89d..4778b663653e 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -21,7 +21,7 @@
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64)
+ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4604bc8eb5b..4358f586e28f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2052,13 +2052,9 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->parent = &pdev->dev;
node = of_get_child_by_name(pdev->dev.of_node, "mdio");
- if (node) {
- err = of_mdiobus_register(fep->mii_bus, node);
+ err = of_mdiobus_register(fep->mii_bus, node);
+ if (node)
of_node_put(node);
- } else {
- err = mdiobus_register(fep->mii_bus);
- }
-
if (err)
goto err_out_free_mdiobus;
@@ -2111,7 +2107,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
/* List of registers that can safely be read to dump them with ethtool */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64)
+ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 6552d68ea6e1..ce6e24c74978 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1391,12 +1391,10 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
/* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata
* workaround
*/
- if (port->rev_info.major >= 6) {
- u32 reg;
+ u32 reg;
- reg = 0x00001013;
- iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp);
- }
+ reg = 0x00001013;
+ iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp);
}
return 0;
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 4df282ed22c7..0beee2cc2ddd 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -61,7 +61,7 @@ static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
"tx-single-collision",
"tx-multiple-collision",
- "tx-late-collsion",
+ "tx-late-collision",
"tx-aborted-frames",
"tx-lost-frames",
"tx-carrier-sense-errors",
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 3e62692af011..fa5b30f547f6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -87,7 +87,7 @@ do { \
#define HNAE_AE_REGISTER 0x1
-#define RCB_RING_NAME_LEN 16
+#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)
#define HNAE_LOWEST_LATENCY_COAL_PARAM 30
#define HNAE_LOW_LATENCY_COAL_PARAM 80
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 02145f2de820..63d7dbfb90bf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -50,13 +50,22 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
/* now, (un-)instantiate client by calling lower layer */
if (is_reg) {
ret = ae_dev->ops->init_client_instance(client, ae_dev);
- if (ret)
+ if (ret) {
dev_err(&ae_dev->pdev->dev,
"fail to instantiate client\n");
- return ret;
+ return ret;
+ }
+
+ hnae_set_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B, 1);
+ return 0;
+ }
+
+ if (hnae_get_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B)) {
+ ae_dev->ops->uninit_client_instance(client, ae_dev);
+
+ hnae_set_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B, 0);
}
- ae_dev->ops->uninit_client_instance(client, ae_dev);
return 0;
}
@@ -89,7 +98,7 @@ int hnae3_register_client(struct hnae3_client *client)
exit:
mutex_unlock(&hnae3_common_lock);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(hnae3_register_client);
@@ -112,7 +121,7 @@ EXPORT_SYMBOL(hnae3_unregister_client);
* @ae_algo: AE algorithm
* NOTE: the duplicated name will not be checked
*/
-int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
+void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
{
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
@@ -151,8 +160,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
}
mutex_unlock(&hnae3_common_lock);
-
- return ret;
}
EXPORT_SYMBOL(hnae3_register_ae_algo);
@@ -168,6 +175,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_dev */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!id)
continue;
@@ -191,22 +201,14 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo);
* @ae_dev: the AE device
* NOTE: the duplicated name will not be checked
*/
-int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
+void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
{
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- int ret = 0, lock_acquired;
+ int ret = 0;
- /* we can get deadlocked if SRIOV is being enabled in context to probe
- * and probe gets called again in same context. This can happen when
- * pci_enable_sriov() is called to create VFs from PF probes context.
- * Therefore, for simplicity uniformly defering further probing in all
- * cases where we detect contention.
- */
- lock_acquired = mutex_trylock(&hnae3_common_lock);
- if (!lock_acquired)
- return -EPROBE_DEFER;
+ mutex_lock(&hnae3_common_lock);
list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
@@ -220,7 +222,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!ae_dev->ops) {
dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
- ret = -EOPNOTSUPP;
goto out_err;
}
@@ -247,8 +248,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
out_err:
mutex_unlock(&hnae3_common_lock);
-
- return ret;
}
EXPORT_SYMBOL(hnae3_register_ae_dev);
@@ -264,6 +263,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_algo */
list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
+ if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
if (!id)
continue;
@@ -283,3 +285,4 @@ EXPORT_SYMBOL(hnae3_unregister_ae_dev);
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HNAE3(Hisilicon Network Acceleration Engine) Framework");
+MODULE_VERSION(HNAE3_MOD_VERSION);
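
hnae3_match_n_instantiate() now records a successful client init in a flag bit (HNAE3_CLIENT_INITED_B) and only calls the uninit path when that bit is set, mirroring the hnae_set_bit()/hnae_get_bit() helpers. A minimal stand-alone version of that bookkeeping; the bit position and helpers below are illustrative:

#include <stdio.h>

#define CLIENT_INITED_B	0x3	/* same idea as HNAE3_CLIENT_INITED_B */

static void set_bit_u32(unsigned int *flag, int shift, int val)
{
	*flag = (*flag & ~(1U << shift)) | ((unsigned int)val << shift);
}

static int get_bit_u32(unsigned int flag, int shift)
{
	return (flag >> shift) & 1U;
}

int main(void)
{
	unsigned int flag = 0;

	set_bit_u32(&flag, CLIENT_INITED_B, 1);		/* init succeeded */
	if (get_bit_u32(flag, CLIENT_INITED_B)) {	/* only then uninit */
		printf("uninit client\n");
		set_bit_u32(&flag, CLIENT_INITED_B, 0);
	}
	return 0;
}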
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 37ec1b3286c6..45c571eea2ae 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -36,6 +36,8 @@
#include <linux/pci.h>
#include <linux/types.h>
+#define HNAE3_MOD_VERSION "1.0"
+
/* Device IDs */
#define HNAE3_DEV_ID_GE 0xA220
#define HNAE3_DEV_ID_25GE 0xA221
@@ -52,6 +54,7 @@
#define HNAE3_DEV_INITED_B 0x0
#define HNAE3_DEV_SUPPORT_ROCE_B 0x1
#define HNAE3_DEV_SUPPORT_DCB_B 0x2
+#define HNAE3_CLIENT_INITED_B 0x3
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -273,10 +276,6 @@ struct hnae3_ae_dev {
* Map rings to vector
* unmap_ring_from_vector()
* Unmap rings from vector
- * add_tunnel_udp()
- * Add tunnel information to hardware
- * del_tunnel_udp()
- * Delete tunnel information from hardware
* reset_queue()
* Reset queue
* get_fw_version()
@@ -388,9 +387,6 @@ struct hnae3_ae_ops {
int vector_num,
struct hnae3_ring_chain_node *vr_chain);
- int (*add_tunnel_udp)(struct hnae3_handle *handle, u16 port_num);
- int (*del_tunnel_udp)(struct hnae3_handle *handle, u16 port_num);
-
void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
u32 (*get_fw_version)(struct hnae3_handle *handle);
void (*get_mdix_mode)(struct hnae3_handle *handle,
@@ -521,11 +517,11 @@ struct hnae3_handle {
#define hnae_get_bit(origin, shift) \
hnae_get_field((origin), (0x1 << (shift)), (shift))
-int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
+void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
-int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
+void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
void hnae3_unregister_client(struct hnae3_client *client);
int hnae3_register_client(struct hnae3_client *client);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 8c55965a66ac..cac51954f2cf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -502,7 +502,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
/* find outer header point */
l3.hdr = skb_network_header(skb);
- l4_hdr = skb_inner_transport_header(skb);
+ l4_hdr = skb_transport_header(skb);
if (skb->protocol == htons(ETH_P_IPV6)) {
exthdr = l3.hdr + sizeof(*l3.v6);
@@ -1244,93 +1244,6 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
stats->tx_compressed = netdev->stats.tx_compressed;
}
-static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
- enum hns3_udp_tnl_type type)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
- struct hnae3_handle *h = priv->ae_handle;
-
- if (udp_tnl->used && udp_tnl->dst_port == port) {
- udp_tnl->used++;
- return;
- }
-
- if (udp_tnl->used) {
- netdev_warn(netdev,
- "UDP tunnel [%d], port [%d] offload\n", type, port);
- return;
- }
-
- udp_tnl->dst_port = port;
- udp_tnl->used = 1;
- /* TBD send command to hardware to add port */
- if (h->ae_algo->ops->add_tunnel_udp)
- h->ae_algo->ops->add_tunnel_udp(h, port);
-}
-
-static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
- enum hns3_udp_tnl_type type)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
- struct hnae3_handle *h = priv->ae_handle;
-
- if (!udp_tnl->used || udp_tnl->dst_port != port) {
- netdev_warn(netdev,
- "Invalid UDP tunnel port %d\n", port);
- return;
- }
-
- udp_tnl->used--;
- if (udp_tnl->used)
- return;
-
- udp_tnl->dst_port = 0;
- /* TBD send command to hardware to del port */
- if (h->ae_algo->ops->del_tunnel_udp)
- h->ae_algo->ops->del_tunnel_udp(h, port);
-}
-
-/* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports
- * @netdev: This physical ports's netdev
- * @ti: Tunnel information
- */
-static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
- struct udp_tunnel_info *ti)
-{
- u16 port_n = ntohs(ti->port);
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
- break;
- default:
- netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
- break;
- }
-}
-
-static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
- struct udp_tunnel_info *ti)
-{
- u16 port_n = ntohs(ti->port);
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
- break;
- default:
- break;
- }
-}
-
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
@@ -1569,13 +1482,50 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_get_stats64 = hns3_nic_get_stats64,
.ndo_setup_tc = hns3_nic_setup_tc,
.ndo_set_rx_mode = hns3_nic_set_rx_mode,
- .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
- .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
};
+static bool hns3_is_phys_func(struct pci_dev *pdev)
+{
+ u32 dev_id = pdev->device;
+
+ switch (dev_id) {
+ case HNAE3_DEV_ID_GE:
+ case HNAE3_DEV_ID_25GE:
+ case HNAE3_DEV_ID_25GE_RDMA:
+ case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
+ case HNAE3_DEV_ID_50GE_RDMA:
+ case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
+ case HNAE3_DEV_ID_100G_RDMA_MACSEC:
+ return true;
+ case HNAE3_DEV_ID_100G_VF:
+ case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
+ return false;
+ default:
+ dev_warn(&pdev->dev, "un-recognized pci device-id %d",
+ dev_id);
+ }
+
+ return false;
+}
+
+static void hns3_disable_sriov(struct pci_dev *pdev)
+{
+ /* If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "disabling driver while VFs are assigned\n");
+ return;
+ }
+
+ pci_disable_sriov(pdev);
+}
+
/* hns3_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in hns3_pci_tbl
@@ -1603,7 +1553,9 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->dev_type = HNAE3_DEV_KNIC;
pci_set_drvdata(pdev, ae_dev);
- return hnae3_register_ae_dev(ae_dev);
+ hnae3_register_ae_dev(ae_dev);
+
+ return 0;
}
/* hns3_remove - Device removal routine
@@ -1613,21 +1565,56 @@ static void hns3_remove(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
+ hns3_disable_sriov(pdev);
+
hnae3_unregister_ae_dev(ae_dev);
}
+/**
+ * hns3_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ int ret;
+
+ if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
+ dev_warn(&pdev->dev, "Can not config SRIOV\n");
+ return -EINVAL;
+ }
+
+ if (num_vfs) {
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret)
+ dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
+ else
+ return num_vfs;
+ } else if (!pci_vfs_assigned(pdev)) {
+ pci_disable_sriov(pdev);
+ } else {
+ dev_warn(&pdev->dev,
+ "Unable to free VFs because some are assigned to VMs.\n");
+ }
+
+ return 0;
+}
+
static struct pci_driver hns3_driver = {
.name = hns3_driver_name,
.id_table = hns3_pci_tbl,
.probe = hns3_probe,
.remove = hns3_remove,
+ .sriov_configure = hns3_pci_sriov_configure,
};
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
- struct hnae3_handle *h = hns3_get_handle(netdev);
-
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1656,15 +1643,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- if (!(h->flags & HNAE3_SUPPORT_VF))
- netdev->hw_features |=
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -1971,106 +1954,6 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
-/* hns3_nic_get_headlen - determine size of header for LRO/GRO
- * @data: pointer to the start of the headers
- * @max: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- */
-static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
- unsigned int max_size)
-{
- unsigned char *network;
- u8 hlen;
-
- /* This should never happen, but better safe than sorry */
- if (max_size < ETH_HLEN)
- return max_size;
-
- /* Initialize network frame pointer */
- network = data;
-
- /* Set first protocol and move network header forward */
- network += ETH_HLEN;
-
- /* Handle any vlan tag if present */
- if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
- == HNS3_RX_FLAG_VLAN_PRESENT) {
- if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
- return max_size;
-
- network += VLAN_HLEN;
- }
-
- /* Handle L3 protocols */
- if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
- == HNS3_RX_FLAG_L3ID_IPV4) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct iphdr)))
- return max_size;
-
- /* Access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (network[0] & 0x0F) << 2;
-
- /* Verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct iphdr))
- return network - data;
-
- /* Record next protocol if header is present */
- } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
- == HNS3_RX_FLAG_L3ID_IPV6) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct ipv6hdr)))
- return max_size;
-
- /* Record next protocol */
- hlen = sizeof(struct ipv6hdr);
- } else {
- return network - data;
- }
-
- /* Relocate pointer to start of L4 header */
- network += hlen;
-
- /* Finally sort out TCP/UDP */
- if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
- == HNS3_RX_FLAG_L4ID_TCP) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct tcphdr)))
- return max_size;
-
- /* Access doff as a u8 to avoid unaligned access on ia64 */
- hlen = (network[12] & 0xF0) >> 2;
-
- /* Verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct tcphdr))
- return network - data;
-
- network += hlen;
- } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
- == HNS3_RX_FLAG_L4ID_UDP) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct udphdr)))
- return max_size;
-
- network += sizeof(struct udphdr);
- }
-
- /* If everything has gone correctly network should be the
- * data section of the packet and will be the end of the header.
- * If not then it probably represents the end of the last recognized
- * header.
- */
- if ((typeof(max_size))(network - data) < max_size)
- return network - data;
- else
- return max_size;
-}
-
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -2270,8 +2153,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->stats.seg_pkt_cnt++;
u64_stats_update_end(&ring->syncp);
- pull_len = hns3_nic_get_headlen(va, l234info,
- HNS3_RX_HEAD_SIZE);
+ pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+
memcpy(__skb_put(skb, pull_len), va,
ALIGN(pull_len, sizeof(long)));
@@ -3052,13 +2935,13 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
}
/* Set mac addr if it is configured. or leave it to the AE driver */
-static void hns3_init_mac_addr(struct net_device *netdev)
+static void hns3_init_mac_addr(struct net_device *netdev, bool init)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
- if (h->ae_algo->ops->get_mac_addr) {
+ if (h->ae_algo->ops->get_mac_addr && init) {
h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
ether_addr_copy(netdev->dev_addr, mac_addr_temp);
}
@@ -3112,7 +2995,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
- hns3_init_mac_addr(netdev);
+ hns3_init_mac_addr(netdev, true);
hns3_set_default_feature(netdev);
@@ -3298,9 +3181,35 @@ static void hns3_recover_hw_addr(struct net_device *ndev)
hns3_nic_mc_sync(ndev, ha->addr);
}
-static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
+static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
- dev_kfree_skb_any(skb);
+ if (!HNAE3_IS_TX_RING(ring))
+ return;
+
+ while (ring->next_to_clean != ring->next_to_use) {
+ hns3_free_buffer_detach(ring, ring->next_to_clean);
+ ring_ptr_move_fw(ring, next_to_clean);
+ }
+}
+
+static void hns3_clear_rx_ring(struct hns3_enet_ring *ring)
+{
+ if (HNAE3_IS_TX_RING(ring))
+ return;
+
+ while (ring->next_to_use != ring->next_to_clean) {
+ /* When a buffer is not reused, its memory has been
+ * freed in hns3_handle_rx_bd or will be freed by the
+ * stack, so we only need to unmap the buffer here.
+ */
+ if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
+ hns3_unmap_buffer(ring,
+ &ring->desc_cb[ring->next_to_use]);
+ ring->desc_cb[ring->next_to_use].dma = 0;
+ }
+
+ ring_ptr_move_fw(ring, next_to_use);
+ }
}
static void hns3_clear_all_ring(struct hnae3_handle *h)
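hns3_clear_tx_ring() and hns3_clear_rx_ring() above simply walk the descriptor ring between next_to_clean and next_to_use, releasing or unmapping each buffer and stepping the index forward with ring_ptr_move_fw(). A simplified, self-contained sketch of that circular walk, with made-up types (the real hns3_enet_ring and desc_cb carry much more state):

/* Simplified sketch: indices advance modulo desc_num. */
struct demo_ring {
	int desc_num;       /* number of descriptors in the ring */
	int next_to_use;    /* producer index                    */
	int next_to_clean;  /* consumer index                    */
};

#define demo_ring_move_fw(ring, idx) \
	((ring)->idx = ((ring)->idx + 1) % (ring)->desc_num)

static void demo_clear_ring(struct demo_ring *ring)
{
	while (ring->next_to_clean != ring->next_to_use) {
		/* release/unmap the buffer attached to next_to_clean here */
		demo_ring_move_fw(ring, next_to_clean);
	}
}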
@@ -3314,13 +3223,13 @@ static void hns3_clear_all_ring(struct hnae3_handle *h)
struct hns3_enet_ring *ring;
ring = priv->ring_data[i].ring;
- hns3_clean_tx_ring(ring, ring->desc_num);
+ hns3_clear_tx_ring(ring);
dev_queue = netdev_get_tx_queue(ndev,
priv->ring_data[i].queue_index);
netdev_tx_reset_queue(dev_queue);
ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
- hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
+ hns3_clear_rx_ring(ring);
}
}
@@ -3359,7 +3268,7 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- hns3_init_mac_addr(netdev);
+ hns3_init_mac_addr(netdev, false);
hns3_nic_set_rx_mode(netdev);
hns3_recover_hw_addr(netdev);
@@ -3600,6 +3509,8 @@ static int __init hns3_init_module(void)
client.ops = &client_ops;
+ INIT_LIST_HEAD(&client.node);
+
ret = hnae3_register_client(&client);
if (ret)
return ret;
@@ -3627,3 +3538,4 @@ MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
+MODULE_VERSION(HNS3_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 98cdbd3a1163..5b40f5a53761 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -14,6 +14,8 @@
#include "hnae3.h"
+#define HNS3_MOD_VERSION "1.0"
+
extern const char hns3_driver_version[];
enum hns3_nic_state {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index eb3c34f3cf87..c16bb6cb0564 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -74,7 +74,7 @@ struct hns3_link_mode_mapping {
u32 ethtool_link_mode;
};
-static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop)
+static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
int ret;
@@ -85,11 +85,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop)
switch (loop) {
case HNAE3_MAC_INTER_LOOP_MAC:
- ret = h->ae_algo->ops->set_loopback(h, loop, true);
- break;
- case HNAE3_MAC_LOOP_NONE:
- ret = h->ae_algo->ops->set_loopback(h,
- HNAE3_MAC_INTER_LOOP_MAC, false);
+ ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
ret = -ENOTSUPP;
@@ -99,10 +95,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop)
if (ret)
return ret;
- if (loop == HNAE3_MAC_LOOP_NONE)
- h->ae_algo->ops->set_promisc_mode(h, ndev->flags & IFF_PROMISC);
- else
- h->ae_algo->ops->set_promisc_mode(h, 1);
+ h->ae_algo->ops->set_promisc_mode(h, en);
return ret;
}
@@ -122,13 +115,13 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
return ret;
}
- ret = hns3_lp_setup(ndev, loop_mode);
+ ret = hns3_lp_setup(ndev, loop_mode, true);
usleep_range(10000, 20000);
return ret;
}
-static int hns3_lp_down(struct net_device *ndev)
+static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
int ret;
@@ -136,7 +129,7 @@ static int hns3_lp_down(struct net_device *ndev)
if (!h->ae_algo->ops->stop)
return -EOPNOTSUPP;
- ret = hns3_lp_setup(ndev, HNAE3_MAC_LOOP_NONE);
+ ret = hns3_lp_setup(ndev, loop_mode, false);
if (ret) {
netdev_err(ndev, "lb_setup return error: %d\n", ret);
return ret;
@@ -332,7 +325,7 @@ static void hns3_self_test(struct net_device *ndev,
data[test_index] = hns3_lp_up(ndev, loop_type);
if (!data[test_index]) {
data[test_index] = hns3_lp_run_test(ndev, loop_type);
- hns3_lp_down(ndev);
+ hns3_lp_down(ndev, loop_type);
}
if (data[test_index])
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index ff13d1876d9e..c36d64710fa6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -31,6 +31,17 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring)
return ring->desc_num - used - 1;
}
+static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h)
+{
+ int u = ring->next_to_use;
+ int c = ring->next_to_clean;
+
+ if (unlikely(h >= ring->desc_num))
+ return 0;
+
+ return u > c ? (h > c && h <= u) : (h > c || h <= u);
+}
+
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclge_desc);
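The new is_valid_csq_clean_head() above only trusts a hardware-reported head pointer when it falls in the half-open window (next_to_clean, next_to_use] of the circular command-send queue, including the wrapped case where next_to_use has gone back past next_to_clean. A standalone illustration of the same predicate (hypothetical names, not driver code):

#include <stdbool.h>

/* Ring of 'num' slots: a plausible new head sits strictly after the
 * consumer index 'c' and no later than the producer index 'u',
 * taking wrap-around into account.
 */
static bool head_in_window(int head, int c, int u, int num)
{
	if (head >= num)
		return false;

	return u > c ? (head > c && head <= u)
		     : (head > c || head <= u);
}

/* Example: num = 8, c = 6, u = 2 (wrapped) -> heads 7, 0, 1, 2 are valid. */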
@@ -141,6 +152,7 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw)
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
+ struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
struct hclge_cmq_ring *csq = &hw->cmq.csq;
u16 ntc = csq->next_to_clean;
struct hclge_desc *desc;
@@ -149,6 +161,13 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
desc = &csq->desc[ntc];
head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
+ rmb(); /* Make sure head is ready before touching any data */
+
+ if (!is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
+ return 0;
+ }
while (head != ntc) {
memset(desc, 0, sizeof(*desc));
@@ -171,7 +190,11 @@ static int hclge_cmd_csq_done(struct hclge_hw *hw)
static bool hclge_is_special_opcode(u16 opcode)
{
- u16 spec_opcode[3] = {0x0030, 0x0031, 0x0032};
+ /* These commands have several descriptors,
+ * and use the first one to save the opcode and return value.
+ */
+ u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT,
+ HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -362,9 +385,9 @@ int hclge_cmd_init(struct hclge_dev *hdev)
static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
{
- spin_lock_bh(&ring->lock);
+ spin_lock(&ring->lock);
hclge_free_cmd_desc(ring);
- spin_unlock_bh(&ring->lock);
+ spin_unlock(&ring->lock);
}
void hclge_destroy_cmd_queue(struct hclge_hw *hw)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 2066dd734444..2f0bbb6708b9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -304,8 +304,6 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
{"mac_tx_4096_8191_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
- {"mac_tx_8192_12287_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
{"mac_tx_8192_9216_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
{"mac_tx_9217_12287_oct_pkt_num",
@@ -356,8 +354,6 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
{"mac_rx_4096_8191_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
- {"mac_rx_8192_12287_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
{"mac_rx_8192_9216_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
{"mac_rx_9217_12287_oct_pkt_num",
@@ -1459,8 +1455,11 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
/* We need to alloc a vport for main NIC of PF */
num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
- if (hdev->num_tqps < num_vport)
- num_vport = hdev->num_tqps;
+ if (hdev->num_tqps < num_vport) {
+ dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
+ hdev->num_tqps, num_vport);
+ return -EINVAL;
+ }
/* Alloc the same number of TQPs for every vport */
tqp_per_vport = hdev->num_tqps / num_vport;
@@ -1474,21 +1473,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
hdev->vport = vport;
hdev->num_alloc_vport = num_vport;
-#ifdef CONFIG_PCI_IOV
- /* Enable SRIOV */
- if (hdev->num_req_vfs) {
- dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
- hdev->num_req_vfs);
- ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
- if (ret) {
- hdev->num_alloc_vfs = 0;
- dev_err(&pdev->dev, "SRIOV enable failed %d\n",
- ret);
- return ret;
- }
- }
- hdev->num_alloc_vfs = hdev->num_req_vfs;
-#endif
+ if (IS_ENABLED(CONFIG_PCI_IOV))
+ hdev->num_alloc_vfs = hdev->num_req_vfs;
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
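The same hunk swaps an #ifdef CONFIG_PCI_IOV block for IS_ENABLED(CONFIG_PCI_IOV), the <linux/kconfig.h> macro that evaluates to 1 when an option is built in or modular and to 0 otherwise, so the guarded statement is always compiled (and type-checked) and is simply eliminated as dead code when the option is off. A tiny sketch of the idiom, using a hypothetical option and structure:

#include <linux/kconfig.h>

struct demo_dev {
	int vf_count;
};

static inline void demo_set_vfs(struct demo_dev *dev, int requested)
{
	/* Equivalent to wrapping the assignment in #ifdef CONFIG_DEMO_SRIOV,
	 * but the compiler always parses the statement.
	 */
	if (IS_ENABLED(CONFIG_DEMO_SRIOV))
		dev->vf_count = requested;
}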
@@ -2947,21 +2933,6 @@ static void hclge_service_task(struct work_struct *work)
hclge_service_complete(hdev);
}
-static void hclge_disable_sriov(struct hclge_dev *hdev)
-{
- /* If our VFs are assigned we cannot shut down SR-IOV
- * without causing issues, so just leave the hardware
- * available but disabled
- */
- if (pci_vfs_assigned(hdev->pdev)) {
- dev_warn(&hdev->pdev->dev,
- "disabling driver while VFs are assigned\n");
- return;
- }
-
- pci_disable_sriov(hdev->pdev);
-}
-
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
/* VF handle has no client */
@@ -3683,48 +3654,50 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
"mac enable fail, ret =%d.\n", ret);
}
-static int hclge_set_loopback(struct hnae3_handle *handle,
- enum hnae3_loop loop_mode, bool en)
+static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_config_mac_mode_cmd *req;
- struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
u32 loop_en;
int ret;
- switch (loop_mode) {
- case HNAE3_MAC_INTER_LOOP_MAC:
- req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
- /* 1 Read out the MAC mode config at first */
- hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_CONFIG_MAC_MODE,
- true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac loopback get fail, ret =%d.\n",
- ret);
- return ret;
- }
+ req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
+ /* 1 Read out the MAC mode config at first */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mac loopback get fail, ret =%d.\n", ret);
+ return ret;
+ }
- /* 2 Then setup the loopback flag */
- loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
- if (en)
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
- else
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+ /* 2 Then setup the loopback flag */
+ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
+ hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
- req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
+ req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
- /* 3 Config mac work mode with loopback flag
- * and its original configure parameters
- */
- hclge_cmd_reuse_desc(&desc, false);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "mac loopback set fail, ret =%d.\n", ret);
+ /* 3 Config mac work mode with loopback flag
+ * and its original configure parameters
+ */
+ hclge_cmd_reuse_desc(&desc, false);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "mac loopback set fail, ret =%d.\n", ret);
+ return ret;
+}
+
+static int hclge_set_loopback(struct hnae3_handle *handle,
+ enum hnae3_loop loop_mode, bool en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ switch (loop_mode) {
+ case HNAE3_MAC_INTER_LOOP_MAC:
+ ret = hclge_set_mac_loopback(hdev, en);
break;
default:
ret = -ENOTSUPP;
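The refactored hclge_set_mac_loopback() above is a plain read-modify-write of one firmware descriptor: read the current MAC mode config, flip only the loopback bit, and resend the same descriptor. A generic, hedged sketch of that pattern with hypothetical register accessors (the real code goes through hclge_cmd_send()):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_LOOPBACK_BIT 5

static uint32_t demo_cfg_reg;                      /* stand-in for the device */

static uint32_t demo_read_cfg(void)   { return demo_cfg_reg; }
static void demo_write_cfg(uint32_t v) { demo_cfg_reg = v; }

static void demo_set_loopback(bool enable)
{
	uint32_t cfg = demo_read_cfg();            /* 1. read current config   */

	if (enable)                                /* 2. modify one field only */
		cfg |= 1u << DEMO_LOOPBACK_BIT;
	else
		cfg &= ~(1u << DEMO_LOOPBACK_BIT);

	demo_write_cfg(cfg);                       /* 3. write it back         */
}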
@@ -3783,6 +3756,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
hclge_cfg_mac_mode(hdev, true);
clear_bit(HCLGE_STATE_DOWN, &hdev->state);
mod_timer(&hdev->service_timer, jiffies + HZ);
+ hdev->hw.mac.link = 0;
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
@@ -3819,6 +3793,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
+ del_timer_sync(&hdev->service_timer);
+ cancel_work_sync(&hdev->service_task);
hclge_update_link_status(hdev);
}
@@ -4540,8 +4516,9 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
}
-int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
- bool is_kill, u16 vlan, u8 qos, __be16 proto)
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
+ bool is_kill, u16 vlan, u8 qos,
+ __be16 proto)
{
#define HCLGE_MAX_VF_BYTES 16
struct hclge_vlan_filter_vf_cfg_cmd *req0;
@@ -4599,12 +4576,9 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
return -EIO;
}
-static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
- __be16 proto, u16 vlan_id,
- bool is_kill)
+static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
+ u16 vlan_id, bool is_kill)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
struct hclge_vlan_filter_pf_cfg_cmd *req;
struct hclge_desc desc;
u8 vlan_offset_byte_val;
@@ -4624,22 +4598,66 @@ static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "port vlan command, send fail, ret =%d.\n", ret);
+ return ret;
+}
+
+static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
+ u16 vport_id, u16 vlan_id, u8 qos,
+ bool is_kill)
+{
+ u16 vport_idx, vport_num = 0;
+ int ret;
+
+ ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
+ 0, proto);
if (ret) {
dev_err(&hdev->pdev->dev,
- "port vlan command, send fail, ret =%d.\n",
- ret);
+ "Set %d vport vlan filter config fail, ret =%d.\n",
+ vport_id, ret);
return ret;
}
- ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
- if (ret) {
+ /* vlan 0 may be added twice when the 8021q module is enabled */
+ if (!is_kill && !vlan_id &&
+ test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ return 0;
+
+ if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev,
- "Set pf vlan filter config fail, ret =%d.\n",
- ret);
- return -EIO;
+ "Add port vlan failed, vport %d is already in vlan %d\n",
+ vport_id, vlan_id);
+ return -EINVAL;
}
- return 0;
+ if (is_kill &&
+ !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_err(&hdev->pdev->dev,
+ "Delete port vlan failed, vport %d is not in vlan %d\n",
+ vport_id, vlan_id);
+ return -EINVAL;
+ }
+
+ for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID)
+ vport_num++;
+
+ if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
+ ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
+ is_kill);
+
+ return ret;
+}
+
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
+ 0, is_kill);
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
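hclge_set_vlan_filter_hw() above keeps a per-VLAN bitmap of vports (hdev->vlan_table) and only programs the port-level filter when the first vport joins or the last vport leaves a VLAN, so VLANs shared by several vports are effectively reference-counted. A simplified sketch of that bookkeeping (sizes and names below are made up):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_VLANS	4096
#define DEMO_VPORTS	256

static unsigned long demo_vlan_table[DEMO_VLANS][BITS_TO_LONGS(DEMO_VPORTS)];

/* Returns true when the hardware port filter needs to be touched, i.e.
 * on the first add or the last delete for this VLAN id.
 */
static bool demo_vlan_update(u16 vlan, u16 vport, bool add)
{
	if (add)
		set_bit(vport, demo_vlan_table[vlan]);
	else
		clear_bit(vport, demo_vlan_table[vlan]);

	return bitmap_weight(demo_vlan_table[vlan], DEMO_VPORTS) == (add ? 1 : 0);
}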
@@ -4653,7 +4671,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
- return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
+ return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
@@ -4818,10 +4836,10 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
}
handle = &hdev->vport[0].nic;
- return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
+ return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
-static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -5166,12 +5184,6 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
struct phy_device *phydev = hdev->hw.mac.phydev;
u32 fc_autoneg;
- /* Only support flow control negotiation for netdev with
- * phy attached for now.
- */
- if (!phydev)
- return -EOPNOTSUPP;
-
fc_autoneg = hclge_get_autoneg(handle);
if (auto_neg != fc_autoneg) {
dev_info(&hdev->pdev->dev,
@@ -5190,6 +5202,12 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
if (!fc_autoneg)
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+ /* Only support flow control negotiation for netdev with
+ * phy attached for now.
+ */
+ if (!phydev)
+ return -EOPNOTSUPP;
+
return phy_start_aneg(phydev);
}
@@ -5282,7 +5300,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
vport->nic.client = client;
ret = client->ops->init_instance(&vport->nic);
if (ret)
- goto err;
+ return ret;
if (hdev->roce_client &&
hnae3_dev_roce_supported(hdev)) {
@@ -5290,11 +5308,11 @@ static int hclge_init_client_instance(struct hnae3_client *client,
ret = hclge_init_roce_base_info(vport);
if (ret)
- goto err;
+ return ret;
ret = rc->ops->init_instance(&vport->roce);
if (ret)
- goto err;
+ return ret;
}
break;
@@ -5304,7 +5322,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
ret = client->ops->init_instance(&vport->nic);
if (ret)
- goto err;
+ return ret;
break;
case HNAE3_CLIENT_ROCE:
@@ -5316,18 +5334,16 @@ static int hclge_init_client_instance(struct hnae3_client *client,
if (hdev->roce_client && hdev->nic_client) {
ret = hclge_init_roce_base_info(vport);
if (ret)
- goto err;
+ return ret;
ret = client->ops->init_instance(&vport->roce);
if (ret)
- goto err;
+ return ret;
}
}
}
return 0;
-err:
- return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
@@ -5364,7 +5380,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
- goto err_no_drvdata;
+ return ret;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -5402,8 +5418,6 @@ err_clr_master:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
-err_no_drvdata:
- pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -5412,6 +5426,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
+ pcim_iounmap(pdev, hdev->hw.io_base);
pci_free_irq_vectors(pdev);
pci_clear_master(pdev);
pci_release_mem_regions(pdev);
@@ -5427,7 +5442,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
if (!hdev) {
ret = -ENOMEM;
- goto err_hclge_dev;
+ goto out;
}
hdev->pdev = pdev;
@@ -5440,38 +5455,38 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_pci_init(hdev);
if (ret) {
dev_err(&pdev->dev, "PCI init failed\n");
- goto err_pci_init;
+ goto out;
}
/* Firmware command queue initialize */
ret = hclge_cmd_queue_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
- return ret;
+ goto err_pci_uninit;
}
/* Firmware command initialize */
ret = hclge_cmd_init(hdev);
if (ret)
- goto err_cmd_init;
+ goto err_cmd_uninit;
ret = hclge_get_cap(hdev);
if (ret) {
dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
ret);
- return ret;
+ goto err_cmd_uninit;
}
ret = hclge_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
- return ret;
+ goto err_cmd_uninit;
}
ret = hclge_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
- return ret;
+ goto err_cmd_uninit;
}
ret = hclge_misc_irq_init(hdev);
@@ -5479,69 +5494,71 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
dev_err(&pdev->dev,
"Misc IRQ(vector0) init error, ret = %d.\n",
ret);
- return ret;
+ goto err_msi_uninit;
}
ret = hclge_alloc_tqps(hdev);
if (ret) {
dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
- return ret;
+ goto err_msi_irq_uninit;
}
ret = hclge_alloc_vport(hdev);
if (ret) {
dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
- return ret;
+ goto err_msi_irq_uninit;
}
ret = hclge_map_tqp(hdev);
if (ret) {
dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
- return ret;
+ goto err_msi_irq_uninit;
}
- ret = hclge_mac_mdio_config(hdev);
- if (ret) {
- dev_warn(&hdev->pdev->dev,
- "mdio config fail ret=%d\n", ret);
- return ret;
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+ ret = hclge_mac_mdio_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mdio config fail ret=%d\n", ret);
+ goto err_msi_irq_uninit;
+ }
}
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
if (ret) {
dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
ret = hclge_tm_schd_init(hdev);
if (ret) {
dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
hclge_rss_init_cfg(hdev);
ret = hclge_rss_init_hw(hdev);
if (ret) {
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
ret = init_mgr_tbl(hdev);
if (ret) {
dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
- return ret;
+ goto err_mdiobus_unreg;
}
hclge_dcb_ops_set(hdev);
@@ -5564,11 +5581,21 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
-err_cmd_init:
+err_mdiobus_unreg:
+ if (hdev->hw.mac.phydev)
+ mdiobus_unregister(hdev->hw.mac.mdio_bus);
+err_msi_irq_uninit:
+ hclge_misc_irq_uninit(hdev);
+err_msi_uninit:
+ pci_free_irq_vectors(pdev);
+err_cmd_uninit:
+ hclge_destroy_cmd_queue(&hdev->hw);
+err_pci_uninit:
+ pcim_iounmap(pdev, hdev->hw.io_base);
+ pci_clear_master(pdev);
pci_release_regions(pdev);
-err_pci_init:
- pci_set_drvdata(pdev, NULL);
-err_hclge_dev:
+ pci_disable_device(pdev);
+out:
return ret;
}
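The probe-path rework above turns a series of early "return ret" statements into the usual kernel unwind chain: every failure jumps to the label that tears down exactly what has been set up so far, and the labels run in reverse order of initialization. A bare-bones sketch of the shape, with hypothetical steps stubbed so it compiles:

/* Hypothetical setup/teardown steps. */
static int demo_step_a(void)  { return 0; }
static int demo_step_b(void)  { return 0; }
static int demo_step_c(void)  { return 0; }
static void demo_undo_a(void) { }
static void demo_undo_b(void) { }

static int demo_init(void)
{
	int ret;

	ret = demo_step_a();
	if (ret)
		goto out;

	ret = demo_step_b();
	if (ret)
		goto err_undo_a;

	ret = demo_step_c();
	if (ret)
		goto err_undo_b;

	return 0;

err_undo_b:
	demo_undo_b();
err_undo_a:
	demo_undo_a();
out:
	return ret;
}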
@@ -5586,6 +5613,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
hclge_stats_clear(hdev);
+ memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
ret = hclge_cmd_init(hdev);
if (ret) {
@@ -5658,9 +5686,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- if (IS_ENABLED(CONFIG_PCI_IOV))
- hclge_disable_sriov(hdev);
-
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func)
@@ -6203,7 +6228,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fw_version = hclge_get_fw_version,
.get_mdix_mode = hclge_get_mdix_mode,
.enable_vlan_filter = hclge_enable_vlan_filter,
- .set_vlan_filter = hclge_set_port_vlan_filter,
+ .set_vlan_filter = hclge_set_vlan_filter,
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
@@ -6228,7 +6253,9 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
- return hnae3_register_ae_algo(&ae_algo);
+ hnae3_register_ae_algo(&ae_algo);
+
+ return 0;
}
static void hclge_exit(void)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 0f4157e71282..93177d91eea4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -12,10 +12,12 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/phy.h>
+#include <linux/if_vlan.h>
+
#include "hclge_cmd.h"
#include "hnae3.h"
-#define HCLGE_MOD_VERSION "v1.0"
+#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
#define HCLGE_INVALID_VPORT 0xffff
@@ -406,9 +408,9 @@ struct hclge_mac_stats {
u64 mac_tx_1519_2047_oct_pkt_num;
u64 mac_tx_2048_4095_oct_pkt_num;
u64 mac_tx_4096_8191_oct_pkt_num;
- u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
- u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
- u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */
+ u64 rsv0;
+ u64 mac_tx_8192_9216_oct_pkt_num;
+ u64 mac_tx_9217_12287_oct_pkt_num;
u64 mac_tx_12288_16383_oct_pkt_num;
u64 mac_tx_1519_max_good_oct_pkt_num;
u64 mac_tx_1519_max_bad_oct_pkt_num;
@@ -433,9 +435,9 @@ struct hclge_mac_stats {
u64 mac_rx_1519_2047_oct_pkt_num;
u64 mac_rx_2048_4095_oct_pkt_num;
u64 mac_rx_4096_8191_oct_pkt_num;
- u64 mac_rx_8192_12287_oct_pkt_num;/* valid for GE MAC only */
- u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
- u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 rsv1;
+ u64 mac_rx_8192_9216_oct_pkt_num;
+ u64 mac_rx_9217_12287_oct_pkt_num;
u64 mac_rx_12288_16383_oct_pkt_num;
u64 mac_rx_1519_max_good_oct_pkt_num;
u64 mac_rx_1519_max_bad_oct_pkt_num;
@@ -471,6 +473,7 @@ struct hclge_vlan_type_cfg {
u16 tx_in_vlan_type;
};
+#define HCLGE_VPORT_NUM 256
struct hclge_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
@@ -562,6 +565,7 @@ struct hclge_dev {
u64 rx_pkts_for_led;
u64 tx_pkts_for_led;
+ unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
};
/* VPort level vlan tag configuration for TX direction */
@@ -646,8 +650,9 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
}
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
-int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,
- bool is_kill, u16 vlan, u8 qos, __be16 proto);
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill);
+int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index a6f7ffa9c259..b6ae26ba0a46 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -264,19 +264,23 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
{
- struct hclge_dev *hdev = vport->back;
int status = 0;
if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
+ struct hnae3_handle *handle = &vport->nic;
u16 vlan, proto;
bool is_kill;
is_kill = !!mbx_req->msg[2];
memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
- status = hclge_set_vf_vlan_common(hdev, vport->vport_id,
- is_kill, vlan, 0,
- cpu_to_be16(proto));
+ status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
+ vlan, is_kill);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
+ struct hnae3_handle *handle = &vport->nic;
+ bool en = mbx_req->msg[2] ? true : false;
+
+ status = hclge_en_hw_strip_rxvtag(handle, en);
}
if (gen_resp)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 682c2d6618e7..9f7932e423b5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -140,8 +140,11 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
struct mii_bus *mdio_bus;
int ret;
- if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR)
- return 0;
+ if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
+ dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
+ hdev->hw.mac.phy_addr);
+ return -EINVAL;
+ }
mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev);
if (!mdio_bus)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 885f25cd7be4..262c125f8137 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -134,11 +134,8 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev,
}
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get pfc pause stats fail, ret = %d.\n", ret);
+ if (ret)
return ret;
- }
for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
struct hclge_pfc_stats_cmd *pfc_stats =
@@ -503,7 +500,8 @@ static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
+static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
+ u32 bit_map)
{
struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
struct hclge_desc desc;
@@ -514,9 +512,8 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
bp_to_qs_map_cmd->tc_id = tc;
-
- /* Qset and tc is one by one mapping */
- bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);
+ bp_to_qs_map_cmd->qs_group_id = grp_id;
+ bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -1170,6 +1167,41 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
hdev->tm_info.hw_pfc_map);
}
+/* Each TC has 1024 queue sets for backpressure; they are divided into
+ * 32 groups, and each group contains 32 queue sets, which can be
+ * represented by a u32 bitmap.
+ */
+static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u32 i, k, qs_bitmap;
+ int ret;
+
+ for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
+ qs_bitmap = 0;
+
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ u16 qs_id = vport->qs_offset + tc;
+ u8 grp, sub_grp;
+
+ grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M,
+ HCLGE_BP_GRP_ID_S);
+ sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+ HCLGE_BP_SUB_GRP_ID_S);
+ if (i == grp)
+ qs_bitmap |= (1 << sub_grp);
+
+ vport++;
+ }
+
+ ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
bool tx_en, rx_en;
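The comment and hclge_bp_setup_hw() above split a 10-bit queue-set id into a 5-bit group index (which of the 32 bitmaps) and a 5-bit sub-group index (which bit inside that u32), covering 32 x 32 = 1024 queue sets per TC. A small worked example of the decomposition (standalone, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t qs_id   = 0x2a3;                    /* any 10-bit queue-set id      */
	unsigned int sub = qs_id & 0x1f;             /* bits 4..0: bit in the bitmap */
	unsigned int grp = (qs_id >> 5) & 0x1f;      /* bits 9..5: which bitmap      */
	uint32_t bitmap  = 1u << sub;

	/* qs_id 0x2a3 (675) -> group 21, bit 3 set in that group's bitmap */
	printf("grp=%u sub=%u bitmap=0x%08x\n", grp, sub, bitmap);
	return 0;
}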
@@ -1221,7 +1253,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);
for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_tm_qs_bp_cfg(hdev, i);
+ ret = hclge_bp_setup_hw(hdev, i);
if (ret)
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 2dbe177581e9..c2b6e8a6700f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -89,6 +89,11 @@ struct hclge_pg_shapping_cmd {
__le32 pg_shapping_para;
};
+#define HCLGE_BP_GRP_NUM 32
+#define HCLGE_BP_SUB_GRP_ID_S 0
+#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0)
+#define HCLGE_BP_GRP_ID_S 5
+#define HCLGE_BP_GRP_ID_M GENMASK(9, 5)
struct hclge_bp_to_qs_map_cmd {
u8 tc_id;
u8 rsvd[2];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 2b8426412cc9..2b0e3295989f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -830,6 +830,17 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data;
+
+ msg_data = enable ? 1 : 0;
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
+ 1, false, NULL, 0);
+}
+
static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1552,7 +1563,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
- goto err_no_drvdata;
+ return ret;
}
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -1584,8 +1595,7 @@ err_clr_master:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
-err_no_drvdata:
- pci_set_drvdata(pdev, NULL);
+
return ret;
}
@@ -1597,7 +1607,6 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
@@ -1625,6 +1634,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_state_init(hdev);
+ ret = hclgevf_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_init;
+
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
@@ -1632,10 +1645,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_misc_irq_init;
}
- ret = hclgevf_cmd_init(hdev);
- if (ret)
- goto err_cmd_init;
-
ret = hclgevf_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
@@ -1683,10 +1692,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
return 0;
err_config:
- hclgevf_cmd_uninit(hdev);
-err_cmd_init:
hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
err_irq_init:
@@ -1696,9 +1705,9 @@ err_irq_init:
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
- hclgevf_cmd_uninit(hdev);
- hclgevf_misc_irq_uninit(hdev);
hclgevf_state_uninit(hdev);
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_cmd_uninit(hdev);
hclgevf_uninit_msi(hdev);
hclgevf_pci_uninit(hdev);
}
@@ -1825,6 +1834,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_tc_size = hclgevf_get_tc_size,
.get_fw_version = hclgevf_get_fw_version,
.set_vlan_filter = hclgevf_set_vlan_filter,
+ .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
.reset_event = hclgevf_reset_event,
.get_channels = hclgevf_get_channels,
.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
@@ -1842,7 +1852,9 @@ static int hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
- return hnae3_register_ae_algo(&ae_algovf);
+ hnae3_register_ae_algo(&ae_algovf);
+
+ return 0;
}
static void hclgevf_exit(void)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index a477a7c36bbd..9763e742e6fb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -9,7 +9,7 @@
#include "hclgevf_cmd.h"
#include "hnae3.h"
-#define HCLGEVF_MOD_VERSION "v1.0"
+#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
#define HCLGEVF_ROCEE_VECTOR_NUM 0
diff --git a/drivers/net/ethernet/huawei/hinic/Kconfig b/drivers/net/ethernet/huawei/hinic/Kconfig
index 08db24954f7e..e4e8b24c1a5d 100644
--- a/drivers/net/ethernet/huawei/hinic/Kconfig
+++ b/drivers/net/ethernet/huawei/hinic/Kconfig
@@ -4,7 +4,7 @@
config HINIC
tristate "Huawei Intelligent PCIE Network Interface Card"
- depends on (PCI_MSI && X86)
+ depends on (PCI_MSI && (X86 || ARM64))
---help---
This driver supports HiNIC PCIE Ethernet cards.
To compile this driver as part of the kernel, choose Y here.
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index eb53bd93065e..5b122728dcb4 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -51,7 +51,9 @@ static unsigned int rx_weight = 64;
module_param(rx_weight, uint, 0644);
MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
-#define PCI_DEVICE_ID_HI1822_PF 0x1822
+#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822
+#define HINIC_DEV_ID_DUAL_PORT_25GE 0x0200
+#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0201
#define HINIC_WQ_NAME "hinic_dev"
@@ -1097,7 +1099,9 @@ static void hinic_remove(struct pci_dev *pdev)
}
static const struct pci_device_id hinic_pci_table[] = {
- { PCI_VDEVICE(HUAWEI, PCI_DEVICE_ID_HI1822_PF), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_25GE), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
{ 0, 0}
};
MODULE_DEVICE_TABLE(pci, hinic_pci_table);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index aad5658d79d5..4bb4646a5f92 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -192,6 +192,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
adapter->fw_done_rc);
+ dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
return -1;
}
return 0;
@@ -794,46 +795,61 @@ static int ibmvnic_login(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000);
- struct device *dev = &adapter->vdev->dev;
+ int retry_count = 0;
int rc;
do {
- if (adapter->renegotiate) {
- adapter->renegotiate = false;
+ if (retry_count > IBMVNIC_MAX_QUEUES) {
+ netdev_warn(netdev, "Login attempts exceeded\n");
+ return -1;
+ }
+
+ adapter->init_done_rc = 0;
+ reinit_completion(&adapter->init_done);
+ rc = send_login(adapter);
+ if (rc) {
+ netdev_warn(netdev, "Unable to login\n");
+ return rc;
+ }
+
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ netdev_warn(netdev, "Login timed out\n");
+ return -1;
+ }
+
+ if (adapter->init_done_rc == PARTIALSUCCESS) {
+ retry_count++;
release_sub_crqs(adapter, 1);
+ adapter->init_done_rc = 0;
reinit_completion(&adapter->init_done);
send_cap_queries(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
- dev_err(dev, "Capabilities query timeout\n");
+ netdev_warn(netdev,
+ "Capabilities query timed out\n");
return -1;
}
+
rc = init_sub_crqs(adapter);
if (rc) {
- dev_err(dev,
- "Initialization of SCRQ's failed\n");
+ netdev_warn(netdev,
+ "SCRQ initialization failed\n");
return -1;
}
+
rc = init_sub_crq_irqs(adapter);
if (rc) {
- dev_err(dev,
- "Initialization of SCRQ's irqs failed\n");
+ netdev_warn(netdev,
+ "SCRQ irq initialization failed\n");
return -1;
}
- }
-
- reinit_completion(&adapter->init_done);
- rc = send_login(adapter);
- if (rc) {
- dev_err(dev, "Unable to attempt device login\n");
- return rc;
- } else if (!wait_for_completion_timeout(&adapter->init_done,
- timeout)) {
- dev_err(dev, "Login timeout\n");
+ } else if (adapter->init_done_rc) {
+ netdev_warn(netdev, "Adapter login failed\n");
return -1;
}
- } while (adapter->renegotiate);
+ } while (adapter->init_done_rc == PARTIALSUCCESS);
/* handle pending MAC address changes after successful login */
if (adapter->mac_change_pending) {
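The login rework above replaces the open-ended "renegotiate" flag with a bounded retry loop keyed on init_done_rc == PARTIALSUCCESS: each pass re-issues the login, waits on a completion with a timeout, renegotiates capabilities on a partial success, and gives up once the attempt counter is exhausted. A generic sketch of that control flow with hypothetical helpers (not the ibmvnic API):

#define DEMO_MAX_RETRIES 16
#define DEMO_PARTIAL	 1	/* "retry with fewer resources" */

/* Hypothetical stand-ins for send_login() and the completion wait. */
static int demo_send_login(void)      { return 0; }
static int demo_wait_for_reply(void)  { return 0; }
static void demo_shrink_request(void) { }

static int demo_login(void)
{
	int retries = 0;
	int rc;

	do {
		if (retries++ > DEMO_MAX_RETRIES)
			return -1;		/* attempts exhausted        */

		rc = demo_send_login();
		if (rc)
			return rc;

		rc = demo_wait_for_reply();	/* completion + timeout      */
		if (rc < 0)
			return rc;		/* timed out or hard failure */

		if (rc == DEMO_PARTIAL)
			demo_shrink_request();	/* renegotiate, then retry   */
	} while (rc == DEMO_PARTIAL);

	return 0;
}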
@@ -1034,16 +1050,14 @@ static int __ibmvnic_open(struct net_device *netdev)
netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
if (prev_state == VNIC_CLOSED)
enable_irq(adapter->rx_scrq[i]->irq);
- else
- enable_scrq_irq(adapter, adapter->rx_scrq[i]);
+ enable_scrq_irq(adapter, adapter->rx_scrq[i]);
}
for (i = 0; i < adapter->req_tx_queues; i++) {
netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
if (prev_state == VNIC_CLOSED)
enable_irq(adapter->tx_scrq[i]->irq);
- else
- enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+ enable_scrq_irq(adapter, adapter->tx_scrq[i]);
}
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
@@ -1115,7 +1129,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter)
if (!adapter->rx_pool)
return;
- rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ rx_scrqs = adapter->num_active_rx_pools;
rx_entries = adapter->req_rx_add_entries_per_subcrq;
/* Free any remaining skbs in the rx buffer pools */
@@ -1164,7 +1178,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter)
if (!adapter->tx_pool || !adapter->tso_pool)
return;
- tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_scrqs = adapter->num_active_tx_pools;
/* Free any remaining skbs in the tx buffer pools */
for (i = 0; i < tx_scrqs; i++) {
@@ -1184,6 +1198,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
if (adapter->tx_scrq[i]->irq) {
netdev_dbg(netdev,
"Disabling tx_scrq[%d] irq\n", i);
+ disable_scrq_irq(adapter, adapter->tx_scrq[i]);
disable_irq(adapter->tx_scrq[i]->irq);
}
}
@@ -1193,6 +1208,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
if (adapter->rx_scrq[i]->irq) {
netdev_dbg(netdev,
"Disabling rx_scrq[%d] irq\n", i);
+ disable_scrq_irq(adapter, adapter->rx_scrq[i]);
disable_irq(adapter->rx_scrq[i]->irq);
}
}
@@ -1806,9 +1822,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (rc)
return rc;
}
+ ibmvnic_disable_irqs(adapter);
}
-
- ibmvnic_disable_irqs(adapter);
adapter->state = VNIC_CLOSED;
if (reset_state == VNIC_CLOSED)
@@ -1828,7 +1843,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
- if (adapter->reset_reason != VNIC_RESET_FAILOVER)
+ if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
+ adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
netdev_notify_peers(netdev);
netif_carrier_on(netdev);
@@ -2601,12 +2617,19 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
{
struct device *dev = &adapter->vdev->dev;
unsigned long rc;
+ u64 val;
if (scrq->hw_irq > 0x100000000ULL) {
dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
return 1;
}
+ val = (0xff000000) | scrq->hw_irq;
+ rc = plpar_hcall_norets(H_EOI, val);
+ if (rc)
+ dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+ val, rc);
+
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
if (rc)
@@ -3170,7 +3193,7 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter)
struct vnic_login_client_data {
u8 type;
__be16 len;
- char name;
+ char name[];
} __packed;
static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
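The struct change above turns the trailing "char name" of vnic_login_client_data into a proper flexible array member, so the variable-length string that follows the fixed header can be addressed as vlcd->name directly instead of through &vlcd->name casts. A minimal userspace illustration of the layout (hypothetical struct, not the ibmvnic one):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct demo_tlv {
	uint8_t  type;
	uint16_t len;
	char     name[];	/* flexible array member: bytes follow the header */
} __attribute__((packed));

static struct demo_tlv *demo_tlv_alloc(uint8_t type, const char *s)
{
	size_t len = strlen(s) + 1;
	struct demo_tlv *tlv = malloc(sizeof(*tlv) + len);

	if (!tlv)
		return NULL;

	tlv->type = type;
	tlv->len  = (uint16_t)len;
	memcpy(tlv->name, s, len);	/* no &tlv->name arithmetic needed */
	return tlv;
}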
@@ -3199,21 +3222,21 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
vlcd->type = 1;
len = strlen(os_name) + 1;
vlcd->len = cpu_to_be16(len);
- strncpy(&vlcd->name, os_name, len);
- vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
+ strncpy(vlcd->name, os_name, len);
+ vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
/* Type 2 - LPAR name */
vlcd->type = 2;
len = strlen(utsname()->nodename) + 1;
vlcd->len = cpu_to_be16(len);
- strncpy(&vlcd->name, utsname()->nodename, len);
- vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
+ strncpy(vlcd->name, utsname()->nodename, len);
+ vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
/* Type 3 - device name */
vlcd->type = 3;
len = strlen(adapter->netdev->name) + 1;
vlcd->len = cpu_to_be16(len);
- strncpy(&vlcd->name, adapter->netdev->name, len);
+ strncpy(vlcd->name, adapter->netdev->name, len);
}
static int send_login(struct ibmvnic_adapter *adapter)
@@ -3942,7 +3965,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
* to resend the login buffer with fewer queues requested.
*/
if (login_rsp_crq->generic.rc.code) {
- adapter->renegotiate = true;
+ adapter->init_done_rc = login_rsp_crq->generic.rc.code;
complete(&adapter->init_done);
return 0;
}
@@ -4563,14 +4586,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
release_crq_queue(adapter);
}
- rc = init_stats_buffers(adapter);
- if (rc)
- return rc;
-
- rc = init_stats_token(adapter);
- if (rc)
- return rc;
-
return rc;
}
@@ -4639,13 +4654,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
goto ibmvnic_init_fail;
} while (rc == EAGAIN);
+ rc = init_stats_buffers(adapter);
+ if (rc)
+ goto ibmvnic_init_fail;
+
+ rc = init_stats_token(adapter);
+ if (rc)
+ goto ibmvnic_stats_fail;
+
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
rc = device_create_file(&dev->dev, &dev_attr_failover);
if (rc)
- goto ibmvnic_init_fail;
+ goto ibmvnic_dev_file_err;
netif_carrier_off(netdev);
rc = register_netdev(netdev);
@@ -4664,6 +4687,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
ibmvnic_register_fail:
device_remove_file(&dev->dev, &dev_attr_failover);
+ibmvnic_dev_file_err:
+ release_stats_token(adapter);
+
+ibmvnic_stats_fail:
+ release_stats_buffers(adapter);
+
ibmvnic_init_fail:
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 99c0b58c2c39..22391e8805f6 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1035,7 +1035,6 @@ struct ibmvnic_adapter {
struct ibmvnic_sub_crq_queue **tx_scrq;
struct ibmvnic_sub_crq_queue **rx_scrq;
- bool renegotiate;
/* rx structs */
struct napi_struct *napi;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 41ad56edfb96..27d5f27163d2 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel PRO/100 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
/*
* e100.c: Intel(R) PRO/100 ethernet driver
diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile
index c7caadd3c8af..314c52d44b7c 100644
--- a/drivers/net/ethernet/intel/e1000/Makefile
+++ b/drivers/net/ethernet/intel/e1000/Makefile
@@ -1,31 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel PRO/1000 Linux driver
# Copyright(c) 1999 - 2006 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Linux NICS <linux.nics@intel.com>
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
#
# Makefile for the Intel(R) PRO/1000 ethernet driver
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 3a0feea2df54..c40729b2c184 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -1,32 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
/* Linux PRO/1000 Ethernet Driver main header file */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 3e80ca170dd7..5d365a986bb0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- * Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2006 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
/* ethtool support for e1000 */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 6e7e923d57bf..48428d6a00be 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-*
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
- */
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
/* e1000_hw.c
* Shared functions for accessing and configuring the MAC
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index f09c569ec19b..b57a04954ccf 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
/* e1000_hw.h
* Structures, enums, and macros for the MAC
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index d5eb19b86a0a..2110d5f2da19 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
#include "e1000.h"
#include <net/ip6_checksum.h>
diff --git a/drivers/net/ethernet/intel/e1000/e1000_osdep.h b/drivers/net/ethernet/intel/e1000/e1000_osdep.h
index ae0559b8b011..e966bb290797 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_osdep.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_osdep.h
@@ -1,32 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
/* glue for the OS independent part of e1000
* includes register access macros
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 345f23927bcc..d3f29ffe1e47 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 953e99df420c..257bd59bc9c6 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
* 80003ES2LAN Gigabit Ethernet Controller (Serdes)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index ee6d1256fda4..aa9d639c6cbb 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_80003ES2LAN_H_
#define _E1000E_80003ES2LAN_H_
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 924f2c8dfa6c..b9309302c29e 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* 82571EB Gigabit Ethernet Controller
* 82571EB Gigabit Ethernet Controller (Copper)
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 9a24c645f726..834c238d02db 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_82571_H_
#define _E1000E_82571_H_
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 24e391a4ac68..44e58b6e7660 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,30 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2014 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Linux NICS <linux.nics@intel.com>
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
+# Copyright(c) 1999 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) PRO/1000 ethernet driver
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 22883015a695..fd550dee4982 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index da88555ba1fd..c760dc72c520 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* Linux PRO/1000 Ethernet Driver main header file */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 64dc0c11147f..e084cb734eb1 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* ethtool support for e1000 */
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 21802396bed6..eff75bd8a8f0 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 1551d6ce5341..cdae0efde8e6 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* 82562G 10/100 Network Connection
* 82562G-2 10/100 Network Connection
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 3c4f82c21084..eb09c755fa17 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_ICH8LAN_H_
#define _E1000E_ICH8LAN_H_
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index b293464a9f27..4abd55d646c5 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index cb0abf6c76a5..6ab261119801 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_MAC_H_
#define _E1000E_MAC_H_
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index e027660aeb92..c4c9b20bc51f 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index 3268f2e58593..d868aad806d4 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_MANAGE_H_
#define _E1000E_MANAGE_H_
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index ec4a9759a6f2..d3fef7fefea8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 68949bb41b7b..937f9af22d26 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 8e082028be7d..6a30dfea4117 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_NVM_H_
#define _E1000E_NVM_H_
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 2def33eba9e6..098369fd3e65 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/netdevice.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b8226ed0e338..42233019255a 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index d4180b5e9196..c48777d09523 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_PHY_H_
#define _E1000E_PHY_H_
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index f941e5085f44..37c76945ad9b 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,24 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* PTP 1588 Hardware Clock (PHC)
* Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 16afc3c2a986..47f5ca793970 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,24 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000E_REGS_H_
#define _E1000E_REGS_H_
diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile
index 93277cb99cb7..26a9746ccb14 100644
--- a/drivers/net/ethernet/intel/fm10k/Makefile
+++ b/drivers/net/ethernet/intel/fm10k/Makefile
@@ -1,26 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel(R) Ethernet Switch Host Interface Driver
-# Copyright(c) 2013 - 2016 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
+# Copyright(c) 2013 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) Ethernet Switch Host Interface Driver
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index a9cdf763c59d..a903a0ba45e1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_H_
#define _FM10K_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index e303d88720ef..f51a63fca513 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k_common.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 2bdb24d2ca9d..4c48fb73b3e7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_COMMON_H_
#define _FM10K_COMMON_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index c4f733452ef2..20768ac7f17e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 43e8d839831f..dca104121c05 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 28b6b4e56487..7657daa27298 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,40 +1,32 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/vmalloc.h>
#include "fm10k.h"
struct fm10k_stats {
+ /* The stat_string is expected to be a format string formatted using
+ * vsnprintf by fm10k_add_stat_strings. Every member of a stats array
+ * should use the same format specifiers as they will be formatted
+ * using the same variadic arguments.
+ */
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
};
-#define FM10K_NETDEV_STAT(_net_stat) { \
- .stat_string = #_net_stat, \
- .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
- .stat_offset = offsetof(struct net_device_stats, _net_stat) \
+#define FM10K_STAT_FIELDS(_type, _name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+ .stat_offset = offsetof(_type, _stat) \
}
+/* netdevice statistics */
+#define FM10K_NETDEV_STAT(_net_stat) \
+ FM10K_STAT_FIELDS(struct net_device_stats, __stringify(_net_stat), \
+ _net_stat)
+
static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
FM10K_NETDEV_STAT(tx_packets),
FM10K_NETDEV_STAT(tx_bytes),
@@ -52,11 +44,9 @@ static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
#define FM10K_NETDEV_STATS_LEN ARRAY_SIZE(fm10k_gstrings_net_stats)
-#define FM10K_STAT(_name, _stat) { \
- .stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(struct fm10k_intfc, _stat), \
- .stat_offset = offsetof(struct fm10k_intfc, _stat) \
-}
+/* General interface statistics */
+#define FM10K_STAT(_name, _stat) \
+ FM10K_STAT_FIELDS(struct fm10k_intfc, _name, _stat)
static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
FM10K_STAT("tx_restart_queue", restart_queue),
@@ -93,11 +83,9 @@ static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
};
-#define FM10K_MBX_STAT(_name, _stat) { \
- .stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(struct fm10k_mbx_info, _stat), \
- .stat_offset = offsetof(struct fm10k_mbx_info, _stat) \
-}
+/* mailbox statistics */
+#define FM10K_MBX_STAT(_name, _stat) \
+ FM10K_STAT_FIELDS(struct fm10k_mbx_info, _name, _stat)
static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
FM10K_MBX_STAT("mbx_tx_busy", tx_busy),
@@ -111,15 +99,13 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
};
-#define FM10K_QUEUE_STAT(_name, _stat) { \
- .stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \
- .stat_offset = offsetof(struct fm10k_ring, _stat) \
-}
+/* per-queue ring statistics */
+#define FM10K_QUEUE_STAT(_name, _stat) \
+ FM10K_STAT_FIELDS(struct fm10k_ring, _name, _stat)
static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
- FM10K_QUEUE_STAT("packets", stats.packets),
- FM10K_QUEUE_STAT("bytes", stats.bytes),
+ FM10K_QUEUE_STAT("%s_queue_%u_packets", stats.packets),
+ FM10K_QUEUE_STAT("%s_queue_%u_bytes", stats.bytes),
};
#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
@@ -149,49 +135,44 @@ enum {
static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
};
-static void fm10k_add_stat_strings(u8 **p, const char *prefix,
- const struct fm10k_stats stats[],
- const unsigned int size)
+static void __fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[],
+ const unsigned int size, ...)
{
unsigned int i;
for (i = 0; i < size; i++) {
- snprintf(*p, ETH_GSTRING_LEN, "%s%s",
- prefix, stats[i].stat_string);
+ va_list args;
+
+ va_start(args, size);
+ vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
*p += ETH_GSTRING_LEN;
+ va_end(args);
}
}
+#define fm10k_add_stat_strings(p, stats, ...) \
+ __fm10k_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
+
static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
unsigned int i;
- fm10k_add_stat_strings(&data, "", fm10k_gstrings_net_stats,
- FM10K_NETDEV_STATS_LEN);
+ fm10k_add_stat_strings(&data, fm10k_gstrings_net_stats);
- fm10k_add_stat_strings(&data, "", fm10k_gstrings_global_stats,
- FM10K_GLOBAL_STATS_LEN);
+ fm10k_add_stat_strings(&data, fm10k_gstrings_global_stats);
- fm10k_add_stat_strings(&data, "", fm10k_gstrings_mbx_stats,
- FM10K_MBX_STATS_LEN);
+ fm10k_add_stat_strings(&data, fm10k_gstrings_mbx_stats);
if (interface->hw.mac.type != fm10k_mac_vf)
- fm10k_add_stat_strings(&data, "", fm10k_gstrings_pf_stats,
- FM10K_PF_STATS_LEN);
+ fm10k_add_stat_strings(&data, fm10k_gstrings_pf_stats);
for (i = 0; i < interface->hw.mac.max_queues; i++) {
- char prefix[ETH_GSTRING_LEN];
-
- snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i);
- fm10k_add_stat_strings(&data, prefix,
- fm10k_gstrings_queue_stats,
- FM10K_QUEUE_STATS_LEN);
+ fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
+ "tx", i);
- snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i);
- fm10k_add_stat_strings(&data, prefix,
- fm10k_gstrings_queue_stats,
- FM10K_QUEUE_STATS_LEN);
+ fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
+ "rx", i);
}
}
@@ -236,9 +217,9 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
}
}
-static void fm10k_add_ethtool_stats(u64 **data, void *pointer,
- const struct fm10k_stats stats[],
- const unsigned int size)
+static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
+ const struct fm10k_stats stats[],
+ const unsigned int size)
{
unsigned int i;
char *p;
@@ -267,11 +248,16 @@ static void fm10k_add_ethtool_stats(u64 **data, void *pointer,
*((*data)++) = *(u8 *)p;
break;
default:
+ WARN_ONCE(1, "unexpected stat size for %s",
+ stats[i].stat_string);
*((*data)++) = 0;
}
}
}
+#define fm10k_add_ethtool_stats(data, pointer, stats) \
+ __fm10k_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
+
static void fm10k_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats __always_unused *stats,
u64 *data)
@@ -282,20 +268,16 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
fm10k_update_stats(interface);
- fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats,
- FM10K_NETDEV_STATS_LEN);
+ fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats);
- fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats,
- FM10K_GLOBAL_STATS_LEN);
+ fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats);
fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
- fm10k_gstrings_mbx_stats,
- FM10K_MBX_STATS_LEN);
+ fm10k_gstrings_mbx_stats);
if (interface->hw.mac.type != fm10k_mac_vf) {
fm10k_add_ethtool_stats(&data, interface,
- fm10k_gstrings_pf_stats,
- FM10K_PF_STATS_LEN);
+ fm10k_gstrings_pf_stats);
}
for (i = 0; i < interface->hw.mac.max_queues; i++) {
@@ -303,13 +285,11 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
ring = interface->tx_ring[i];
fm10k_add_ethtool_stats(&data, ring,
- fm10k_gstrings_queue_stats,
- FM10K_QUEUE_STATS_LEN);
+ fm10k_gstrings_queue_stats);
ring = interface->rx_ring[i];
fm10k_add_ethtool_stats(&data, ring,
- fm10k_gstrings_queue_stats,
- FM10K_QUEUE_STATS_LEN);
+ fm10k_gstrings_queue_stats);
}
}
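The fm10k_ethtool.c hunks above turn each stat_string into a printf-style format string and collapse the old per-queue prefix handling into a single variadic helper. A minimal standalone user-space sketch of that pattern follows; it is illustrative only, not part of the patch, and the demo_* names and GSTRING_LEN are stand-ins rather than driver symbols:

#include <stdarg.h>
#include <stdio.h>

#define GSTRING_LEN 32

struct demo_stat {
	char stat_string[GSTRING_LEN];	/* may contain %s / %u specifiers */
};

static const struct demo_stat demo_queue_stats[] = {
	{ "%s_queue_%u_packets" },
	{ "%s_queue_%u_bytes" },
};

/* Same shape as __fm10k_add_stat_strings(): every entry in the array is
 * expanded with the same variadic arguments, and the output pointer is
 * advanced by one fixed-size slot per string. */
static void demo_add_stat_strings(char **p, const struct demo_stat *stats,
				  unsigned int size, ...)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		va_list args;

		va_start(args, size);
		vsnprintf(*p, GSTRING_LEN, stats[i].stat_string, args);
		*p += GSTRING_LEN;
		va_end(args);
	}
}

int main(void)
{
	char buf[4 * GSTRING_LEN] = { 0 };
	char *p = buf;
	unsigned int q;

	for (q = 0; q < 2; q++)
		demo_add_stat_strings(&p, demo_queue_stats, 2, "tx", q);

	/* Prints tx_queue_0_packets, tx_queue_0_bytes, tx_queue_1_packets, ... */
	for (p = buf; p < buf + sizeof(buf); p += GSTRING_LEN)
		printf("%s\n", p);

	return 0;
}

In the driver itself the array length is supplied by the ARRAY_SIZE() wrapper macro rather than spelled out at each call site, as the fm10k_add_stat_strings() macro in the hunk shows.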
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 30395f5e5e87..e707d717012f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k.h"
#include "fm10k_vf.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index df8607097e4a..3f536541f45f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -445,15 +427,14 @@ static void fm10k_type_trans(struct fm10k_ring *rx_ring,
l2_accel = NULL;
}
- skb->protocol = eth_type_trans(skb, dev);
-
/* Record Rx queue, or update macvlan statistics */
if (!l2_accel)
skb_record_rx_queue(skb, rx_ring->queue_index);
else
macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
- (skb->pkt_type == PACKET_BROADCAST) ||
- (skb->pkt_type == PACKET_MULTICAST));
+ false);
+
+ skb->protocol = eth_type_trans(skb, dev);
}
/**
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index c01bf30a0c9e..21021fe4f1c3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k_common.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
index 007e1dfa9b7a..56d1abff04e2 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_MBX_H_
#define _FM10K_MBX_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 45793491d4ba..929f538d28bc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,27 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k.h"
#include <linux/vmalloc.h>
#include <net/udp_tunnel.h>
+#include <linux/if_macvlan.h>
/**
* fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
@@ -924,7 +907,9 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev,
static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct fm10k_l2_accel *l2_accel = interface->l2_accel;
struct fm10k_hw *hw = &interface->hw;
+ u16 glort;
s32 err;
int i;
@@ -992,6 +977,22 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
if (err)
goto err_out;
+ /* Update L2 accelerated macvlan addresses */
+ if (l2_accel) {
+ for (i = 0; i < l2_accel->size; i++) {
+ struct net_device *sdev = l2_accel->macvlan[i];
+
+ if (!sdev)
+ continue;
+
+ glort = l2_accel->dglort + 1 + i;
+
+ fm10k_queue_mac_request(interface, glort,
+ sdev->dev_addr,
+ vid, set);
+ }
+ }
+
/* set VLAN ID prior to syncing/unsyncing the VLAN */
interface->vid = vid + (set ? VLAN_N_VID : 0);
@@ -1231,6 +1232,22 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
fm10k_queue_mac_request(interface, glort,
hw->mac.addr, vid, true);
+
+ /* synchronize macvlan addresses */
+ if (l2_accel) {
+ for (i = 0; i < l2_accel->size; i++) {
+ struct net_device *sdev = l2_accel->macvlan[i];
+
+ if (!sdev)
+ continue;
+
+ glort = l2_accel->dglort + 1 + i;
+
+ fm10k_queue_mac_request(interface, glort,
+ sdev->dev_addr,
+ vid, true);
+ }
+ }
}
/* update xcast mode before synchronizing addresses if host's mailbox
@@ -1254,7 +1271,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
glort = l2_accel->dglort + 1 + i;
hw->mac.ops.update_xcast_mode(hw, glort,
- FM10K_XCAST_MODE_MULTI);
+ FM10K_XCAST_MODE_NONE);
fm10k_queue_mac_request(interface, glort,
sdev->dev_addr,
hw->mac.default_vid, true);
@@ -1447,7 +1464,14 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
struct fm10k_dglort_cfg dglort = { 0 };
struct fm10k_hw *hw = &interface->hw;
int size = 0, i;
- u16 glort;
+ u16 vid, glort;
+
+ /* The hardware supported by fm10k only filters on the destination MAC
+ * address. In order to avoid issues we only support offloading modes
+ * where the hardware can actually provide the functionality.
+ */
+ if (!macvlan_supports_dest_filter(sdev))
+ return ERR_PTR(-EMEDIUMTYPE);
/* allocate l2 accel structure if it is not available */
if (!l2_accel) {
@@ -1513,12 +1537,18 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
glort = l2_accel->dglort + 1 + i;
- if (fm10k_host_mbx_ready(interface)) {
+ if (fm10k_host_mbx_ready(interface))
hw->mac.ops.update_xcast_mode(hw, glort,
- FM10K_XCAST_MODE_MULTI);
+ FM10K_XCAST_MODE_NONE);
+
+ fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
+ hw->mac.default_vid, true);
+
+ for (vid = fm10k_find_next_vlan(interface, 0);
+ vid < VLAN_N_VID;
+ vid = fm10k_find_next_vlan(interface, vid))
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
- hw->mac.default_vid, true);
- }
+ vid, true);
fm10k_mbx_unlock(interface);
@@ -1532,8 +1562,8 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
struct fm10k_dglort_cfg dglort = { 0 };
struct fm10k_hw *hw = &interface->hw;
struct net_device *sdev = priv;
+ u16 vid, glort;
int i;
- u16 glort;
if (!l2_accel)
return;
@@ -1553,12 +1583,18 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
glort = l2_accel->dglort + 1 + i;
- if (fm10k_host_mbx_ready(interface)) {
+ if (fm10k_host_mbx_ready(interface))
hw->mac.ops.update_xcast_mode(hw, glort,
FM10K_XCAST_MODE_NONE);
+
+ fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
+ hw->mac.default_vid, false);
+
+ for (vid = fm10k_find_next_vlan(interface, 0);
+ vid < VLAN_N_VID;
+ vid = fm10k_find_next_vlan(interface, vid))
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
- hw->mac.default_vid, false);
- }
+ vid, false);
fm10k_mbx_unlock(interface);
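
The fm10k_dfwd_add_station() change above gates macvlan offload on destination-MAC filtering support. A minimal sketch of how such a hook is shaped (illustrative only; my_dfwd_add_station and its body are placeholders, not part of this patch):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/if_macvlan.h>
#include <linux/netdevice.h>

/* Illustrative only: an ndo_dfwd_add_station handler that refuses the offload
 * when the macvlan mode cannot be served by destination-MAC-only filtering,
 * mirroring the check added to fm10k above.
 */
static void *my_dfwd_add_station(struct net_device *dev, struct net_device *sdev)
{
	/* fm10k-class hardware filters only on the destination MAC address,
	 * so reject macvlan modes that need anything more than that.
	 */
	if (!macvlan_supports_dest_filter(sdev))
		return ERR_PTR(-EMEDIUMTYPE);

	/* ...allocate or extend the l2_accel table and queue MAC requests for
	 * the station's address on the default VID and every active VLAN, as
	 * the patch does for fm10k...
	 */
	return sdev;
}
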
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index c4a2b688b38b..15071e4adb98 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/module.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 7ba54c534f8c..8f0a99b6a537 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k_pf.h"
#include "fm10k_vf.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index ae81f9a16602..8e814df709d2 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_PF_H_
#define _FM10K_PF_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index 725ecb7abccd..2a7a40bf2b1c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k_tlv.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index 5d2ee759507e..160bc5b78f99 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_TLV_H_
#define _FM10K_TLV_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index dd23af11e2c1..3e608e493f9d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_TYPE_H_
#define _FM10K_TYPE_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index f06913630b39..a8519c1f0406 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,23 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "fm10k_vf.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
index 66a66b73a2f1..787d0d570a28 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
@@ -1,23 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _FM10K_VF_H_
#define _FM10K_VF_H_
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 75437768a07c..14397e7e9925 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -1,29 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 - 2015 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
+# Copyright(c) 2013 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index a44139c1de80..7a80652e2500 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_H_
#define _I40E_H_
@@ -334,10 +310,12 @@ struct i40e_tc_configuration {
struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
};
+#define I40E_UDP_PORT_INDEX_UNUSED 255
struct i40e_udp_port_config {
/* AdminQ command interface expects port number in Host byte order */
u16 port;
u8 type;
+ u8 filter_index;
};
/* macros related to FLX_PIT */
@@ -608,7 +586,7 @@ struct i40e_pf {
unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
- u64 ptp_base_adj;
+ u32 ptp_adj_mult;
u32 tx_hwtstamp_timeouts;
u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
@@ -1009,6 +987,9 @@ void i40e_service_event_schedule(struct i40e_pf *pf);
void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
u8 *msg, u16 len);
+int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, bool is_xdp,
+ bool enable);
+int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable);
int i40e_vsi_start_rings(struct i40e_vsi *vsi);
void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 843fc7781ef8..ddbea79d18e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_status.h"
#include "i40e_type.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 0a8749ee9fd3..edec3df78971 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ADMINQ_H_
#define _I40E_ADMINQ_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 0244923edeb8..7d888e05f96f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index abed0c52e782..cb8689222c8b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ALLOC_H_
#define _I40E_ALLOC_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index d8ce4999864f..5f3b8b9ff511 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/list.h>
#include <linux/errno.h>
@@ -64,7 +40,7 @@ static struct i40e_ops i40e_lan_ops = {
/**
* i40e_client_get_params - Get the params that can change at runtime
* @vsi: the VSI with the message
- * @param: clinet param struct
+ * @params: client param struct
*
**/
static
@@ -590,7 +566,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
* i40e_client_setup_qvlist
* @ldev: pointer to L2 context.
* @client: Client pointer.
- * @qv_info: queue and vector list
+ * @qvlist_info: queue and vector list
*
* Return 0 on success or < 0 on error
**/
@@ -665,7 +641,7 @@ err:
* i40e_client_request_reset
* @ldev: pointer to L2 context.
* @client: Client pointer.
- * @level: reset level
+ * @reset_level: reset level
**/
static void i40e_client_request_reset(struct i40e_info *ldev,
struct i40e_client *client,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 9d464d40bc17..72994baf4941 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_CLIENT_H_
#define _I40E_CLIENT_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index c0a3dae8a2db..eb2d1530d331 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_type.h"
#include "i40e_adminq.h"
@@ -1695,6 +1671,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
/**
* i40e_set_fc
* @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to enable atomic link restart
*
* Set the requested flow control mode using set_phy_config.
**/
@@ -2831,8 +2809,8 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
* @mr_list: list of mirrored VSI SEIDs or VLAN IDs
* @cmd_details: pointer to command details structure or NULL
* @rule_id: Rule ID returned from FW
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
* VEBs/VEPA elements only
@@ -2892,8 +2870,8 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
* @mr_list: list of mirrored VSI SEIDs or VLAN IDs
* @cmd_details: pointer to command details structure or NULL
* @rule_id: Rule ID returned from FW
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
**/
@@ -2923,8 +2901,8 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
* add_mirrorrule.
* @mr_list: list of mirrored VLAN IDs to be removed
* @cmd_details: pointer to command details structure or NULL
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
**/
@@ -3672,6 +3650,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
/**
* i40e_aq_start_lldp
* @hw: pointer to the hw struct
+ * @buff: buffer for result
+ * @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports.
@@ -3752,7 +3732,6 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
* @udp_port: the UDP port to add in Host byte order
- * @header_len: length of the tunneling header length in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
@@ -3971,6 +3950,7 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @seid: seid of the switching component connected to Physical Port
* @ets_data: Buffer holding ETS parameters
+ * @opcode: Tx scheduler AQ command opcode
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
@@ -4314,10 +4294,10 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @seid: VSI seid to add ethertype filter from
**/
-#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 seid)
{
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
@@ -4448,6 +4428,7 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
* @ret_buff_size: actual buffer size returned
* @ret_next_table: next block to read
* @ret_next_index: next index to read
+ * @cmd_details: pointer to command details structure or NULL
*
* Dump internal FW/HW data for debug purposes.
*
@@ -4574,7 +4555,7 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
* i40e_read_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -4619,7 +4600,7 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
* i40e_write_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes specified PHY register value
@@ -4660,7 +4641,7 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -4734,7 +4715,7 @@ phy_read_end:
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
@@ -4801,7 +4782,7 @@ phy_write_end:
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
@@ -4837,7 +4818,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -4872,7 +4853,6 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
- * @phy_addr: Returned PHY address
*
* Gets PHY address for current port
**/
@@ -5082,7 +5062,9 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
* i40e_led_set_phy
* @hw: pointer to the HW structure
* @on: true or false
+ * @led_addr: address of led register to use
* @mode: original val plus bit for set or ignore
+ *
* Set led's on or off when controlled by the PHY
*
**/
@@ -5371,6 +5353,7 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
+ * @flags: AdminQ command flags
* @cmd_details: pointer to command details structure or NULL
**/
enum
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 9fec728dc4b9..56bff8faf371 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_adminq.h"
#include "i40e_prototype.h"
@@ -945,6 +921,70 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
}
/**
+ * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ * @module: address of the module pointer
+ * @word_offset: offset of LLDP configuration
+ *
+ * Reads the LLDP configuration data from NVM using passed addresses
+ **/
+static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg,
+ u8 module, u32 word_offset)
+{
+ u32 address, offset = (2 * word_offset);
+ i40e_status ret;
+ __le16 raw_mem;
+ u16 mem;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret)
+ return ret;
+
+ ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem,
+ true, NULL);
+ i40e_release_nvm(hw);
+ if (ret)
+ return ret;
+
+ mem = le16_to_cpu(raw_mem);
+ /* Check if this pointer needs to be read in word size or 4K sector
+ * units.
+ */
+ if (mem & I40E_PTR_TYPE)
+ address = (0x7FFF & mem) * 4096;
+ else
+ address = (0x7FFF & mem) * 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem,
+ true, NULL);
+ i40e_release_nvm(hw);
+ if (ret)
+ return ret;
+
+ mem = le16_to_cpu(raw_mem);
+ offset = mem + word_offset;
+ offset *= 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, 0, address + offset,
+ sizeof(struct i40e_lldp_variables), lldp_cfg,
+ true, NULL);
+ i40e_release_nvm(hw);
+
+err_lldp_cfg:
+ return ret;
+}
+
+/**
* i40e_read_lldp_cfg - read LLDP Configuration data from NVM
* @hw: pointer to the HW structure
* @lldp_cfg: pointer to hold lldp configuration variables
@@ -955,21 +995,34 @@ i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
struct i40e_lldp_variables *lldp_cfg)
{
i40e_status ret = 0;
- u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR);
+ u32 mem;
if (!lldp_cfg)
return I40E_ERR_PARAM;
ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret)
- goto err_lldp_cfg;
+ return ret;
- ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset,
- sizeof(struct i40e_lldp_variables),
- (u8 *)lldp_cfg,
- true, NULL);
+ ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem),
+ &mem, true, NULL);
i40e_release_nvm(hw);
+ if (ret)
+ return ret;
+
+ /* Read a bit that holds information whether we are running flat or
+ * structured NVM image. Flat image has LLDP configuration in shadow
+ * ram, so there is a need to pass different addresses for both cases.
+ */
+ if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) {
+ /* Flat NVM case */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR,
+ I40E_SR_LLDP_CFG_PTR);
+ } else {
+ /* Good old structured NVM image */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR,
+ I40E_NVM_LLDP_CFG_PTR);
+ }
-err_lldp_cfg:
return ret;
}
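
The new _i40e_read_lldp_cfg() helper above locates the LLDP block by following NVM pointers that are either word-based or 4 KB-sector-based, selected by the I40E_PTR_TYPE bit. Restated as a stand-alone sketch (the function name is illustrative; the constants come from the driver headers):

static u32 nvm_ptr_to_byte_address(u16 ptr_word)
{
	/* High bit set: the pointer counts 4 KB sectors. */
	if (ptr_word & I40E_PTR_TYPE)
		return (ptr_word & 0x7FFF) * 4096;
	/* Otherwise it counts 16-bit words (hence the "2 * word_offset"
	 * byte offsets used elsewhere in the helper).
	 */
	return (ptr_word & 0x7FFF) * 2;
}
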
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 4f806386cb22..2b748a60a843 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_DCB_H_
#define _I40E_DCB_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 502818e3da78..9deae9a35423 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifdef CONFIG_I40E_DCB
#include "i40e.h"
@@ -47,7 +23,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
/**
* i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
- * @netdev: the corresponding netdev
+ * @dev: the corresponding netdev
* @ets: structure to hold the ETS information
*
* Returns local IEEE ETS configuration
@@ -86,8 +62,8 @@ static int i40e_dcbnl_ieee_getets(struct net_device *dev,
/**
* i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
- * @netdev: the corresponding netdev
- * @ets: structure to hold the PFC information
+ * @dev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
*
* Returns local IEEE PFC configuration
**/
@@ -119,7 +95,7 @@ static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
/**
* i40e_dcbnl_getdcbx - retrieve current DCBx capability
- * @netdev: the corresponding netdev
+ * @dev: the corresponding netdev
*
* Returns DCBx capability features
**/
@@ -132,7 +108,8 @@ static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
/**
* i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
- * @netdev: the corresponding netdev
+ * @dev: the corresponding netdev
+ * @perm_addr: buffer to store the MAC address
*
* Returns the SAN MAC address used for LLDP exchange
**/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d494dcaf18d0..56b911a5dd8b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifdef CONFIG_DEBUG_FS
@@ -36,8 +12,8 @@ static struct dentry *i40e_dbg_root;
/**
* i40e_dbg_find_vsi - searches for the vsi with the given seid
- * @pf - the PF structure to search for the vsi
- * @seid - seid of the vsi it is searching for
+ * @pf: the PF structure to search for the vsi
+ * @seid: seid of the vsi it is searching for
**/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
{
@@ -55,8 +31,8 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
/**
* i40e_dbg_find_veb - searches for the veb with the given seid
- * @pf - the PF structure to search for the veb
- * @seid - seid of the veb it is searching for
+ * @pf: the PF structure to search for the veb
+ * @seid: seid of the veb it is searching for
**/
static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index ad6a66ccb576..334b05ff685a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_DEVIDS_H_
#define _I40E_DEVIDS_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index df3e60470f8b..ef4d3762bf37 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_diag.h"
#include "i40e_prototype.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index be8341763475..c3340f320a18 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_DIAG_H_
#define _I40E_DIAG_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index b974482ff630..6947a2a571cb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
/* ethtool support for i40e */
@@ -43,13 +19,13 @@ struct i40e_stats {
}
#define I40E_NETDEV_STAT(_net_stat) \
- I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
+ I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
#define I40E_PF_STAT(_name, _stat) \
- I40E_STAT(struct i40e_pf, _name, _stat)
+ I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
- I40E_STAT(struct i40e_vsi, _name, _stat)
+ I40E_STAT(struct i40e_vsi, _name, _stat)
#define I40E_VEB_STAT(_name, _stat) \
- I40E_STAT(struct i40e_veb, _name, _stat)
+ I40E_STAT(struct i40e_veb, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -66,18 +42,18 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
};
static const struct i40e_stats i40e_gstrings_veb_stats[] = {
- I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
- I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
- I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
- I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
- I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
- I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
- I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
- I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
- I40E_VEB_STAT("rx_discards", stats.rx_discards),
- I40E_VEB_STAT("tx_discards", stats.tx_discards),
- I40E_VEB_STAT("tx_errors", stats.tx_errors),
- I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
+ I40E_VEB_STAT("veb.rx_bytes", stats.rx_bytes),
+ I40E_VEB_STAT("veb.tx_bytes", stats.tx_bytes),
+ I40E_VEB_STAT("veb.rx_unicast", stats.rx_unicast),
+ I40E_VEB_STAT("veb.tx_unicast", stats.tx_unicast),
+ I40E_VEB_STAT("veb.rx_multicast", stats.rx_multicast),
+ I40E_VEB_STAT("veb.tx_multicast", stats.tx_multicast),
+ I40E_VEB_STAT("veb.rx_broadcast", stats.rx_broadcast),
+ I40E_VEB_STAT("veb.tx_broadcast", stats.tx_broadcast),
+ I40E_VEB_STAT("veb.rx_discards", stats.rx_discards),
+ I40E_VEB_STAT("veb.tx_discards", stats.tx_discards),
+ I40E_VEB_STAT("veb.tx_errors", stats.tx_errors),
+ I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
};
static const struct i40e_stats i40e_gstrings_misc_stats[] = {
@@ -90,6 +66,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
I40E_VSI_STAT("tx_linearize", tx_linearize),
I40E_VSI_STAT("tx_force_wb", tx_force_wb),
+ I40E_VSI_STAT("tx_busy", tx_busy),
I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};
@@ -105,76 +82,77 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
* is queried on the base PF netdev, not on the VMDq or FCoE netdev.
*/
static const struct i40e_stats i40e_gstrings_stats[] = {
- I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
- I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
- I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
- I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
- I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
- I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
- I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
- I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
- I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
- I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
- I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
- I40E_PF_STAT("rx_crc_errors", stats.crc_errors),
- I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
- I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
- I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
- I40E_PF_STAT("tx_timeout", tx_timeout_count),
- I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
- I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
- I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
- I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
- I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
- I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
- I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx),
- I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx),
- I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx),
- I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx),
- I40E_PF_STAT("rx_size_64", stats.rx_size_64),
- I40E_PF_STAT("rx_size_127", stats.rx_size_127),
- I40E_PF_STAT("rx_size_255", stats.rx_size_255),
- I40E_PF_STAT("rx_size_511", stats.rx_size_511),
- I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
- I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
- I40E_PF_STAT("rx_size_big", stats.rx_size_big),
- I40E_PF_STAT("tx_size_64", stats.tx_size_64),
- I40E_PF_STAT("tx_size_127", stats.tx_size_127),
- I40E_PF_STAT("tx_size_255", stats.tx_size_255),
- I40E_PF_STAT("tx_size_511", stats.tx_size_511),
- I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
- I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
- I40E_PF_STAT("tx_size_big", stats.tx_size_big),
- I40E_PF_STAT("rx_undersize", stats.rx_undersize),
- I40E_PF_STAT("rx_fragments", stats.rx_fragments),
- I40E_PF_STAT("rx_oversize", stats.rx_oversize),
- I40E_PF_STAT("rx_jabber", stats.rx_jabber),
- I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
- I40E_PF_STAT("arq_overflows", arq_overflows),
- I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
- I40E_PF_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
- I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
- I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
- I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
- I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
- I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
- I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
+ I40E_PF_STAT("port.rx_bytes", stats.eth.rx_bytes),
+ I40E_PF_STAT("port.tx_bytes", stats.eth.tx_bytes),
+ I40E_PF_STAT("port.rx_unicast", stats.eth.rx_unicast),
+ I40E_PF_STAT("port.tx_unicast", stats.eth.tx_unicast),
+ I40E_PF_STAT("port.rx_multicast", stats.eth.rx_multicast),
+ I40E_PF_STAT("port.tx_multicast", stats.eth.tx_multicast),
+ I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
+ I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
+ I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors),
+ I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
+ I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
+ I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors),
+ I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
+ I40E_PF_STAT("port.mac_local_faults", stats.mac_local_faults),
+ I40E_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults),
+ I40E_PF_STAT("port.tx_timeout", tx_timeout_count),
+ I40E_PF_STAT("port.rx_csum_bad", hw_csum_rx_error),
+ I40E_PF_STAT("port.rx_length_errors", stats.rx_length_errors),
+ I40E_PF_STAT("port.link_xon_rx", stats.link_xon_rx),
+ I40E_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx),
+ I40E_PF_STAT("port.link_xon_tx", stats.link_xon_tx),
+ I40E_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx),
+ I40E_PF_STAT("port.rx_size_64", stats.rx_size_64),
+ I40E_PF_STAT("port.rx_size_127", stats.rx_size_127),
+ I40E_PF_STAT("port.rx_size_255", stats.rx_size_255),
+ I40E_PF_STAT("port.rx_size_511", stats.rx_size_511),
+ I40E_PF_STAT("port.rx_size_1023", stats.rx_size_1023),
+ I40E_PF_STAT("port.rx_size_1522", stats.rx_size_1522),
+ I40E_PF_STAT("port.rx_size_big", stats.rx_size_big),
+ I40E_PF_STAT("port.tx_size_64", stats.tx_size_64),
+ I40E_PF_STAT("port.tx_size_127", stats.tx_size_127),
+ I40E_PF_STAT("port.tx_size_255", stats.tx_size_255),
+ I40E_PF_STAT("port.tx_size_511", stats.tx_size_511),
+ I40E_PF_STAT("port.tx_size_1023", stats.tx_size_1023),
+ I40E_PF_STAT("port.tx_size_1522", stats.tx_size_1522),
+ I40E_PF_STAT("port.tx_size_big", stats.tx_size_big),
+ I40E_PF_STAT("port.rx_undersize", stats.rx_undersize),
+ I40E_PF_STAT("port.rx_fragments", stats.rx_fragments),
+ I40E_PF_STAT("port.rx_oversize", stats.rx_oversize),
+ I40E_PF_STAT("port.rx_jabber", stats.rx_jabber),
+ I40E_PF_STAT("port.VF_admin_queue_requests", vf_aq_requests),
+ I40E_PF_STAT("port.arq_overflows", arq_overflows),
+ I40E_PF_STAT("port.tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ I40E_PF_STAT("port.rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ I40E_PF_STAT("port.tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ I40E_PF_STAT("port.fdir_flush_cnt", fd_flush_cnt),
+ I40E_PF_STAT("port.fdir_atr_match", stats.fd_atr_match),
+ I40E_PF_STAT("port.fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+ I40E_PF_STAT("port.fdir_atr_status", stats.fd_atr_status),
+ I40E_PF_STAT("port.fdir_sb_match", stats.fd_sb_match),
+ I40E_PF_STAT("port.fdir_sb_status", stats.fd_sb_status),
/* LPI stats */
- I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
- I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
- I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
- I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
+ I40E_PF_STAT("port.tx_lpi_status", stats.tx_lpi_status),
+ I40E_PF_STAT("port.rx_lpi_status", stats.rx_lpi_status),
+ I40E_PF_STAT("port.tx_lpi_count", stats.tx_lpi_count),
+ I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count),
};
-#define I40E_QUEUE_STATS_LEN(n) \
- (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
+/* We use num_tx_queues here as a proxy for the maximum number of queues
+ * available because we always allocate queues symmetrically.
+ */
+#define I40E_MAX_NUM_QUEUES(n) ((n)->num_tx_queues)
+#define I40E_QUEUE_STATS_LEN(n) \
+ (I40E_MAX_NUM_QUEUES(n) \
* 2 /* Tx and Rx together */ \
* (sizeof(struct i40e_queue_stats) / sizeof(u64)))
#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
-#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
+#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
-#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
+#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
#define I40E_PFC_STATS_LEN ( \
@@ -977,7 +955,9 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseCR_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseSR_Full))
+ 10000baseSR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseLR_Full))
config.link_speed |= I40E_LINK_SPEED_10GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
20000baseKR2_Full))
@@ -1079,6 +1059,9 @@ static int i40e_nway_reset(struct net_device *netdev)
/**
* i40e_get_pauseparam - Get Flow Control status
+ * @netdev: netdevice structure
+ * @pause: buffer to return pause parameters
+ *
* Return tx/rx-pause status
**/
static void i40e_get_pauseparam(struct net_device *netdev,
@@ -1677,6 +1660,32 @@ done:
return err;
}
+/**
+ * i40e_get_stats_count - return the stats count for a device
+ * @netdev: the netdev to return the count for
+ *
+ * Returns the total number of statistics for this netdev. Note that even
+ * though this is a function, it is required that the count for a specific
+ * netdev never change. Basing the count on static values such as the
+ * maximum number of queues or the device type is ok. However, the API for
+ * obtaining stats is *not* safe against changes based on non-static
+ * values such as the *current* number of queues, or runtime flags.
+ *
+ * If a statistic is not always enabled, return it as part of the count
+ * anyway, always return its string, and report its value as zero.
+ **/
+static int i40e_get_stats_count(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
+ return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_TOTAL;
+ else
+ return I40E_VSI_STATS_LEN(netdev);
+}
+
static int i40e_get_sset_count(struct net_device *netdev, int sset)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -1687,16 +1696,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_TEST:
return I40E_TEST_LEN;
case ETH_SS_STATS:
- if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
- int len = I40E_PF_STATS_LEN(netdev);
-
- if ((pf->lan_veb != I40E_NO_VEB) &&
- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
- len += I40E_VEB_STATS_TOTAL;
- return len;
- } else {
- return I40E_VSI_STATS_LEN(netdev);
- }
+ return i40e_get_stats_count(netdev);
case ETH_SS_PRIV_FLAGS:
return I40E_PRIV_FLAGS_STR_LEN +
(pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0);
@@ -1705,6 +1705,20 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
}
}
+/**
+ * i40e_get_ethtool_stats - copy stat values into supplied buffer
+ * @netdev: the netdev to collect stats for
+ * @stats: ethtool stats command structure
+ * @data: ethtool supplied buffer
+ *
+ * Copy the stats values for this netdev into the buffer. Expects data to be
+ * pre-allocated to the size returned by i40e_get_stats_count. Note that all
+ * statistics must be copied in a static order, and the count must not change
+ * for a given netdev. See i40e_get_stats_count for more details.
+ *
+ * If a statistic is not currently valid (such as a disabled queue), this
+ * function reports its value as zero.
+ **/
static void i40e_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
@@ -1712,47 +1726,54 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- unsigned int j;
- int i = 0;
+ unsigned int i;
char *p;
struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
unsigned int start;
i40e_update_stats(vsi);
- for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
- p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
+ for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
+ p = (char *)net_stats + i40e_gstrings_net_stats[i].stat_offset;
+ *(data++) = (i40e_gstrings_net_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
- p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
+ for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
+ p = (char *)vsi + i40e_gstrings_misc_stats[i].stat_offset;
+ *(data++) = (i40e_gstrings_misc_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
rcu_read_lock();
- for (j = 0; j < vsi->num_queue_pairs; j++) {
- tx_ring = READ_ONCE(vsi->tx_rings[j]);
+	for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) {
+ tx_ring = READ_ONCE(vsi->tx_rings[i]);
- if (!tx_ring)
+ if (!tx_ring) {
+			/* Skip these stats by advancing the data pointer, and
+			 * make sure the memory is zero'd
+ */
+ *(data++) = 0;
+ *(data++) = 0;
+ *(data++) = 0;
+ *(data++) = 0;
continue;
+ }
/* process Tx ring statistics */
do {
start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
- data[i] = tx_ring->stats.packets;
- data[i + 1] = tx_ring->stats.bytes;
+ data[0] = tx_ring->stats.packets;
+ data[1] = tx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
- i += 2;
+ data += 2;
/* Rx ring is the 2nd half of the queue pair */
rx_ring = &tx_ring[1];
do {
start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
- data[i] = rx_ring->stats.packets;
- data[i + 1] = rx_ring->stats.bytes;
+ data[0] = rx_ring->stats.packets;
+ data[1] = rx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
- i += 2;
+ data += 2;
}
rcu_read_unlock();
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
@@ -1762,38 +1783,131 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
struct i40e_veb *veb = pf->veb[pf->lan_veb];
- for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
+ for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
p = (char *)veb;
- p += i40e_gstrings_veb_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
+ p += i40e_gstrings_veb_stats[i].stat_offset;
+ *(data++) = (i40e_gstrings_veb_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
- data[i++] = veb->tc_stats.tc_tx_packets[j];
- data[i++] = veb->tc_stats.tc_tx_bytes[j];
- data[i++] = veb->tc_stats.tc_rx_packets[j];
- data[i++] = veb->tc_stats.tc_rx_bytes[j];
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ *(data++) = veb->tc_stats.tc_tx_packets[i];
+ *(data++) = veb->tc_stats.tc_tx_bytes[i];
+ *(data++) = veb->tc_stats.tc_rx_packets[i];
+ *(data++) = veb->tc_stats.tc_rx_bytes[i];
}
+ } else {
+ data += I40E_VEB_STATS_TOTAL;
}
- for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
- p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+ for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+ p = (char *)pf + i40e_gstrings_stats[i].stat_offset;
+ *(data++) = (i40e_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
- data[i++] = pf->stats.priority_xon_tx[j];
- data[i++] = pf->stats.priority_xoff_tx[j];
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ *(data++) = pf->stats.priority_xon_tx[i];
+ *(data++) = pf->stats.priority_xoff_tx[i];
}
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
- data[i++] = pf->stats.priority_xon_rx[j];
- data[i++] = pf->stats.priority_xoff_rx[j];
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ *(data++) = pf->stats.priority_xon_rx[i];
+ *(data++) = pf->stats.priority_xoff_rx[i];
}
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
- data[i++] = pf->stats.priority_xon_2_xoff[j];
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ *(data++) = pf->stats.priority_xon_2_xoff[i];
}
-static void i40e_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
+/**
+ * i40e_get_stat_strings - copy stat strings into supplied buffer
+ * @netdev: the netdev to collect strings for
+ * @data: supplied buffer to copy strings into
+ *
+ * Copy the strings related to stats for this netdev. Expects data to be
+ * pre-allocated with the size reported by i40e_get_stats_count. Note that the
+ * strings must be copied in a static order and the total count must not
+ * change for a given netdev. See i40e_get_stats_count for more details.
+ **/
+static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ unsigned int i;
+ u8 *p = data;
+
+ for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_net_stats[i].stat_string);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_misc_stats[i].stat_string);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) {
+ snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
+ data += ETH_GSTRING_LEN;
+ }
+ if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+ return;
+
+ for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_veb_stats[i].stat_string);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ "veb.tc_%u_tx_packets", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "veb.tc_%u_tx_bytes", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "veb.tc_%u_rx_packets", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "veb.tc_%u_rx_bytes", i);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_stats[i].stat_string);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xon", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xoff", i);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon", i);
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xoff", i);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon_2_xoff", i);
+ data += ETH_GSTRING_LEN;
+ }
+
+	WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
+ "stat strings count mismatch!");
+}
+
+static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -1801,98 +1915,33 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
char *p = (char *)data;
unsigned int i;
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_priv_flags[i].flag_string);
+ p += ETH_GSTRING_LEN;
+ }
+ if (pf->hw.pf_id != 0)
+ return;
+ for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gl_gstrings_priv_flags[i].flag_string);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+static void i40e_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, i40e_gstrings_test,
I40E_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_net_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_misc_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_bytes", i);
- p += ETH_GSTRING_LEN;
- }
- if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
- return;
-
- if ((pf->lan_veb != I40E_NO_VEB) &&
- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
- for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "veb.%s",
- i40e_gstrings_veb_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%d_tx_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%d_tx_bytes", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%d_rx_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "veb.tc_%d_rx_bytes", i);
- p += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "port.%s",
- i40e_gstrings_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%d_xon", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%d_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%d_xon", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%d_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%d_xon_2_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
+ i40e_get_stat_strings(netdev, data);
break;
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gstrings_priv_flags[i].flag_string);
- p += ETH_GSTRING_LEN;
- }
- if (pf->hw.pf_id != 0)
- break;
- for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40e_gl_gstrings_priv_flags[i].flag_string);
- p += ETH_GSTRING_LEN;
- }
+ i40e_get_priv_flag_strings(netdev, data);
break;
default:
break;
@@ -2550,7 +2599,7 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
/**
* i40e_check_mask - Check whether a mask field is set
* @mask: the full mask value
- * @field; mask of the field to check
+ * @field: mask of the field to check
*
* If the given mask is fully set, return positive value. If the mask for the
* field is fully unset, return zero. Otherwise return a negative error code.
@@ -2621,6 +2670,7 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
/**
* i40e_fill_rx_flow_user_data - Fill in user-defined data field
* @fsp: pointer to rx_flow specification
+ * @data: pointer to return userdef data
*
* Reads the userdef data structure and properly fills in the user defined
* fields of the rx_flow_spec.
@@ -2799,6 +2849,7 @@ no_input_set:
* i40e_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
+ * @rule_locs: pointer to store rule data
*
* Returns Success if the command is supported.
**/
@@ -2840,7 +2891,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
/**
* i40e_get_rss_hash_bits - Read RSS Hash bits from register
* @nfc: pointer to user request
- * @i_setc bits currently set
+ * @i_setc: bits currently set
*
* Returns value of bits to be set per user request
**/
@@ -2885,7 +2936,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
/**
* i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
* @pf: pointer to the physical function struct
- * @cmd: ethtool rxnfc command
+ * @nfc: ethtool rxnfc command
*
* Returns Success if the flow input set is supported.
**/
@@ -3284,7 +3335,7 @@ static int i40e_add_flex_offset(struct list_head *flex_pit_list,
* __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table
* @pf: Pointer to the PF structure
* @flex_pit_list: list of flexible src offsets in use
- * #flex_pit_start: index to first entry for this section of the table
+ * @flex_pit_start: index to first entry for this section of the table
*
* In order to handle flexible data, the hardware uses a table of values
* called the FLX_PIT table. This table is used to indicate which sections of
@@ -3398,7 +3449,7 @@ static void i40e_reprogram_flex_pit(struct i40e_pf *pf)
/**
* i40e_flow_str - Converts a flow_type into a human readable string
- * @flow_type: the flow type from a flow specification
+ * @fsp: the flow specification
*
* Currently only flow types we support are included here, and the string
* value attempts to match what ethtool would use to configure this flow type.
@@ -4103,7 +4154,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
/**
* i40e_get_channels - Get the current channels enabled and max supported etc.
- * @netdev: network interface device structure
+ * @dev: network interface device structure
* @ch: ethtool channels structure
*
* We don't support separate tx and rx queues as channels. The other count
@@ -4112,7 +4163,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
* q_vectors since we support a lot more queue pairs than q_vectors.
**/
static void i40e_get_channels(struct net_device *dev,
- struct ethtool_channels *ch)
+ struct ethtool_channels *ch)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@@ -4131,14 +4182,14 @@ static void i40e_get_channels(struct net_device *dev,
/**
* i40e_set_channels - Set the new channels count.
- * @netdev: network interface device structure
+ * @dev: network interface device structure
* @ch: ethtool channels structure
*
* The new channels count may not be the same as requested by the user
* since it gets rounded down to a power of 2 value.
**/
static int i40e_set_channels(struct net_device *dev,
- struct ethtool_channels *ch)
+ struct ethtool_channels *ch)
{
const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
struct i40e_netdev_priv *np = netdev_priv(dev);
@@ -4273,6 +4324,7 @@ out:
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
+ * @hfunc: hash function to use
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
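
The i40e_get_stats_count()/i40e_get_stat_strings()/i40e_get_ethtool_stats() trio above depends on one invariant: the count, the strings and the values are all derived from the same static maximum, and entries that are currently inactive are padded with zeros rather than skipped. A minimal, self-contained sketch of that contract follows; it is illustrative only, and the demo_* names are placeholders rather than driver or ethtool symbols.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_QUEUES		4	/* static maximum, never the current count */
#define DEMO_GSTRING_LEN	32

static int demo_active_queues = 2;	/* runtime state: only 2 queues enabled */

static int demo_get_count(void)
{
	return DEMO_MAX_QUEUES;		/* must not depend on runtime state */
}

static void demo_get_strings(char *data)
{
	unsigned int i;

	for (i = 0; i < DEMO_MAX_QUEUES; i++) {
		snprintf(data, DEMO_GSTRING_LEN, "tx-%u.tx_packets", i);
		data += DEMO_GSTRING_LEN;
	}
}

static void demo_get_stats(uint64_t *data)
{
	int i;

	for (i = 0; i < DEMO_MAX_QUEUES; i++)
		*(data++) = (i < demo_active_queues) ? 100 + i : 0; /* zero-pad */
}

int main(void)
{
	char strings[DEMO_MAX_QUEUES * DEMO_GSTRING_LEN];
	uint64_t vals[DEMO_MAX_QUEUES];
	int i, n = demo_get_count();

	demo_get_strings(strings);
	demo_get_stats(vals);
	for (i = 0; i < n; i++)
		printf("%-20s %llu\n", &strings[i * DEMO_GSTRING_LEN],
		       (unsigned long long)vals[i]);
	return 0;
}
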
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 6d4b590f851b..19ce93d7fd0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_osdep.h"
#include "i40e_register.h"
@@ -198,7 +174,6 @@ exit:
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
- * @is_pf: distinguishes a VF from a PF
*
* This function:
* 1. Marks the entry in pd tabe (for paged address mode) or in sd table
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 7b5fd33d70ae..1c78de838857 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_HMC_H_
#define _I40E_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index cd40dc487b38..994011c38fb4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_osdep.h"
#include "i40e_register.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 79e1396735d9..c46a2c449e60 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_LAN_HMC_H_
#define _I40E_LAN_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 16229998fb1e..b5daa5c9c7de 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/etherdevice.h>
#include <linux/of_net.h>
@@ -278,8 +254,8 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
/**
* i40e_find_vsi_from_id - searches for the vsi with the given id
- * @pf - the pf structure to search for the vsi
- * @id - id of the vsi it is searching for
+ * @pf: the pf structure to search for the vsi
+ * @id: id of the vsi it is searching for
**/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
@@ -435,6 +411,7 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
/**
* i40e_get_netdev_stats_struct - Get statistics for netdev interface
* @netdev: network interface device structure
+ * @stats: data structure to store statistics
*
* Returns the address of the device statistics structure.
* The statistics are actually updated from the service task.
@@ -2027,7 +2004,7 @@ struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
* from firmware
* @count: Number of filters added
* @add_list: return data from fw
- * @head: pointer to first filter in current batch
+ * @add_head: pointer to first filter in current batch
*
* MAC filter entries from list were slated to be added to device. Returns
* number of successful filters. Note that 0 does NOT mean success!
@@ -2134,6 +2111,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
/**
* i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
* @vsi: pointer to the VSI
+ * @vsi_name: the VSI name
* @f: filter data
*
* This function sets or clears the promiscuous broadcast flags for VLAN
@@ -2840,6 +2818,7 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
/**
* i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
* @netdev: network interface to be adjusted
+ * @proto: unused protocol value
* @vid: vlan id to be added
*
* net_device_ops implementation for adding vlan ids
@@ -2862,8 +2841,26 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
}
/**
+ * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
+ * @netdev: network interface to be adjusted
+ * @proto: unused protocol value
+ * @vid: vlan id to be added
+ **/
+static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (vid >= VLAN_N_VID)
+ return;
+ set_bit(vid, vsi->active_vlans);
+}
+
+/**
* i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
* @netdev: network interface to be adjusted
+ * @proto: unused protocol value
* @vid: vlan id to be removed
*
* net_device_ops implementation for removing vlan ids
@@ -2902,8 +2899,8 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
i40e_vlan_stripping_disable(vsi);
for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
- i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
- vid);
+ i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
+ vid);
}
/**
@@ -3485,7 +3482,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
/**
* i40e_enable_misc_int_causes - enable the non-queue interrupts
- * @hw: ptr to the hardware info
+ * @pf: pointer to private device data structure
**/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
@@ -4255,8 +4252,8 @@ static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
* @is_xdp: true if the queue is used for XDP
* @enable: start or stop the queue
**/
-static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
- bool is_xdp, bool enable)
+int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
+ bool is_xdp, bool enable)
{
int ret;
@@ -4301,7 +4298,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
if (ret)
break;
}
-
return ret;
}
@@ -4340,9 +4336,9 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
* @pf_q: the PF queue to configure
* @enable: start or stop the queue
*
- * This function enables or disables a single queue. Note that any delay
- * required after the operation is expected to be handled by the caller of
- * this function.
+ * This function enables or disables a single queue. Note that
+ * any delay required after the operation is expected to be
+ * handled by the caller of this function.
**/
static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
@@ -4372,6 +4368,30 @@ static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
+ * i40e_control_wait_rx_q
+ * @pf: the PF structure
+ * @pf_q: queue being configured
+ * @enable: start or stop the rings
+ *
+ * This function enables or disables a single queue along with waiting
+ * for the change to finish. The caller of this function should handle
+ * the delays needed in the case of disabling queues.
+ **/
+int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+ int ret = 0;
+
+ i40e_control_rx_q(pf, pf_q, enable);
+
+ /* wait for the change to finish */
+ ret = i40e_pf_rxq_wait(pf, pf_q, enable);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+/**
* i40e_vsi_control_rx - Start or stop a VSI's rings
* @vsi: the VSI being configured
* @enable: start or stop the rings
@@ -4383,10 +4403,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- i40e_control_rx_q(pf, pf_q, enable);
-
- /* wait for the change to finish */
- ret = i40e_pf_rxq_wait(pf, pf_q, enable);
+ ret = i40e_control_wait_rx_q(pf, pf_q, enable);
if (ret) {
dev_info(&pf->pdev->dev,
"VSI seid %d Rx ring %d %sable timeout\n",
@@ -5096,7 +5113,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
* i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
* @vsi: the VSI being configured
* @enabled_tc: TC bitmap
- * @bw_credits: BW shared credits per TC
+ * @bw_share: BW shared credits per TC
*
* Returns 0 on success, negative value on failure
**/
@@ -6353,6 +6370,7 @@ out:
/**
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
+ * @isup: true if link is up, false otherwise
*/
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
@@ -7212,8 +7230,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
if (mask->dst == cpu_to_be32(0xffffffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
- mask->dst = be32_to_cpu(mask->dst);
- dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n",
+ dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
&mask->dst);
return I40E_ERR_CONFIG;
}
@@ -7223,8 +7240,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
if (mask->src == cpu_to_be32(0xffffffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
- mask->src = be32_to_cpu(mask->src);
- dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n",
+ dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
&mask->src);
return I40E_ERR_CONFIG;
}
@@ -9691,9 +9707,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
-static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
+static const char *i40e_tunnel_name(u8 type)
{
- switch (port->type) {
+ switch (type) {
case UDP_TUNNEL_TYPE_VXLAN:
return "vxlan";
case UDP_TUNNEL_TYPE_GENEVE:
@@ -9727,37 +9743,68 @@ static void i40e_sync_udp_filters(struct i40e_pf *pf)
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ u8 filter_index, type;
u16 port;
int i;
if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
return;
+ /* acquire RTNL to maintain state of flags and port requests */
+ rtnl_lock();
+
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
if (pf->pending_udp_bitmap & BIT_ULL(i)) {
+ struct i40e_udp_port_config *udp_port;
+ i40e_status ret = 0;
+
+ udp_port = &pf->udp_ports[i];
pf->pending_udp_bitmap &= ~BIT_ULL(i);
- port = pf->udp_ports[i].port;
+
+ port = READ_ONCE(udp_port->port);
+ type = READ_ONCE(udp_port->type);
+ filter_index = READ_ONCE(udp_port->filter_index);
+
+ /* release RTNL while we wait on AQ command */
+ rtnl_unlock();
+
if (port)
ret = i40e_aq_add_udp_tunnel(hw, port,
- pf->udp_ports[i].type,
- NULL, NULL);
- else
- ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
+ type,
+ &filter_index,
+ NULL);
+ else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
+ ret = i40e_aq_del_udp_tunnel(hw, filter_index,
+ NULL);
+
+ /* reacquire RTNL so we can update filter_index */
+ rtnl_lock();
if (ret) {
dev_info(&pf->pdev->dev,
"%s %s port %d, index %d failed, err %s aq_err %s\n",
- i40e_tunnel_name(&pf->udp_ports[i]),
+ i40e_tunnel_name(type),
port ? "add" : "delete",
- port, i,
+ port,
+ filter_index,
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
- pf->udp_ports[i].port = 0;
+ if (port) {
+ /* failed to add, just reset port,
+ * drop pending bit for any deletion
+ */
+ udp_port->port = 0;
+ pf->pending_udp_bitmap &= ~BIT_ULL(i);
+ }
+ } else if (port) {
+ /* record filter index on success */
+ udp_port->filter_index = filter_index;
}
}
}
+
+ rtnl_unlock();
}
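
The rework above also spells out a locking rule: RTNL is held only to snapshot the requested port state and, later, to record the firmware-assigned filter index, but it is dropped across the admin-queue command itself. A hedged userspace sketch of that snapshot/drop/republish pattern, with a pthread mutex standing in for RTNL and slow_fw_call() as an invented placeholder for the AQ request:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned short requested_port = 4789;	/* e.g. a VXLAN port */
static int programmed_index = -1;

static int slow_fw_call(unsigned short port)	/* placeholder for the AQ command */
{
	usleep(1000);				/* pretend the firmware is slow */
	return port ? 7 : -1;			/* pretend fw handed back index 7 */
}

int main(void)
{
	unsigned short port;
	int idx;

	pthread_mutex_lock(&cfg_lock);
	port = requested_port;			/* snapshot while holding the lock */
	pthread_mutex_unlock(&cfg_lock);	/* do not hold it across the slow call */

	idx = slow_fw_call(port);

	pthread_mutex_lock(&cfg_lock);
	programmed_index = idx;			/* publish the result under the lock */
	pthread_mutex_unlock(&cfg_lock);

	printf("port %u -> filter index %d\n", (unsigned int)port, programmed_index);
	return 0;
}
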
/**
@@ -10004,7 +10051,7 @@ unlock_pf:
/**
* i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
- * @type: VSI pointer
+ * @vsi: VSI pointer
* @free_qvectors: a bool to specify if q_vectors need to be freed.
*
* On error: returns error code (negative)
@@ -10279,21 +10326,28 @@ static int i40e_init_msix(struct i40e_pf *pf)
/* any vectors left over go for VMDq support */
if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
- int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
- int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
-
if (!vectors_left) {
pf->num_vmdq_msix = 0;
pf->num_vmdq_qps = 0;
} else {
+ int vmdq_vecs_wanted =
+ pf->num_vmdq_vsis * pf->num_vmdq_qps;
+ int vmdq_vecs =
+ min_t(int, vectors_left, vmdq_vecs_wanted);
+
/* if we're short on vectors for what's desired, we limit
* the queues per vmdq. If this is still more than are
* available, the user will need to change the number of
* queues/vectors used by the PF later with the ethtool
* channels command
*/
- if (vmdq_vecs < vmdq_vecs_wanted)
+ if (vectors_left < vmdq_vecs_wanted) {
pf->num_vmdq_qps = 1;
+ vmdq_vecs_wanted = pf->num_vmdq_vsis;
+ vmdq_vecs = min_t(int,
+ vectors_left,
+ vmdq_vecs_wanted);
+ }
pf->num_vmdq_msix = pf->num_vmdq_qps;
v_budget += vmdq_vecs;
@@ -10800,7 +10854,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
* @vsi: Pointer to VSI structure
* @seed: Buffer to store the keys
* @lut: Buffer to store the lookup table entries
- * lut_size: Size of buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
*
* Returns 0 on success, negative on failure
*/
@@ -11374,6 +11428,11 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
u8 i;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ /* Do not report ports with pending deletions as
+ * being available.
+ */
+ if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
+ continue;
if (pf->udp_ports[i].port == port)
return i;
}
@@ -11428,6 +11487,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
/* New port: add it and mark its index in the bitmap */
pf->udp_ports[next_idx].port = port;
+ pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
pf->pending_udp_bitmap |= BIT_ULL(next_idx);
set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
}
@@ -11469,7 +11529,12 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
* and make it pending
*/
pf->udp_ports[idx].port = 0;
- pf->pending_udp_bitmap |= BIT_ULL(idx);
+
+ /* Toggle pending bit instead of setting it. This way if we are
+ * deleting a port that has yet to be added we just clear the pending
+ * bit and don't have to worry about it.
+ */
+ pf->pending_udp_bitmap ^= BIT_ULL(idx);
set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
return;
@@ -11500,6 +11565,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
* @tb: pointer to array of nladdr (unused)
* @dev: the net device pointer
* @addr: the MAC address entry being added
+ * @vid: VLAN ID
* @flags: instructions from stack about fdb operation
*/
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -11545,6 +11611,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* i40e_ndo_bridge_setlink - Set the hardware bridge mode
* @dev: the netdev being configured
* @nlh: RTNL message
+ * @flags: bridge flags
*
* Inserts a new hardware bridge if not already created and
* enables the bridging mode requested (VEB or VEPA). If the
@@ -14118,6 +14185,7 @@ static void i40e_remove(struct pci_dev *pdev)
/**
* i40e_pci_error_detected - warning that something funky happened in PCI land
* @pdev: PCI device information struct
+ * @error: the type of PCI error
*
* Called to warn that something happened and the error handling steps
* are in progress. Allows the driver to quiesce things, be ready for
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index ba9687c03795..0299e5bbb902 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_prototype.h"
@@ -1173,6 +1149,7 @@ void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
* i40e_nvmupd_check_wait_event - handle NVM update operation events
* @hw: pointer to the hardware structure
* @opcode: the event that just happened
+ * @desc: AdminQ descriptor
**/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
struct i40e_aq_desc *desc)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 9c3c3b0d3ac4..a07574bff550 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_OSDEP_H_
#define _I40E_OSDEP_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 2ec24188d6e2..3170655cdeb9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5b47dd1f75a5..35f2866b38c6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e.h"
#include <linux/ptp_classify.h>
@@ -40,9 +16,9 @@
* At 1Gb link, the period is multiplied by 20. (32ns)
* 1588 functionality is not supported at 100Mbps.
*/
-#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
-#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
-#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL_MULT 2
+#define I40E_PTP_1GB_INCVAL_MULT 20
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
@@ -130,17 +106,24 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
ppb = -ppb;
}
- smp_mb(); /* Force any pending update before accessing. */
- adj = READ_ONCE(pf->ptp_base_adj);
-
- freq = adj;
+ freq = I40E_PTP_40GB_INCVAL;
freq *= ppb;
diff = div_u64(freq, 1000000000ULL);
if (neg_adj)
- adj -= diff;
+ adj = I40E_PTP_40GB_INCVAL - diff;
else
- adj += diff;
+ adj = I40E_PTP_40GB_INCVAL + diff;
+
+ /* At some link speeds, the base incval is so large that directly
+ * multiplying by ppb would result in arithmetic overflow even when
+ * using a u64. Avoid this by instead calculating the new incval
+ * always in terms of the 40GbE clock rate and then multiplying by the
+ * link speed factor afterwards. This does result in slightly lower
+ * precision at lower link speeds, but it is fairly minor.
+ */
+ smp_mb(); /* Force any pending update before accessing. */
+ adj *= READ_ONCE(pf->ptp_adj_mult);
wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
@@ -334,10 +317,12 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf)
* This watchdog task is run periodically to make sure that we clear the Tx
* timestamp logic if we don't obtain a timestamp in a reasonable amount of
* time. It is unexpected in the normal case but if it occurs it results in
- * permanently prevent timestamps of future packets
+ * permanently preventing timestamps of future packets.
**/
void i40e_ptp_tx_hang(struct i40e_pf *pf)
{
+ struct sk_buff *skb;
+
if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
return;
@@ -350,9 +335,12 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf)
* within a second it is reasonable to assume that we never will.
*/
if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) {
- dev_kfree_skb_any(pf->ptp_tx_skb);
+ skb = pf->ptp_tx_skb;
pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+
+ /* Free the skb after we clear the bitlock */
+ dev_kfree_skb_any(skb);
pf->tx_hwtstamp_timeouts++;
}
}
@@ -462,6 +450,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
struct i40e_link_status *hw_link_info;
struct i40e_hw *hw = &pf->hw;
u64 incval;
+ u32 mult;
hw_link_info = &hw->phy.link_info;
@@ -469,10 +458,10 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
switch (hw_link_info->link_speed) {
case I40E_LINK_SPEED_10GB:
- incval = I40E_PTP_10GB_INCVAL;
+ mult = I40E_PTP_10GB_INCVAL_MULT;
break;
case I40E_LINK_SPEED_1GB:
- incval = I40E_PTP_1GB_INCVAL;
+ mult = I40E_PTP_1GB_INCVAL_MULT;
break;
case I40E_LINK_SPEED_100MB:
{
@@ -483,15 +472,20 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
"1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n");
warn_once++;
}
- incval = 0;
+ mult = 0;
break;
}
case I40E_LINK_SPEED_40GB:
default:
- incval = I40E_PTP_40GB_INCVAL;
+ mult = 1;
break;
}
+ /* The increment value is calculated by taking the base 40GbE incvalue
+ * and multiplying it by a factor based on the link speed.
+ */
+ incval = I40E_PTP_40GB_INCVAL * mult;
+
/* Write the new increment value into the increment register. The
* hardware will not update the clock until both registers have been
* written.
@@ -500,14 +494,14 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
/* Update the base adjustement value. */
- WRITE_ONCE(pf->ptp_base_adj, incval);
+ WRITE_ONCE(pf->ptp_adj_mult, mult);
smp_mb(); /* Force the above update. */
}
/**
* i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
* @pf: Board private structure
- * @ifreq: ioctl data
+ * @ifr: ioctl data
*
* Obtain the current hardware timestamping settigs as requested. To do this,
* keep a shadow copy of the timestamp settings rather than attempting to
@@ -651,7 +645,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
/**
* i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
* @pf: Board private structure
- * @ifreq: ioctl data
+ * @ifr: ioctl data
*
* Respond to the user filter requests and make the appropriate hardware
* changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
@@ -805,9 +799,11 @@ void i40e_ptp_stop(struct i40e_pf *pf)
pf->ptp_rx = false;
if (pf->ptp_tx_skb) {
- dev_kfree_skb_any(pf->ptp_tx_skb);
+ struct sk_buff *skb = pf->ptp_tx_skb;
+
pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+ dev_kfree_skb_any(skb);
}
if (pf->ptp_clock) {
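
The adjfreq rework above avoids 64-bit overflow by always scaling the 40GbE base increment and only applying the per-speed multiplier to the already-divided result. A small userspace sketch of the arithmetic; the two constants are taken from the patch, everything else is illustrative only:

#include <stdint.h>
#include <stdio.h>

#define PTP_40GB_INCVAL		0x0199999999ULL	/* ~6.87e9, 40GbE base increment */
#define PTP_1GB_INCVAL_MULT	20		/* 1GbE increment = 20 * base */

int main(void)
{
	uint64_t ppb  = 999999999ULL;		/* a large requested adjustment, near the ppb maximum */
	uint64_t base = PTP_40GB_INCVAL;
	uint64_t diff, adj;

	/* Old approach: scale the per-speed incval directly. At 1GbE that is
	 * 20 * base ~ 1.37e11; multiplied by ppb ~ 1e9 it reaches ~1.37e20,
	 * which no longer fits in 64 bits (max ~1.8e19).
	 */

	/* New approach: scale the 40GbE base (~6.9e18 before the divide, which
	 * still fits), then apply the link-speed multiplier afterwards.
	 */
	diff = base * ppb / 1000000000ULL;
	adj  = (base + diff) * PTP_1GB_INCVAL_MULT;

	printf("1GbE increment with +%llu ppb: 0x%llx\n",
	       (unsigned long long)ppb, (unsigned long long)adj);
	return 0;
}
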
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index b3e206e49cc2..52e3680c57f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 10c86f63dc52..77be0702d07c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_STATUS_H_
#define _I40E_STATUS_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index 410ba13bcf21..424f02077e2e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
/* Modeled on trace-events-sample.h */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 87fb27ab9c24..5efa68de935b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/prefetch.h>
#include <net/busy_poll.h>
@@ -495,7 +471,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
/**
* i40e_add_del_fdir - Build raw packets to add/del fdir filter
* @vsi: pointer to the targeted VSI
- * @cmd: command to get or set RX flow classification rules
+ * @input: filter to add or delete
* @add: true adds a filter, false removes it
*
**/
@@ -713,7 +689,7 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
/**
* i40e_get_tx_pending - how many tx descriptors not processed
- * @tx_ring: the ring of descriptors
+ * @ring: the ring of descriptors
* @in_sw: use SW variables
*
* Since there is no access to the ring head register
@@ -1795,6 +1771,8 @@ static inline int i40e_ptype_to_htype(u8 ptype)
* i40e_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
+ * @skb: skb currently being received and modified
+ * @rx_ptype: Rx packet type
**/
static inline void i40e_rx_hash(struct i40e_ring *ring,
union i40e_rx_desc *rx_desc,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4bf318b8be85..fdd2c55f03a6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index bfb80092b352..7df969c59855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_TYPE_H_
#define _I40E_TYPE_H_
@@ -1318,7 +1294,8 @@ struct i40e_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
-#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_EMP_MODULE_PTR 0x0F
+#define I40E_SR_EMP_MODULE_PTR 0x48
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_BOOT_CONFIG_PTR 0x17
@@ -1337,6 +1314,8 @@ struct i40e_hw_port_stats {
#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
+#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
#define I40E_PTR_TYPE BIT(15)
#define I40E_SR_OCP_CFG_WORD0 0x2B
#define I40E_SR_OCP_ENABLED BIT(15)
@@ -1454,7 +1433,8 @@ enum i40e_reset_type {
};
/* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR 0xD
+#define I40E_NVM_LLDP_CFG_PTR 0x06
+#define I40E_SR_LLDP_CFG_PTR 0x31
struct i40e_lldp_variables {
u16 length;
u16 adminstatus;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 35173cbe80f7..c6d24eaede18 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e.h"
@@ -32,8 +8,8 @@
/**
* i40e_vc_vf_broadcast
* @pf: pointer to the PF structure
- * @opcode: operation code
- * @retval: return value
+ * @v_opcode: operation code
+ * @v_retval: return value
* @msg: pointer to the msg buffer
* @msglen: msg length
*
@@ -1663,6 +1639,7 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
/**
* i40e_vc_get_version_msg
* @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
*
* called from the VF to request the API version used by the PF
**/
@@ -1706,7 +1683,6 @@ static void i40e_del_qch(struct i40e_vf *vf)
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to request its resources
**/
@@ -1830,8 +1806,6 @@ err:
/**
* i40e_vc_reset_vf_msg
* @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to reset itself,
* unlike other virtchnl messages, PF driver
@@ -2180,6 +2154,51 @@ error_param:
}
/**
+ * i40e_ctrl_vf_tx_rings
+ * @vsi: the SRIOV VSI being configured
+ * @q_map: bit map of the queues to be enabled
+ * @enable: start or stop the queue
+ **/
+static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
+ bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ int ret = 0;
+ u16 q_id;
+
+ for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ vsi->base_queue + q_id,
+ false /*is xdp*/, enable);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/**
+ * i40e_ctrl_vf_rx_rings
+ * @vsi: the SRIOV VSI being configured
+ * @q_map: bit map of the queues to be enabled
+ * @enable: start or stop the queue
+ **/
+static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
+ bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ int ret = 0;
+ u16 q_id;
+
+ for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
+ ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
+ enable);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/**
* i40e_vc_enable_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2211,8 +2230,17 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
+ /* Use the queue bit map sent by the VF */
+ if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
+ true)) {
aq_ret = I40E_ERR_TIMEOUT;
+ goto error_param;
+ }
+ if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
+ true)) {
+ aq_ret = I40E_ERR_TIMEOUT;
+ goto error_param;
+ }
/* need to start the rings for additional ADq VSI's as well */
if (vf->adq_enabled) {
@@ -2260,8 +2288,17 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
-
+ /* Use the queue bit map sent by the VF */
+ if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
+ false)) {
+ aq_ret = I40E_ERR_TIMEOUT;
+ goto error_param;
+ }
+ if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
+ false)) {
+ aq_ret = I40E_ERR_TIMEOUT;
+ goto error_param;
+ }
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
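
Both the enable and disable paths now hand the VF-provided rx_queues/tx_queues bitmaps to the new helpers, which walk only the set bits and bail out on the first queue that fails, instead of starting or stopping every ring in the VSI. A small user-space analogue of that loop; plain bit tests stand in for for_each_set_bit(), and the constant and callback are invented for illustration:

#include <stdio.h>

#define MAX_VF_QUEUES 16  /* stand-in for I40E_MAX_VF_QUEUES */

static int enable_queue(unsigned int q_id)
{
    printf("enable queue %u\n", q_id);
    return 0;
}

/* Walk only the bits set in q_map and stop on the first failure, like the
 * for_each_set_bit() loops in the helpers above. */
static int ctrl_queues(unsigned long q_map, unsigned int max_queues,
                       int (*ctrl)(unsigned int q_id))
{
    unsigned int q_id;
    int ret = 0;

    for (q_id = 0; q_id < max_queues; q_id++) {
        if (!(q_map & (1UL << q_id)))
            continue;
        ret = ctrl(q_id);
        if (ret)
            break;
    }
    return ret;
}

int main(void)
{
    return ctrl_queues(0x5UL, MAX_VF_QUEUES, enable_queue); /* queues 0 and 2 */
}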
@@ -3556,15 +3593,16 @@ err:
* i40e_vc_process_vf_msg
* @pf: pointer to the PF structure
* @vf_id: source VF id
+ * @v_opcode: operation code
+ * @v_retval: unused return value code
* @msg: pointer to the msg buffer
* @msglen: msg length
- * @msghndl: msg handle
*
* called from the common aeq/arq handler to
* process request from VF
**/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
- u32 v_retval, u8 *msg, u16 msglen)
+ u32 __always_unused v_retval, u8 *msg, u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
@@ -4015,7 +4053,8 @@ error_pvid:
* i40e_ndo_set_vf_bw
* @netdev: network interface device structure
* @vf_id: VF identifier
- * @tx_rate: Tx rate
+ * @min_tx_rate: Minimum Tx rate
+ * @max_tx_rate: Maximum Tx rate
*
* configure VF Tx rate
**/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 57f727bb9e36..bf67d62e2b5f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_VIRTCHNL_PF_H_
#define _I40E_VIRTCHNL_PF_H_
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index 1e89c5487676..3c5c6e962280 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -1,29 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
-# Copyright(c) 2013 - 2014 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
+# Copyright(c) 2013 - 2018 Intel Corporation.
#
## Makefile for the Intel(R) 40GbE VF driver
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 6fd677efa9da..c355120dfdfd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_status.h"
#include "i40e_type.h"
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index a7137c165256..1f264b9b6805 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ADMINQ_H_
#define _I40E_ADMINQ_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 439e71882049..aa81e87cd471 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
index 7e0fddd8af36..cb8689222c8b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_ALLOC_H_
#define _I40E_ALLOC_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 67140cdbcd7a..9cef54971312 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e_type.h"
#include "i40e_adminq.h"
@@ -1255,6 +1231,7 @@ i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
+ * @flags: AdminQ command flags
* @cmd_details: pointer to command details structure or NULL
**/
enum
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index 352dd3f3eb6a..f300bf271824 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_DEVIDS_H_
#define _I40E_DEVIDS_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index 7432596164f4..1c78de838857 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_HMC_H_
#define _I40E_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
index ddac0e4908d3..82b00f70a632 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_LAN_HMC_H_
#define _I40E_LAN_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
index 8668ad6c1a65..3ddddb46455b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_OSDEP_H_
#define _I40E_OSDEP_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 72501bd0f1a9..a358f4b9d5aa 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index c9c935659758..49e1f57d99cc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 0d7993ecb99a..77be0702d07c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_STATUS_H_
#define _I40E_STATUS_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
index ece01dd12a3c..d7a4e68820a8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel(R) 40-10 Gigabit Ethernet Virtual Function Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
/* Modeled on trace-events-sample.h */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 12bd937861e7..a9730711e257 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/prefetch.h>
#include <net/busy_poll.h>
@@ -129,7 +105,7 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
/**
* i40evf_get_tx_pending - how many Tx descriptors not processed
- * @tx_ring: the ring of descriptors
+ * @ring: the ring of descriptors
* @in_sw: is tx_pending being checked in SW or HW
*
* Since there is no access to the ring head register
@@ -1070,6 +1046,8 @@ static inline int i40e_ptype_to_htype(u8 ptype)
* i40e_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
+ * @skb: skb currently being received and modified
+ * @rx_ptype: Rx packet type
**/
static inline void i40e_rx_hash(struct i40e_ring *ring,
union i40e_rx_desc *rx_desc,
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 5790897eae2e..3b5a63b3236e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 449de4b0058e..094387db3c11 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40E_TYPE_H_
#define _I40E_TYPE_H_
@@ -1257,7 +1233,8 @@ struct i40e_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
-#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_EMP_MODULE_PTR 0x0F
+#define I40E_SR_EMP_MODULE_PTR 0x48
#define I40E_NVM_OEM_VER_OFF 0x83
#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
@@ -1273,6 +1250,9 @@ struct i40e_hw_port_stats {
#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
+#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
+#define I40E_PTR_TYPE BIT(15)
/* Shadow RAM related */
#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
@@ -1386,6 +1366,10 @@ enum i40e_reset_type {
I40E_RESET_EMPR = 3,
};
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR 0x06
+#define I40E_SR_LLDP_CFG_PTR 0x31
+
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 3a7a1e77bf39..96e537a35000 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40EVF_H_
#define _I40EVF_H_
@@ -105,7 +81,6 @@ struct i40e_vsi {
#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
-#define MAX_QUEUES 16
#define I40EVF_MAX_REQ_QUEUES 4
#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
index da60ce12b33d..3cc9d60d0d72 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -1,4 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
#include <linux/list.h>
#include <linux/errno.h>
@@ -176,7 +178,6 @@ void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
/**
* i40evf_client_add_instance - add a client instance to the instance list
* @adapter: pointer to the board struct
- * @client: pointer to a client struct in the client list.
*
* Returns cinst ptr on success, NULL on failure
**/
@@ -234,7 +235,6 @@ out:
/**
* i40evf_client_del_instance - removes a client instance from the list
* @adapter: pointer to the board struct
- * @client: pointer to the client struct
*
**/
static
@@ -438,7 +438,7 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
* i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
* @ldev: pointer to L2 context.
* @client: Client pointer.
- * @qv_info: queue and vector list
+ * @qvlist_info: queue and vector list
*
* Return 0 on success or < 0 on error
**/
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
index 15a10da5bd4a..5585f362048a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _I40E_CLIENT_H_
-#define _I40E_CLIENT_H_
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40EVF_CLIENT_H_
+#define _I40EVF_CLIENT_H_
#define I40EVF_CLIENT_STR_LENGTH 10
@@ -164,4 +166,4 @@ struct i40e_client {
/* used by clients */
int i40evf_register_client(struct i40e_client *client);
int i40evf_unregister_client(struct i40e_client *client);
-#endif /* _I40E_CLIENT_H_ */
+#endif /* _I40EVF_CLIENT_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index dc4cde274fb8..69efe0aec76a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
/* ethtool support for i40evf */
#include "i40evf.h"
@@ -226,7 +202,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
/**
* i40evf_get_priv_flags - report device private flags
- * @dev: network interface device structure
+ * @netdev: network interface device structure
*
* The get string set count and the string set should be matched for each
* flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags
@@ -253,7 +229,7 @@ static u32 i40evf_get_priv_flags(struct net_device *netdev)
/**
* i40evf_set_priv_flags - set private flags
- * @dev: network interface device structure
+ * @netdev: network interface device structure
* @flags: bit flags to be set
**/
static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags)
@@ -627,6 +603,7 @@ static int i40evf_set_per_queue_coalesce(struct net_device *netdev,
* i40evf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
+ * @rule_locs: pointer to store rule locations
*
* Returns Success if the command is supported.
**/
@@ -746,6 +723,7 @@ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
+ * @hfunc: hash function in use
*
* Reads the indirection table directly from the hardware. Always returns 0.
**/
@@ -774,6 +752,7 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
+ * @hfunc: hash function to use
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 5f71532be7f1..a7b87f935411 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40evf.h"
#include "i40e_prototype.h"
@@ -473,6 +449,7 @@ static void i40evf_irq_affinity_release(struct kref *ref) {}
/**
* i40evf_request_traffic_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
+ * @basename: device basename
*
* Allocates MSI-X vectors for tx and rx handling, and requests
* interrupts from the kernel.
@@ -705,7 +682,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
f = i40evf_find_vlan(adapter, vlan);
if (!f) {
- f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
goto clearout;
@@ -745,6 +722,7 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
/**
* i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
* @netdev: network device struct
+ * @proto: unused protocol data
* @vid: VLAN tag
**/
static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
@@ -762,6 +740,7 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
/**
* i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
* @netdev: network device struct
+ * @proto: unused protocol data
* @vid: VLAN tag
**/
static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
@@ -1946,7 +1925,8 @@ continue_reset:
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
*/
- running = (adapter->state == __I40EVF_RUNNING);
+ running = ((adapter->state == __I40EVF_RUNNING) ||
+ (adapter->state == __I40EVF_RESETTING));
if (running) {
netif_carrier_off(netdev);
@@ -2352,7 +2332,7 @@ static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
total_max_rate += tx_rate;
num_qps += mqprio_qopt->qopt.count[i];
}
- if (num_qps > MAX_QUEUES)
+ if (num_qps > I40EVF_MAX_REQ_QUEUES)
return -EINVAL;
ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate);
@@ -3160,7 +3140,7 @@ static int i40evf_set_features(struct net_device *netdev,
/**
* i40evf_features_check - Validate encapsulated packet conforms to limits
* @skb: skb buff
- * @netdev: This physical port's netdev
+ * @dev: This physical port's netdev
* @features: Offload features that the stack believes apply
**/
static netdev_features_t i40evf_features_check(struct sk_buff *skb,
@@ -3378,6 +3358,24 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ /* Do not turn on offloads when they are requested to be turned off.
+ * TSO needs minimum 576 bytes to work correctly.
+ */
+ if (netdev->wanted_features) {
+ if (!(netdev->wanted_features & NETIF_F_TSO) ||
+ netdev->mtu < 576)
+ netdev->features &= ~NETIF_F_TSO;
+ if (!(netdev->wanted_features & NETIF_F_TSO6) ||
+ netdev->mtu < 576)
+ netdev->features &= ~NETIF_F_TSO6;
+ if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
+ netdev->features &= ~NETIF_F_TSO_ECN;
+ if (!(netdev->wanted_features & NETIF_F_GRO))
+ netdev->features &= ~NETIF_F_GRO;
+ if (!(netdev->wanted_features & NETIF_F_GSO))
+ netdev->features &= ~NETIF_F_GSO;
+ }
+
adapter->vsi.id = adapter->vsi_res->vsi_id;
adapter->vsi.back = adapter;
@@ -3692,7 +3690,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES);
+ netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
+ I40EVF_MAX_REQ_QUEUES);
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
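
The i40evf_process_config() hunk above gates each offload on netdev->wanted_features and on the minimum MTU that TSO needs. The standalone sketch below illustrates that masking pattern only; it is not part of the patch, and the struct, flag values and helper name are placeholders rather than the driver's real netdev fields.

/* Illustrative sketch, assuming simplified feature flags. */
#include <stdio.h>

#define F_TSO		0x1
#define F_GRO		0x2
#define TSO_MIN_MTU	576

struct netdev_sketch {
	unsigned int features;		/* currently enabled offloads */
	unsigned int wanted_features;	/* what user space asked for */
	unsigned int mtu;
};

static void apply_wanted(struct netdev_sketch *nd)
{
	if (!nd->wanted_features)
		return;
	/* keep TSO only if it is wanted and the MTU is large enough */
	if (!(nd->wanted_features & F_TSO) || nd->mtu < TSO_MIN_MTU)
		nd->features &= ~F_TSO;
	if (!(nd->wanted_features & F_GRO))
		nd->features &= ~F_GRO;
}

int main(void)
{
	struct netdev_sketch nd = { F_TSO | F_GRO, F_GRO, 1500 };

	apply_wanted(&nd);
	printf("features=0x%x\n", nd.features); /* TSO dropped, GRO kept */
	return 0;
}
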
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 26a59890532f..565677de5ba3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40evf.h"
#include "i40e_prototype.h"
@@ -179,8 +155,7 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
/**
* i40evf_get_vf_config
- * @hw: pointer to the hardware structure
- * @len: length of buffer
+ * @adapter: private adapter structure
*
* Get VF configuration from PF and populate hw structure. Must be called after
* admin queue is initialized. Busy waits until response is received from PF,
@@ -423,8 +398,6 @@ int i40evf_request_queues(struct i40evf_adapter *adapter, int num)
/**
* i40evf_add_ether_addrs
* @adapter: adapter structure
- * @addrs: the MAC address filters to add (contiguous)
- * @count: number of filters
*
* Request that the PF add one or more addresses to our filters.
**/
@@ -497,8 +470,6 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
/**
* i40evf_del_ether_addrs
* @adapter: adapter structure
- * @addrs: the MAC address filters to remove (contiguous)
- * @count: number of filters
*
* Request that the PF remove one or more addresses from our filters.
**/
@@ -571,8 +542,6 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
/**
* i40evf_add_vlans
* @adapter: adapter structure
- * @vlans: the VLANs to add
- * @count: number of VLANs
*
* Request that the PF add one or more VLAN filters to our VSI.
**/
@@ -643,8 +612,6 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
/**
* i40evf_del_vlans
* @adapter: adapter structure
- * @vlans: the VLANs to remove
- * @count: number of VLANs
*
* Request that the PF remove one or more VLAN filters from our VSI.
**/
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 5b13ca1bd85f..7541ec2270b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_MIRROR_VSI_ID_S 3
#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S)
- /* Action type = 5 - Large Action */
+ /* Action type = 5 - Generic Value */
#define ICE_LG_ACT_GENERIC 0x5
#define ICE_LG_ACT_GENERIC_VALUE_S 3
#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S)
@@ -1049,7 +1049,9 @@ struct ice_aqc_set_event_mask {
* NVM Update commands (indirect 0x0703)
*/
struct ice_aqc_nvm {
- u8 cmd_flags;
+ __le16 offset_low;
+ u8 offset_high;
+ u8 cmd_flags;
#define ICE_AQC_NVM_LAST_CMD BIT(0)
#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */
#define ICE_AQC_NVM_PRESERVATION_S 1
@@ -1058,12 +1060,11 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << CSR_AQ_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
- u8 module_typeid;
- __le16 length;
+ __le16 module_typeid;
+ __le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
- __le32 offset;
- __le32 addr_high;
- __le32 addr_low;
+ __le32 addr_high;
+ __le32 addr_low;
};
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 21977ec984c4..71d032cc5fa7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_aq_desc desc;
enum ice_status status;
u16 flags;
+ u8 i;
cmd = &desc.params.mac_read;
@@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
return ICE_ERR_CFG;
}
- ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr);
- ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr);
+ /* A single port can report up to two (LAN and WoL) addresses */
+ for (i = 0; i < cmd->num_addr; i++)
+ if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
+ ether_addr_copy(hw->port_info->mac.lan_addr,
+ resp[i].mac_addr);
+ ether_addr_copy(hw->port_info->mac.perm_addr,
+ resp[i].mac_addr);
+ break;
+ }
+
return 0;
}
@@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
- /* Get port MAC information */
- mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
- mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
+ /* Get MAC information */
+ /* A single port can report up to two (LAN and WoL) addresses */
+ mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
+ sizeof(struct ice_aqc_manage_mac_read_resp),
+ GFP_KERNEL);
+ mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
if (!mac_buf) {
status = ICE_ERR_NO_MEMORY;
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 5909a4407e38..7c511f144ed6 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -1014,10 +1014,10 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
desc = ICE_CTL_Q_DESC(cq->rq, ntc);
desc_idx = ntc;
+ cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & ICE_AQ_FLAG_ERR) {
ret_code = ICE_ERR_AQ_ERROR;
- cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
ice_debug(hw, ICE_DBG_AQ_MSG,
"Control Receive Queue Event received with error 0x%x\n",
cq->rq_last_status);
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 1b9e2ef48a9d..499904874b3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -121,8 +121,6 @@
#define PFINT_FW_CTL_CAUSE_ENA_S 30
#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
#define PFINT_OICR 0x0016CA00
-#define PFINT_OICR_INTEVENT_S 0
-#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S)
#define PFINT_OICR_HLP_RDY_S 14
#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
#define PFINT_OICR_CPM_RDY_S 15
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 210b7910f1cd..5299caf55a7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
- if (!(oicr & PFINT_OICR_INTEVENT_M))
- goto ena_intr;
-
if (oicr & PFINT_OICR_GRST_M) {
u32 reset;
/* we have a reset warning */
@@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
ret = IRQ_HANDLED;
-ena_intr:
/* re-enable interrupt causes that are not handled during this pass */
wr32(hw, PFINT_OICR_ENA, ena_mask);
if (!test_bit(__ICE_DOWN, pf->state)) {
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index fa7a69ac92b0..92da0a626ce0 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -16,7 +16,7 @@
* Read the NVM using the admin queue commands (0x0701)
*/
static enum ice_status
-ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length,
+ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
@@ -33,8 +33,9 @@ ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length,
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
- cmd->module_typeid = module_typeid;
- cmd->offset = cpu_to_le32(offset);
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
cmd->length = cpu_to_le16(length);
return ice_aq_send_cmd(hw, &desc, data, length, cd);
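
The ice_aqc_nvm layout change above replaces the 32-bit offset field with a 16-bit offset_low plus an 8-bit offset_high, so ice_aq_read_nvm() now splits the flash offset across the two fields. A standalone sketch of that split follows; it is illustrative only, not part of the patch, and the variable names are placeholders.

/* Illustrative sketch: splitting a 24-bit NVM offset into low/high parts. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t offset      = 0x012345;		/* 24-bit flash offset */
	uint16_t offset_low  = offset & 0xFFFF;		/* lower 16 bits */
	uint8_t  offset_high = (offset >> 16) & 0xFF;	/* upper 8 bits */

	printf("low=0x%04x high=0x%02x\n", offset_low, offset_high);
	return 0;
}
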
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index f16ff3e4a840..2e6c1d92cc88 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
u16 num_added = 0;
u32 temp;
+ *num_nodes_added = 0;
+
if (!num_nodes)
return status;
if (!parent || layer < hw->sw_entry_point_layer)
return ICE_ERR_PARAM;
- *num_nodes_added = 0;
-
/* max children per node per layer */
max_child_nodes =
le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
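
The ice_sched_add_nodes_to_layer() hunk above moves the *num_nodes_added = 0 initialisation ahead of the early returns, so callers always read a defined count. The sketch below shows why that ordering matters; it is not part of the patch and uses placeholder names.

/* Illustrative sketch: initialise an output parameter before early returns. */
#include <stdio.h>

static int add_nodes(int requested, int *added)
{
	*added = 0;		/* set before any early return */
	if (requested <= 0)
		return 0;	/* caller still sees a valid count of 0 */
	*added = requested;	/* pretend every requested node was added */
	return 0;
}

int main(void)
{
	int added = 42;		/* stale value from a previous call */

	add_nodes(0, &added);
	printf("added=%d\n", added); /* prints 0, not the stale 42 */
	return 0;
}
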
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index c48583e98ac1..394c1e0656b9 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,31 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2014 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, see <http://www.gnu.org/licenses/>.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Linux NICS <linux.nics@intel.com>
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
+# Copyright(c) 1999 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) 82575 PCI-Express ethernet driver
#
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index dd9b6cac220d..b13b42e5a1d9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
/* e1000_82575
* e1000_82576
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index e53ebe97d709..6ad775b1a4c5 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 98534f765e0e..252440a418dc 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
@@ -491,6 +470,8 @@
* manageability enabled, allowing us room for 15 multicast addresses.
*/
#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define E1000_RAH_ASEL_SRC_ADDR 0x00010000
+#define E1000_RAH_QSEL_ENABLE 0x10000000
#define E1000_RAL_MAC_ADDR_LEN 4
#define E1000_RAH_MAC_ADDR_LEN 2
#define E1000_RAH_POOL_MASK 0x03FC0000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index ff835e1e853d..5d87957b2627 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,25 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 6f548247e6d8..c54ebedca6da 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
/* e1000_i210
* e1000_i211
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 56f015ccb206..5c437fdc49ee 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 298afa0d9159..79ee0a747260 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#include <linux/if_ether.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 04d80c765aee..6e110f28f922 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_MAC_H_
#define _E1000_MAC_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index ef42f1689b3b..46debd991bfe 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#include "e1000_mbx.h"
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index 4f0ecd28354d..178e60ec71d4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index e4596f151cd4..09f4dcb09632 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,25 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#include <linux/if_ether.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index dde68cd54a53..091cddf4ada8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 4ec61243da82..2be0e762ec69 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#include <linux/if_ether.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 856d2cda0643..5894e4b1d0a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_PHY_H_
#define _E1000_PHY_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index e8fa8c6530e0..0ad737d2f289 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8dbc399b345e..9643b5b3d444 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,26 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
/* Linux PRO/1000 Ethernet Driver main header file */
@@ -442,6 +421,8 @@ struct hwmon_buff {
enum igb_filter_match_flags {
IGB_FILTER_FLAG_ETHER_TYPE = 0x1,
IGB_FILTER_FLAG_VLAN_TCI = 0x2,
+ IGB_FILTER_FLAG_SRC_MAC_ADDR = 0x4,
+ IGB_FILTER_FLAG_DST_MAC_ADDR = 0x8,
};
#define IGB_MAX_RXNFC_FILTERS 16
@@ -456,11 +437,14 @@ struct igb_nfc_input {
u8 match_flags;
__be16 etype;
__be16 vlan_tci;
+ u8 src_addr[ETH_ALEN];
+ u8 dst_addr[ETH_ALEN];
};
struct igb_nfc_filter {
struct hlist_node nfc_node;
struct igb_nfc_input filter;
+ unsigned long cookie;
u16 etype_reg_index;
u16 sw_idx;
u16 action;
@@ -474,6 +458,8 @@ struct igb_mac_addr {
#define IGB_MAC_STATE_DEFAULT 0x1
#define IGB_MAC_STATE_IN_USE 0x2
+#define IGB_MAC_STATE_SRC_ADDR 0x4
+#define IGB_MAC_STATE_QUEUE_STEERING 0x8
/* board specific private data structure */
struct igb_adapter {
@@ -598,6 +584,7 @@ struct igb_adapter {
/* RX network flow classification support */
struct hlist_head nfc_filter_list;
+ struct hlist_head cls_flower_list;
unsigned int nfc_filter_count;
/* lock for RX network flow classification filter */
spinlock_t nfc_lock;
@@ -739,4 +726,9 @@ int igb_add_filter(struct igb_adapter *adapter,
int igb_erase_filter(struct igb_adapter *adapter,
struct igb_nfc_filter *input);
+int igb_add_mac_steering_filter(struct igb_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags);
+int igb_del_mac_steering_filter(struct igb_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags);
+
#endif /* _IGB_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e77ba0d5866d..2d798499d35e 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
/* ethtool support for igb */
@@ -2495,6 +2474,23 @@ static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter,
fsp->h_ext.vlan_tci = rule->filter.vlan_tci;
fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
}
+ if (rule->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ rule->filter.dst_addr);
+ /* As we only support matching by the full
+ * mask, return the mask to userspace
+ */
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
+ if (rule->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_source,
+ rule->filter.src_addr);
+ /* As we only support matching by the full
+ * mask, return the mask to userspace
+ */
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
+ }
+
return 0;
}
return -EINVAL;
@@ -2768,14 +2764,41 @@ static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter,
int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input)
{
+ struct e1000_hw *hw = &adapter->hw;
int err = -EINVAL;
+ if (hw->mac.type == e1000_i210 &&
+ !(input->filter.match_flags & ~IGB_FILTER_FLAG_SRC_MAC_ADDR)) {
+ dev_err(&adapter->pdev->dev,
+ "i210 doesn't support flow classification rules specifying only source addresses.\n");
+ return -EOPNOTSUPP;
+ }
+
if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) {
err = igb_rxnfc_write_etype_filter(adapter, input);
if (err)
return err;
}
+ if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) {
+ err = igb_add_mac_steering_filter(adapter,
+ input->filter.dst_addr,
+ input->action, 0);
+ err = min_t(int, err, 0);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) {
+ err = igb_add_mac_steering_filter(adapter,
+ input->filter.src_addr,
+ input->action,
+ IGB_MAC_STATE_SRC_ADDR);
+ err = min_t(int, err, 0);
+ if (err)
+ return err;
+ }
+
if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI)
err = igb_rxnfc_write_vlan_prio_filter(adapter, input);
@@ -2824,6 +2847,15 @@ int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input)
igb_clear_vlan_prio_filter(adapter,
ntohs(input->filter.vlan_tci));
+ if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR)
+ igb_del_mac_steering_filter(adapter, input->filter.src_addr,
+ input->action,
+ IGB_MAC_STATE_SRC_ADDR);
+
+ if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR)
+ igb_del_mac_steering_filter(adapter, input->filter.dst_addr,
+ input->action, 0);
+
return 0;
}
@@ -2865,7 +2897,7 @@ static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter,
/* add filter to the list */
if (parent)
- hlist_add_behind(&parent->nfc_node, &input->nfc_node);
+ hlist_add_behind(&input->nfc_node, &parent->nfc_node);
else
hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list);
@@ -2905,10 +2937,6 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW)
return -EINVAL;
- if (fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK &&
- fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK))
- return -EINVAL;
-
input = kzalloc(sizeof(*input), GFP_KERNEL);
if (!input)
return -ENOMEM;
@@ -2918,6 +2946,20 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE;
}
+ /* Only support matching addresses by the full mask */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
+ input->filter.match_flags |= IGB_FILTER_FLAG_SRC_MAC_ADDR;
+ ether_addr_copy(input->filter.src_addr,
+ fsp->h_u.ether_spec.h_source);
+ }
+
+ /* Only support matching addresses by the full mask */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR;
+ ether_addr_copy(input->filter.dst_addr,
+ fsp->h_u.ether_spec.h_dest);
+ }
+
if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
err = -EINVAL;
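
The ethtool hunks above accept source and destination MAC matches only when the supplied mask is the all-ones broadcast address. A small standalone sketch of that full-mask-only check follows; it is not part of the patch and uses placeholder helpers instead of the kernel's ether-address API.

/* Illustrative sketch: accept a MAC match rule only with a full mask. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MAC_LEN 6

static bool is_broadcast_mac(const uint8_t *m)
{
	static const uint8_t bcast[MAC_LEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	return memcmp(m, bcast, MAC_LEN) == 0;
}

static bool is_zero_mac(const uint8_t *m)
{
	static const uint8_t zero[MAC_LEN];
	return memcmp(m, zero, MAC_LEN) == 0;
}

int main(void)
{
	uint8_t mask[MAC_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	if (is_zero_mac(mask))
		puts("address not matched at all");
	else if (is_broadcast_mac(mask))
		puts("full mask: accept rule");
	else
		puts("partial mask: reject with -EINVAL");
	return 0;
}
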
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index bebe43b3a836..3b83747b2700 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#include "igb.h"
#include "e1000_82575.h"
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c1c0bc30a16d..78574c06635b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,26 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -36,6 +15,7 @@
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
@@ -1700,7 +1680,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
- if (enable) {
+ if (enable || queue == 0) {
+ /* i210 does not allow the queue 0 to be in the Strict
+ * Priority mode while the Qav mode is enabled, so,
+ * instead of disabling strict priority mode, we give
+ * queue 0 the maximum of credits possible.
+ *
+ * See section 8.12.19 of the i210 datasheet, "Note:
+ * Queue0 QueueMode must be set to 1b when
+ * TransmitMode is set to Qav."
+ */
+ if (queue == 0 && !enable) {
+ /* max "linkspeed" idleslope in kbps */
+ idleslope = 1000000;
+ hicredit = ETH_FRAME_LEN;
+ }
+
set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
@@ -2498,6 +2493,250 @@ static int igb_offload_cbs(struct igb_adapter *adapter,
return 0;
}
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+#define VLAN_PRIO_FULL_MASK (0x07)
+
+static int igb_parse_cls_flower(struct igb_adapter *adapter,
+ struct tc_cls_flower_offload *f,
+ int traffic_class,
+ struct igb_nfc_filter *input)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+
+ if (f->dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_dissector_key_eth_addrs *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->mask);
+
+ if (!is_zero_ether_addr(mask->dst)) {
+ if (!is_broadcast_ether_addr(mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
+ return -EINVAL;
+ }
+
+ input->filter.match_flags |=
+ IGB_FILTER_FLAG_DST_MAC_ADDR;
+ ether_addr_copy(input->filter.dst_addr, key->dst);
+ }
+
+ if (!is_zero_ether_addr(mask->src)) {
+ if (!is_broadcast_ether_addr(mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
+ return -EINVAL;
+ }
+
+ input->filter.match_flags |=
+ IGB_FILTER_FLAG_SRC_MAC_ADDR;
+ ether_addr_copy(input->filter.src_addr, key->src);
+ }
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+
+ if (mask->n_proto) {
+ if (mask->n_proto != ETHER_TYPE_FULL_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
+ return -EINVAL;
+ }
+
+ input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
+ input->filter.etype = key->n_proto;
+ }
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key, *mask;
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->mask);
+
+ if (mask->vlan_priority) {
+ if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
+ return -EINVAL;
+ }
+
+ input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
+ input->filter.vlan_tci = key->vlan_priority;
+ }
+ }
+
+ input->action = traffic_class;
+ input->cookie = f->cookie;
+
+ return 0;
+}
+
+static int igb_configure_clsflower(struct igb_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
+ struct igb_nfc_filter *filter, *f;
+ int err, tc;
+
+ tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
+ if (tc < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
+ return -EINVAL;
+ }
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
+ if (err < 0)
+ goto err_parse;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
+ if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
+ err = -EEXIST;
+ NL_SET_ERR_MSG_MOD(extack,
+ "This filter is already set in ethtool");
+ goto err_locked;
+ }
+ }
+
+ hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
+ if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
+ err = -EEXIST;
+ NL_SET_ERR_MSG_MOD(extack,
+ "This filter is already set in cls_flower");
+ goto err_locked;
+ }
+ }
+
+ err = igb_add_filter(adapter, filter);
+ if (err < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
+ goto err_locked;
+ }
+
+ hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
+
+ spin_unlock(&adapter->nfc_lock);
+
+ return 0;
+
+err_locked:
+ spin_unlock(&adapter->nfc_lock);
+
+err_parse:
+ kfree(filter);
+
+ return err;
+}
+
+static int igb_delete_clsflower(struct igb_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ struct igb_nfc_filter *filter;
+ int err;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
+ if (filter->cookie == cls_flower->cookie)
+ break;
+
+ if (!filter) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ err = igb_erase_filter(adapter, filter);
+ if (err < 0)
+ goto out;
+
+ hlist_del(&filter->nfc_node);
+ kfree(filter);
+
+out:
+ spin_unlock(&adapter->nfc_lock);
+
+ return err;
+}
+
+static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return igb_configure_clsflower(adapter, cls_flower);
+ case TC_CLSFLOWER_DESTROY:
+ return igb_delete_clsflower(adapter, cls_flower);
+ case TC_CLSFLOWER_STATS:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct igb_adapter *adapter = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return igb_setup_tc_cls_flower(adapter, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int igb_setup_tc_block(struct igb_adapter *adapter,
+ struct tc_block_offload *f)
+{
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
+ adapter, adapter);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
+ adapter);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -2506,6 +2745,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
switch (type) {
case TC_SETUP_QDISC_CBS:
return igb_offload_cbs(adapter, type_data);
+ case TC_SETUP_BLOCK:
+ return igb_setup_tc_block(adapter, type_data);
default:
return -EOPNOTSUPP;
@@ -2807,6 +3048,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac.type >= e1000_82576)
netdev->features |= NETIF_F_SCTP_CRC;
+ if (hw->mac.type >= e1000_i350)
+ netdev->features |= NETIF_F_HW_TC;
+
#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
NETIF_F_GSO_GRE_CSUM | \
NETIF_F_GSO_IPXIP4 | \
@@ -6841,8 +7085,35 @@ static void igb_set_default_mac_filter(struct igb_adapter *adapter)
igb_rar_set_index(adapter, 0);
}
-static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
- const u8 queue)
+/* If the filter to be added and an already existing filter express
+ * the same address and address type, it should be possible to only
+ * override the other configurations, for example the queue to steer
+ * traffic.
+ */
+static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
+ const u8 *addr, const u8 flags)
+{
+ if (!(entry->state & IGB_MAC_STATE_IN_USE))
+ return true;
+
+ if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
+ (flags & IGB_MAC_STATE_SRC_ADDR))
+ return false;
+
+ if (!ether_addr_equal(addr, entry->addr))
+ return false;
+
+ return true;
+}
+
+/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
+ * 'flags' is used to indicate what kind of match is made, match is by
+ * default for the destination address, if matching by source address
+ * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used.
+ */
+static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
+ const u8 *addr, const u8 queue,
+ const u8 flags)
{
struct e1000_hw *hw = &adapter->hw;
int rar_entries = hw->mac.rar_entry_count -
@@ -6857,12 +7128,13 @@ static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
* addresses.
*/
for (i = 0; i < rar_entries; i++) {
- if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)
+ if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
+ addr, flags))
continue;
ether_addr_copy(adapter->mac_table[i].addr, addr);
adapter->mac_table[i].queue = queue;
- adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE;
+ adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;
igb_rar_set_index(adapter, i);
return i;
@@ -6871,9 +7143,22 @@ static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
return -ENOSPC;
}
-static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
const u8 queue)
{
+ return igb_add_mac_filter_flags(adapter, addr, queue, 0);
+}
+
+/* Remove a MAC filter for 'addr' directing matching traffic to
+ * 'queue', 'flags' is used to indicate what kind of match need to be
+ * removed, match is by default for the destination address, if
+ * matching by source address is to be removed the flag
+ * IGB_MAC_STATE_SRC_ADDR can be used.
+ */
+static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
+ const u8 *addr, const u8 queue,
+ const u8 flags)
+{
struct e1000_hw *hw = &adapter->hw;
int rar_entries = hw->mac.rar_entry_count -
adapter->vfs_allocated_count;
@@ -6889,14 +7174,26 @@ static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
for (i = 0; i < rar_entries; i++) {
if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
continue;
+ if ((adapter->mac_table[i].state & flags) != flags)
+ continue;
if (adapter->mac_table[i].queue != queue)
continue;
if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
continue;
- adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- adapter->mac_table[i].queue = 0;
+ /* When a filter for the default address is "deleted",
+ * we return it to its initial configuration
+ */
+ if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
+ adapter->mac_table[i].state =
+ IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
+ adapter->mac_table[i].queue =
+ adapter->vfs_allocated_count;
+ } else {
+ adapter->mac_table[i].state = 0;
+ adapter->mac_table[i].queue = 0;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ }
igb_rar_set_index(adapter, i);
return 0;
@@ -6905,6 +7202,34 @@ static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
return -ENOENT;
}
+static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
+ const u8 queue)
+{
+ return igb_del_mac_filter_flags(adapter, addr, queue, 0);
+}
+
+int igb_add_mac_steering_filter(struct igb_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* In theory, this should be supported on 82575 as well, but
+ * that part wasn't easily accessible during development.
+ */
+ if (hw->mac.type != e1000_i210)
+ return -EOPNOTSUPP;
+
+ return igb_add_mac_filter_flags(adapter, addr, queue,
+ IGB_MAC_STATE_QUEUE_STEERING | flags);
+}
+
+int igb_del_mac_steering_filter(struct igb_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags)
+{
+ return igb_del_mac_filter_flags(adapter, addr, queue,
+ IGB_MAC_STATE_QUEUE_STEERING | flags);
+}
+
static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -8748,12 +9073,24 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
if (is_valid_ether_addr(addr))
rar_high |= E1000_RAH_AV;
- if (hw->mac.type == e1000_82575)
+ if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
+ rar_high |= E1000_RAH_ASEL_SRC_ADDR;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_i210:
+ if (adapter->mac_table[index].state &
+ IGB_MAC_STATE_QUEUE_STEERING)
+ rar_high |= E1000_RAH_QSEL_ENABLE;
+
rar_high |= E1000_RAH_POOL_1 *
adapter->mac_table[index].queue;
- else
+ break;
+ default:
rar_high |= E1000_RAH_POOL_1 <<
adapter->mac_table[index].queue;
+ break;
+ }
}
wr32(E1000_RAL(index), rar_low);
@@ -9191,6 +9528,9 @@ static void igb_nfc_filter_exit(struct igb_adapter *adapter)
hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
igb_erase_filter(adapter, rule);
+ hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
+ igb_erase_filter(adapter, rule);
+
spin_unlock(&adapter->nfc_lock);
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7454b9895a65..9f4d700e09df 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1,21 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
-/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
- *
- * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- */
+/* Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> */
+
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile
index efe29dae384a..afd3e36eae75 100644
--- a/drivers/net/ethernet/intel/igbvf/Makefile
+++ b/drivers/net/ethernet/intel/igbvf/Makefile
@@ -1,31 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel(R) 82576 Virtual Function Linux driver
-# Copyright(c) 2009 - 2012 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
+# Copyright(c) 2009 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) 82576 VF ethernet driver
#
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index 04bcfec0641b..4437f832412d 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index ca39e3cccaeb..3ae358b35227 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
/* ethtool support for igbvf */
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index f5bf248e22eb..eee26a3be90b 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
/* Linux PRO/1000 Ethernet Driver main header file */
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 9195884096f8..163e5838f7c2 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
#include "mbx.h"
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index 479b062fe9ee..e5b31818d565 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index e2b7502f1953..f818f060e5a7 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h
index 614e52409f11..625a309a3355 100644
--- a/drivers/net/ethernet/intel/igbvf/regs.h
+++ b/drivers/net/ethernet/intel/igbvf/regs.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index bfe8d8297b2e..b8ba3f94c363 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -1,29 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
#include "vf.h"
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index 193b50026246..c71b0d7dbcee 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2012 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2009 - 2018 Intel Corporation. */
#ifndef _E1000_VF_H_
#define _E1000_VF_H_
diff --git a/drivers/net/ethernet/intel/ixgb/Makefile b/drivers/net/ethernet/intel/ixgb/Makefile
index 1b42dd554dd2..2433e9300a33 100644
--- a/drivers/net/ethernet/intel/ixgb/Makefile
+++ b/drivers/net/ethernet/intel/ixgb/Makefile
@@ -1,33 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel PRO/10GbE Linux driver
# Copyright(c) 1999 - 2008 Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Linux NICS <linux.nics@intel.com>
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
-#
# Makefile for the Intel(R) PRO/10GbE ethernet driver
#
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 92022841755f..e85271b68410 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#ifndef _IXGB_H_
#define _IXGB_H_
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
index eca216b9b859..129286fc1634 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
index 475297a810fe..3ee0a09e5d0a 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#ifndef _IXGB_EE_H_
#define _IXGB_EE_H_
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index d10a0d242dda..43744bf0fc1c 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
/* ethtool support for ixgb */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index bf9a220f71fb..cbaa933ef30d 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
/* ixgb_hw.c
* Shared functions for accessing and configuring the adapter
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 19f36d87ef61..6064583095da 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#ifndef _IXGB_HW_H_
#define _IXGB_HW_H_
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
index 24e849902d60..9695b8215f01 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#ifndef _IXGB_IDS_H_
#define _IXGB_IDS_H_
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 2353c383f0a7..62f2173bc20e 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
index b1710379192e..7bd54efa698d 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
/* glue for the OS independent part of ixgb
* includes register access macros
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
index 04a60640ddda..f0cadd532c53 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel PRO/10GbE Linux driver
- Copyright(c) 1999 - 2008 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2008 Intel Corporation. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 4cd96c88cb5d..5414685189ce 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,32 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Linux NICS <linux.nics@intel.com>
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
+# Copyright(c) 1999 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
#
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7dd5038cfcc4..fc534e91c6b2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_H_
#define _IXGBE_H_
@@ -305,7 +279,6 @@ enum ixgbe_ring_state_t {
struct ixgbe_fwd_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
- struct ixgbe_adapter *real_adapter;
unsigned int tx_base_queue;
unsigned int rx_base_queue;
int pool;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index cb0fe5fedb33..eee277c1bedf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 66a74f4651e8..1e49716f52bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -1462,7 +1436,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
{
u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
- u32 bucket_hash = 0, hi_dword = 0;
+ u32 bucket_hash = 0;
+ __be32 hi_dword = 0;
int i;
/* Apply masks to input data */
@@ -1501,7 +1476,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
* Limit hash to 13 bits since max bucket count is 8K.
* Store result at the end of the input stream.
*/
- input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+ input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF);
}
/**
@@ -1610,7 +1585,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
return IXGBE_ERR_CONFIG;
}
- switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+ switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
case 0x0000:
/* Mask Flex Bytes */
fdirm |= IXGBE_FDIRM_FLEX;
@@ -1680,13 +1655,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
/* record vlan (little-endian) and flex_bytes(big-endian) */
- fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+ fdirvlan = IXGBE_STORE_AS_BE16((__force u16)input->formatted.flex_bytes);
fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
fdirvlan |= ntohs(input->formatted.vlan_id);
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
/* configure FDIRHASH register */
- fdirhash = input->formatted.bkt_hash;
+ fdirhash = (__force u32)input->formatted.bkt_hash;
fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
@@ -1724,7 +1699,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
s32 err;
/* configure FDIRHASH register */
- fdirhash = input->formatted.bkt_hash;
+ fdirhash = (__force u32)input->formatted.bkt_hash;
fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 633be93f3dbb..3f5c350716bb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -3652,7 +3626,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
*/
for (i = 0; i < dword_len; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, cpu_to_le32(buffer[i]));
+ i, (__force u32)cpu_to_le32(buffer[i]));
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2b311382167a..4b531e8ae38a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_COMMON_H_
#define _IXGBE_COMMON_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index aaea8282bfd2..d26cea5b43bd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,31 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_type.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 73b6362d4327..60cd5863bf5e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _DCB_CONFIG_H_
#define _DCB_CONFIG_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 085130626330..379ae747cdce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -1,31 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_type.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index 7edce607f901..fdca41abb44c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _DCB_82598_CONFIG_H_
#define _DCB_82598_CONFIG_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 1eed6811e914..7948849840a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_type.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index fa030f0abc18..c6f084883cab 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _DCB_82599_CONFIG_H_
#define _DCB_82599_CONFIG_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index b33f3f87e4b1..c00332d2e02a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include <linux/dcbnl.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index ad54080488ee..55fe8114fe99 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -1,30 +1,6 @@
-/*******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
#include <linux/debugfs.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index c0e6ab42e0e1..bdd179c29ea4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* ethtool support for ixgbe */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 7a09a40e4472..94b3165ff543 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include <linux/if_ether.h>
@@ -465,7 +440,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
ddp->sgc, DMA_FROM_DEVICE);
- ddp->err = ddp_err;
+ ddp->err = (__force u32)ddp_err;
ddp->sgl = NULL;
ddp->sgc = 0;
/* fall through */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index cf1919901514..724f5382329f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_FCOE_H
#define _IXGBE_FCOE_H
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 68af127987bc..99b170f1efd1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -1,29 +1,5 @@
-/*******************************************************************************
- *
- * Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */
#include "ixgbe.h"
#include <net/xfrm.h>
@@ -43,8 +19,9 @@ static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
int i;
for (i = 0; i < 4; i++)
- IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
- IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i),
+ (__force u32)cpu_to_be32(key[3 - i]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
IXGBE_WRITE_FLUSH(hw);
reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
@@ -93,7 +70,8 @@ static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
int i;
/* store the SPI (in bigendian) and IPidx */
- IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
+ (__force u32)cpu_to_le32((__force u32)spi));
IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
IXGBE_WRITE_FLUSH(hw);
@@ -101,8 +79,9 @@ static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
/* store the key, salt, and mode */
for (i = 0; i < 4; i++)
- IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
- IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i),
+ (__force u32)cpu_to_be32(key[3 - i]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
IXGBE_WRITE_FLUSH(hw);
@@ -121,7 +100,8 @@ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
/* store the ip address */
for (i = 0; i < 4; i++)
- IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i),
+ (__force u32)cpu_to_le32((__force u32)addr[i]));
IXGBE_WRITE_FLUSH(hw);
ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
@@ -391,7 +371,8 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
struct xfrm_state *ret = NULL;
rcu_read_lock();
- hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
+ hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
+ (__force u32)spi) {
if (spi == rsa->xs->id.spi &&
((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
(!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
@@ -401,6 +382,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
xfrm_state_hold(ret);
break;
}
+ }
rcu_read_unlock();
return ret;
}
@@ -593,7 +575,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
/* hash the new entry for faster search in Rx path */
hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
- rsa.xs->id.spi);
+ (__force u64)rsa.xs->id.spi);
} else {
struct tx_sa tsa;
@@ -677,7 +659,8 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
if (!ipsec->ip_tbl[ipi].ref_cnt) {
memset(&ipsec->ip_tbl[ipi], 0,
sizeof(struct rx_ip_sa));
- ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
+ ixgbe_ipsec_set_rx_ip(hw, ipi,
+ (__force __be32 *)zerobuf);
}
}
@@ -943,8 +926,8 @@ err2:
kfree(ipsec->ip_tbl);
kfree(ipsec->rx_tbl);
kfree(ipsec->tx_tbl);
+ kfree(ipsec);
err1:
- kfree(adapter->ipsec);
netdev_err(adapter->netdev, "Unable to allocate memory for SA tables");
}
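
For context on the casts in the ixgbe_ipsec.c hunks above: in-kernel endian types such as __be32 and __le32 are sparse "bitwise" types, while IXGBE_WRITE_REG() takes a plain u32, so each cpu_to_be32()/cpu_to_le32() result is pushed through a (__force u32) cast to silence the type-checker without changing the generated code. The following is a minimal userspace sketch of that idea only; the __force/__be32 stand-ins, write_reg() and the htonl()-based swap are illustrative stubs, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Stand-ins for the kernel's sparse annotations: under sparse, __bitwise
 * makes __be32 a distinct type and __force waives the cast warning.
 * Outside sparse they compile away, as approximated here. */
#define __force
typedef uint32_t __be32;

/* Illustrative register write that, like IXGBE_WRITE_REG(), only accepts
 * a plain 32-bit value. */
static void write_reg(uint32_t *reg, uint32_t val)
{
        *reg = val;
}

int main(void)
{
        uint32_t fake_reg = 0;
        uint32_t key = 0x01020304;

        /* Byte-swap to big-endian first, then strip the bitwise type with a
         * __force cast before handing it to the register helper, mirroring
         * the (__force u32)cpu_to_be32(...) pattern in the hunks above. */
        __be32 be_key = (__be32)htonl(key);

        write_reg(&fake_reg, (__force uint32_t)be_key);
        printf("register now holds 0x%08x\n", (unsigned int)fake_reg);
        return 0;
}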
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
index 4f099f516645..9ef7faadda69 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
@@ -1,30 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program. If not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */
#ifndef _IXGBE_IPSEC_H_
#define _IXGBE_IPSEC_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index ed4cbe94c355..893a9206e718 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_sriov.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 51e7d82a5860..a52d92e182ee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -752,8 +727,8 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
ring_desc = "";
pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
+ le64_to_cpu((__force __le64)u0->a),
+ le64_to_cpu((__force __le64)u0->b),
(u64)dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
tx_buffer->next_to_watch,
@@ -864,15 +839,15 @@ rx_ring_summary:
/* Descriptor Done */
pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
+ le64_to_cpu((__force __le64)u0->a),
+ le64_to_cpu((__force __le64)u0->b),
rx_buffer_info->skb,
ring_desc);
} else {
pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
+ le64_to_cpu((__force __le64)u0->a),
+ le64_to_cpu((__force __le64)u0->b),
(u64)rx_buffer_info->dma,
rx_buffer_info->skb,
ring_desc);
@@ -1768,15 +1743,14 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
- skb->protocol = eth_type_trans(skb, dev);
-
/* record Rx queue, or update MACVLAN statistics */
if (netif_is_ixgbe(dev))
skb_record_rx_queue(skb, rx_ring->queue_index);
else
macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
- (skb->pkt_type == PACKET_BROADCAST) ||
- (skb->pkt_type == PACKET_MULTICAST));
+ false);
+
+ skb->protocol = eth_type_trans(skb, dev);
}
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -4219,7 +4193,8 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 reg_offset, vf_shift;
+ u16 pool = adapter->num_rx_pools;
+ u32 reg_offset, vf_shift, vmolr;
u32 gcr_ext, vmdctl;
int i;
@@ -4233,6 +4208,13 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
vmdctl |= IXGBE_VT_CTL_REPLEN;
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+ /* accept untagged packets until a vlan tag is
+ * specifically set for the VMDQ queue/pool
+ */
+ vmolr = IXGBE_VMOLR_AUPE;
+ while (pool--)
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
+
vf_shift = VMDQ_P(0) % 32;
reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
@@ -4900,36 +4882,6 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
return -ENOMEM;
}
-/**
- * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
- * @netdev: network interface device structure
- * @vfn: pool to associate with unicast addresses
- *
- * Writes unicast address list to the RAR table.
- * Returns: -ENOMEM on failure/insufficient address space
- * 0 on no addresses written
- * X on writing X addresses to the RAR table
- **/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int count = 0;
-
- /* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
- return -ENOMEM;
-
- if (!netdev_uc_empty(netdev)) {
- struct netdev_hw_addr *ha;
- netdev_for_each_uc_addr(ha, netdev) {
- ixgbe_del_mac_filter(adapter, ha->addr, vfn);
- ixgbe_add_mac_filter(adapter, ha->addr, vfn);
- count++;
- }
- }
- return count;
-}
-
static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -5309,29 +5261,6 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
-static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
- struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vmolr;
-
- /* No unicast promiscuous support for VMDQ devices. */
- vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
- vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
-
- /* clear the affected bit */
- vmolr &= ~IXGBE_VMOLR_MPE;
-
- if (dev->flags & IFF_ALLMULTI) {
- vmolr |= IXGBE_VMOLR_MPE;
- } else {
- vmolr |= IXGBE_VMOLR_ROMPE;
- hw->mac.ops.update_mc_addr_list(hw, dev);
- }
- ixgbe_write_uc_addr_list(adapter->netdev, pool);
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-}
-
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
@@ -5384,21 +5313,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
rx_ring->next_to_use = 0;
}
-static int ixgbe_fwd_ring_up(struct net_device *vdev,
+static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
struct ixgbe_fwd_adapter *accel)
{
- struct ixgbe_adapter *adapter = accel->real_adapter;
+ struct net_device *vdev = accel->netdev;
int i, baseq, err;
- if (!test_bit(accel->pool, adapter->fwd_bitmask))
- return 0;
-
baseq = accel->pool * adapter->num_rx_queues_per_pool;
netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
accel->pool, adapter->num_rx_pools,
baseq, baseq + adapter->num_rx_queues_per_pool);
- accel->netdev = vdev;
accel->rx_base_queue = baseq;
accel->tx_base_queue = baseq;
@@ -5415,26 +5340,36 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
*/
err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
VMDQ_P(accel->pool));
- if (err >= 0) {
- ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ if (err >= 0)
return 0;
- }
+
+ /* if we cannot add the MAC rule then disable the offload */
+ macvlan_release_l2fw_offload(vdev);
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->rx_ring[baseq + i]->netdev = NULL;
+ netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
+
+ clear_bit(accel->pool, adapter->fwd_bitmask);
+ kfree(accel);
+
return err;
}
-static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
+static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
{
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *dfwd = netdev_priv(upper);
- struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+ struct ixgbe_adapter *adapter = data;
+ struct ixgbe_fwd_adapter *accel;
- if (dfwd->fwd_priv)
- ixgbe_fwd_ring_up(upper, vadapter);
- }
+ if (!netif_is_macvlan(vdev))
+ return 0;
+
+ accel = macvlan_accel_priv(vdev);
+ if (!accel)
+ return 0;
+
+ ixgbe_fwd_ring_up(adapter, accel);
return 0;
}
@@ -5442,7 +5377,7 @@ static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
{
netdev_walk_all_upper_dev_rcu(adapter->netdev,
- ixgbe_upper_dev_walk, NULL);
+ ixgbe_macvlan_up, adapter);
}
static void ixgbe_configure(struct ixgbe_adapter *adapter)
@@ -7816,7 +7751,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -8843,6 +8778,49 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
}
#endif /* CONFIG_IXGBE_DCB */
+static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
+{
+ struct ixgbe_adapter *adapter = data;
+ struct ixgbe_fwd_adapter *accel;
+ int pool;
+
+ /* we only care about macvlans... */
+ if (!netif_is_macvlan(vdev))
+ return 0;
+
+ /* that have hardware offload enabled... */
+ accel = macvlan_accel_priv(vdev);
+ if (!accel)
+ return 0;
+
+ /* If we can relocate to a different bit do so */
+ pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+ if (pool < adapter->num_rx_pools) {
+ set_bit(pool, adapter->fwd_bitmask);
+ accel->pool = pool;
+ return 0;
+ }
+
+ /* if we cannot find a free pool then disable the offload */
+ netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
+ macvlan_release_l2fw_offload(vdev);
+ kfree(accel);
+
+ return 0;
+}
+
+static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ /* flush any stale bits out of the fwd bitmask */
+ bitmap_clear(adapter->fwd_bitmask, 1, 63);
+
+ /* walk through upper devices reassigning pools */
+ netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
+ adapter);
+}
+
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
@@ -8910,6 +8888,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
#endif /* CONFIG_IXGBE_DCB */
ixgbe_init_interrupt_scheme(adapter);
+ ixgbe_defrag_macvlan_pools(dev);
+
if (netif_running(dev))
return ixgbe_open(dev);
@@ -9014,13 +8994,12 @@ struct upper_walk_data {
static int get_macvlan_queue(struct net_device *upper, void *_data)
{
if (netif_is_macvlan(upper)) {
- struct macvlan_dev *dfwd = netdev_priv(upper);
- struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+ struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
struct upper_walk_data *data = _data;
struct ixgbe_adapter *adapter = data->adapter;
int ifindex = data->ifindex;
- if (vadapter && vadapter->netdev->ifindex == ifindex) {
+ if (vadapter && upper->ifindex == ifindex) {
data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
data->action = data->queue;
return 1;
@@ -9125,7 +9104,8 @@ static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
for (j = 0; field_ptr[j].val; j++) {
if (field_ptr[j].off == off) {
- field_ptr[j].val(input, mask, val, m);
+ field_ptr[j].val(input, mask, (__force u32)val,
+ (__force u32)m);
input->filter.formatted.flow_type |=
field_ptr[j].type;
found_entry = true;
@@ -9134,8 +9114,10 @@ static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
}
if (nexthdr) {
if (nexthdr->off == cls->knode.sel->keys[i].off &&
- nexthdr->val == cls->knode.sel->keys[i].val &&
- nexthdr->mask == cls->knode.sel->keys[i].mask)
+ nexthdr->val ==
+ (__force u32)cls->knode.sel->keys[i].val &&
+ nexthdr->mask ==
+ (__force u32)cls->knode.sel->keys[i].mask)
found_jump_field = true;
else
continue;
@@ -9239,7 +9221,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
for (i = 0; nexthdr[i].jump; i++) {
if (nexthdr[i].o != cls->knode.sel->offoff ||
nexthdr[i].s != cls->knode.sel->offshift ||
- nexthdr[i].m != cls->knode.sel->offmask)
+ nexthdr[i].m !=
+ (__force u32)cls->knode.sel->offmask)
return err;
jump = kzalloc(sizeof(*jump), GFP_KERNEL);
@@ -9460,6 +9443,22 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
return features;
}
+static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
+{
+ int rss = min_t(int, ixgbe_max_rss_indices(adapter),
+ num_online_cpus());
+
+ /* go back to full RSS if we're not running SR-IOV */
+ if (!adapter->ring_feature[RING_F_VMDQ].offset)
+ adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
+ IXGBE_FLAG_SRIOV_ENABLED);
+
+ adapter->ring_feature[RING_F_RSS].limit = rss;
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
+
+ ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
+}
+
static int ixgbe_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -9540,7 +9539,9 @@ static int ixgbe_set_features(struct net_device *netdev,
}
}
- if (need_reset)
+ if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
+ ixgbe_reset_l2fw_offload(adapter);
+ else if (need_reset)
ixgbe_do_reset(netdev);
else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER))
@@ -9803,71 +9804,98 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
- struct ixgbe_fwd_adapter *fwd_adapter = NULL;
struct ixgbe_adapter *adapter = netdev_priv(pdev);
- int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+ struct ixgbe_fwd_adapter *accel;
int tcs = adapter->hw_tcs ? : 1;
- unsigned int limit;
int pool, err;
- /* Hardware has a limited number of available pools. Each VF, and the
- * PF require a pool. Check to ensure we don't attempt to use more
- * then the available number of pools.
+ /* The hardware supported by ixgbe only filters on the destination MAC
+ * address. In order to avoid issues we only support offloading modes
+ * where the hardware can actually provide the functionality.
*/
- if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
- return ERR_PTR(-EINVAL);
+ if (!macvlan_supports_dest_filter(vdev))
+ return ERR_PTR(-EMEDIUMTYPE);
- if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
- (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
- return ERR_PTR(-EBUSY);
+ pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+ if (pool == adapter->num_rx_pools) {
+ u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
+ u16 reserved_pools;
+
+ if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+ adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
+ adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
+ return ERR_PTR(-EBUSY);
+
+ /* Hardware has a limited number of available pools. Each VF,
+ * and the PF require a pool. Check to ensure we don't
+ * attempt to use more then the available number of pools.
+ */
+ if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
+ return ERR_PTR(-EBUSY);
- fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
- if (!fwd_adapter)
- return ERR_PTR(-ENOMEM);
+ /* Enable VMDq flag so device will be set in VM mode */
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
+ IXGBE_FLAG_SRIOV_ENABLED;
- pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
- set_bit(pool, adapter->fwd_bitmask);
- limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
+ /* Try to reserve as many queues per pool as possible,
+ * we start with the configurations that support 4 queues
+ * per pools, followed by 2, and then by just 1 per pool.
+ */
+ if (used_pools < 32 && adapter->num_rx_pools < 16)
+ reserved_pools = min_t(u16,
+ 32 - used_pools,
+ 16 - adapter->num_rx_pools);
+ else if (adapter->num_rx_pools < 32)
+ reserved_pools = min_t(u16,
+ 64 - used_pools,
+ 32 - adapter->num_rx_pools);
+ else
+ reserved_pools = 64 - used_pools;
- /* Enable VMDq flag so device will be set in VM mode */
- adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
- adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
- fwd_adapter->pool = pool;
- fwd_adapter->real_adapter = adapter;
+ if (!reserved_pools)
+ return ERR_PTR(-EBUSY);
- /* Force reinit of ring allocation with VMDQ enabled */
- err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+ adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
- if (!err && netif_running(pdev))
- err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+ /* Force reinit of ring allocation with VMDQ enabled */
+ err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+ if (err)
+ return ERR_PTR(err);
- if (!err)
- return fwd_adapter;
+ if (pool >= adapter->num_rx_pools)
+ return ERR_PTR(-ENOMEM);
+ }
- /* unwind counter and free adapter struct */
- netdev_info(pdev,
- "%s: dfwd hardware acceleration failed\n", vdev->name);
- clear_bit(pool, adapter->fwd_bitmask);
- kfree(fwd_adapter);
- return ERR_PTR(err);
+ accel = kzalloc(sizeof(*accel), GFP_KERNEL);
+ if (!accel)
+ return ERR_PTR(-ENOMEM);
+
+ set_bit(pool, adapter->fwd_bitmask);
+ accel->pool = pool;
+ accel->netdev = vdev;
+
+ if (!netif_running(pdev))
+ return accel;
+
+ err = ixgbe_fwd_ring_up(adapter, accel);
+ if (err)
+ return ERR_PTR(err);
+
+ return accel;
}
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
struct ixgbe_fwd_adapter *accel = priv;
- struct ixgbe_adapter *adapter = accel->real_adapter;
+ struct ixgbe_adapter *adapter = netdev_priv(pdev);
unsigned int rxbase = accel->rx_base_queue;
- unsigned int limit, i;
+ unsigned int i;
/* delete unicast filter associated with offloaded interface */
ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
VMDQ_P(accel->pool));
- /* disable ability to receive packets for this pool */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
-
/* Allow remaining Rx packets to get flushed out of the
* Rx FIFO before we drop the netdev for the ring.
*/
@@ -9886,25 +9914,6 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
}
clear_bit(accel->pool, adapter->fwd_bitmask);
- limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
- adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
-
- /* go back to full RSS if we're done with our VMQs */
- if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
- int rss = min_t(int, ixgbe_max_rss_indices(adapter),
- num_online_cpus());
-
- adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
- adapter->ring_feature[RING_F_RSS].limit = rss;
- }
-
- ixgbe_setup_tc(pdev, adapter->hw_tcs);
- netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
- accel->pool, adapter->num_rx_pools,
- accel->rx_base_queue,
- accel->rx_base_queue +
- adapter->num_rx_queues_per_pool);
kfree(accel);
}
@@ -9986,7 +9995,8 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
}
} else {
for (i = 0; i < adapter->num_rx_queues; i++)
- xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
+ (void)xchg(&adapter->rx_ring[i]->xdp_prog,
+ adapter->xdp_prog);
}
if (old_prog)
@@ -10925,14 +10935,14 @@ skip_bad_vf_detection:
rtnl_lock();
netif_device_detach(netdev);
+ if (netif_running(netdev))
+ ixgbe_close_suspend(adapter);
+
if (state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
- if (netif_running(netdev))
- ixgbe_close_suspend(adapter);
-
if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
pci_disable_device(pdev);
rtnl_unlock();
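
The reworked ixgbe_fwd_add() above reserves VMDq pools in tiers: while the hardware can still give 4 queues per pool it grows toward 16 pools within the first 32 slots, then toward 32 pools of 64 with 2 queues each, and finally 1 queue per pool. The sketch below restates only that arithmetic with the constants taken from the hunk; reserved_pools() is an illustrative helper, not a driver function.

#include <stdint.h>
#include <stdio.h>

/* Re-statement of the reservation tiering added in ixgbe_fwd_add():
 * 4 queues per pool while under 16 pools of the first 32, then 2 queues
 * per pool up to 32 pools of 64, then 1 queue per pool. */
static uint16_t reserved_pools(uint16_t used_pools, uint16_t num_rx_pools)
{
        if (used_pools < 32 && num_rx_pools < 16)
                return (uint16_t)((32 - used_pools) < (16 - num_rx_pools) ?
                                  (32 - used_pools) : (16 - num_rx_pools));
        if (num_rx_pools < 32)
                return (uint16_t)((64 - used_pools) < (32 - num_rx_pools) ?
                                  (64 - used_pools) : (32 - num_rx_pools));
        return (uint16_t)(64 - used_pools);
}

int main(void)
{
        /* 10 pools in use (VFs + PF) and 4 rx pools configured: still in the
         * 4-queues-per-pool tier, so up to min(32-10, 16-4) = 12 more pools. */
        printf("%u\n", (unsigned int)reserved_pools(10, 4));   /* -> 12 */

        /* 40 pools in use, 20 rx pools: the 2-queues-per-pool tier applies,
         * so min(64-40, 32-20) = 12. */
        printf("%u\n", (unsigned int)reserved_pools(40, 20));  /* -> 12 */
        return 0;
}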
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index a0cb84381cd0..5679293e53f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index c4628b663590..e085b6520dac 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index 72446644f9fa..1e6cf220f543 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel 10 Gigabit PCI Express Linux drive
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_MODEL_H_
#define _IXGBE_MODEL_H_
@@ -53,8 +29,8 @@ static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
{
- input->filter.formatted.src_ip[0] = val;
- mask->formatted.src_ip[0] = m;
+ input->filter.formatted.src_ip[0] = (__force __be32)val;
+ mask->formatted.src_ip[0] = (__force __be32)m;
return 0;
}
@@ -62,8 +38,8 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
{
- input->filter.formatted.dst_ip[0] = val;
- mask->formatted.dst_ip[0] = m;
+ input->filter.formatted.dst_ip[0] = (__force __be32)val;
+ mask->formatted.dst_ip[0] = (__force __be32)m;
return 0;
}
@@ -79,10 +55,10 @@ static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
{
- input->filter.formatted.src_port = val & 0xffff;
- mask->formatted.src_port = m & 0xffff;
- input->filter.formatted.dst_port = val >> 16;
- mask->formatted.dst_port = m >> 16;
+ input->filter.formatted.src_port = (__force __be16)(val & 0xffff);
+ mask->formatted.src_port = (__force __be16)(m & 0xffff);
+ input->filter.formatted.dst_port = (__force __be16)(val >> 16);
+ mask->formatted.dst_port = (__force __be16)(m >> 16);
return 0;
};
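
In the ixgbe_model.h hunk above, ixgbe_mat_prgm_ports() receives both 16-bit L4 port fields packed into one 32-bit key word and peels them apart with a mask and a shift before storing them in the filter; the added __force __be16 casts only satisfy sparse and do not alter the values. A small sketch of that split is below; struct port_pair and split_ports() are illustrative names, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the split performed by ixgbe_mat_prgm_ports(): the low 16 bits
 * of the key word become the source port field, the high 16 bits the
 * destination port field. */
struct port_pair {
        uint16_t src_port;      /* val & 0xffff */
        uint16_t dst_port;      /* val >> 16 */
};

static struct port_pair split_ports(uint32_t val)
{
        struct port_pair p = {
                .src_port = (uint16_t)(val & 0xffff),
                .dst_port = (uint16_t)(val >> 16),
        };
        return p;
}

int main(void)
{
        struct port_pair p = split_ports(0xABCD1234);

        printf("src=0x%04x dst=0x%04x\n",
               (unsigned int)p.src_port, (unsigned int)p.dst_port);
        /* prints: src=0x1234 dst=0xabcd */
        return 0;
}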
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 91bde90f9265..919a7af84b42 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index d6a7e77348c5..64e44e01c973 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_PHY_H_
#define _IXGBE_PHY_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index f6cc9166082a..b3e0d8bb5cbd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,30 +1,6 @@
-/*******************************************************************************
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
#include "ixgbe.h"
#include <linux/ptp_classify.h>
#include <linux/clocksource.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 008aa073a679..6f59933cdff7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -266,7 +241,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
#endif
/* Disable VMDq flag so device will be set in VM mode */
- if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
+ if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
rss = min_t(int, ixgbe_max_rss_indices(adapter),
@@ -312,7 +287,8 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* other values out of range.
*/
num_tc = adapter->hw_tcs;
- num_rx_pools = adapter->num_rx_pools;
+ num_rx_pools = bitmap_weight(adapter->fwd_bitmask,
+ adapter->num_rx_pools);
limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
(num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
@@ -878,14 +854,11 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET;
- if (!is_zero_ether_addr(vf_mac)) {
+ if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) {
msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
memcpy(addr, vf_mac, ETH_ALEN);
} else {
msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
- dev_warn(&adapter->pdev->dev,
- "VF %d has no MAC address assigned, you may have to assign one manually\n",
- vf);
}
/*
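
The ixgbe_sriov.c hunks above stop trusting the RING_F_VMDQ limit and instead count pools that are actually in use with bitmap_weight() over the fwd_bitmask. The kernel helper works on arbitrary-length bitmaps; the single-word popcount analogue below is only meant to show what that count means, and bitmap_weight64() is an illustrative name.

#include <stdint.h>
#include <stdio.h>

/* Single-word analogue of bitmap_weight(): count how many of the first
 * nbits pool slots are set in the forwarding bitmask. Limited to 64 bits,
 * unlike the kernel helper. */
static unsigned int bitmap_weight64(uint64_t map, unsigned int nbits)
{
        uint64_t mask = nbits >= 64 ? ~0ULL : ((1ULL << nbits) - 1);

        return (unsigned int)__builtin_popcountll(map & mask);
}

int main(void)
{
        /* pools 0, 1 and 5 in use out of 8 configured rx pools */
        uint64_t fwd_bitmask = (1ULL << 0) | (1ULL << 1) | (1ULL << 5);

        printf("%u pools active\n", bitmap_weight64(fwd_bitmask, 8));
        /* prints: 3 pools active */
        return 0;
}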
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index e30d1f07e891..3ec21923c89c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_SRIOV_H_
#define _IXGBE_SRIOV_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 24766e125592..204844288c16 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_common.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 2daa81e6e9b2..e8ed37749ab1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,31 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_TYPE_H_
#define _IXGBE_TYPE_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index b8c5fd2a2115..de563cfd294d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,30 +1,5 @@
-/*******************************************************************************
-
- Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2016 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index 182d640e9f7a..e246c0d2a427 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -1,27 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
- *
- * Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "ixgbe_type.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 3123267dfba9..a8148c7126e5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,26 +1,6 @@
-/*******************************************************************************
- *
- * Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 1999 - 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * Linux NICS <linux.nics@intel.com>
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
+
#include "ixgbe_x540.h"
#include "ixgbe_type.h"
#include "ixgbe_common.h"
@@ -898,8 +878,9 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
/* convert offset from words to bytes */
- buffer.address = cpu_to_be32((offset + current_word) * 2);
- buffer.length = cpu_to_be16(words_to_read * 2);
+ buffer.address = (__force u32)cpu_to_be32((offset +
+ current_word) * 2);
+ buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);
buffer.pad2 = 0;
buffer.pad3 = 0;
@@ -1109,9 +1090,9 @@ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
/* convert offset from words to bytes */
- buffer.address = cpu_to_be32(offset * 2);
+ buffer.address = (__force u32)cpu_to_be32(offset * 2);
/* one word */
- buffer.length = cpu_to_be16(sizeof(u16));
+ buffer.length = (__force u16)cpu_to_be16(sizeof(u16));
status = hw->mac.ops.acquire_swfw_sync(hw, mask);
if (status)
@@ -3427,6 +3408,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
hw->phy.sfp_setup_needed = false;
}
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ return status;
+
/* Reset PHY */
if (!hw->phy.reset_disable && hw->phy.ops.reset)
hw->phy.ops.reset(hw);
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index bb47814cfa90..aba1e6a37a6a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -1,31 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-################################################################################
-#
-# Intel 82599 Virtual Function driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
+# Copyright(c) 1999 - 2018 Intel Corporation.
#
# Makefile for the Intel(R) 82599 VF ethernet driver
#
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 71c828842b11..700d8eb2f6f8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBEVF_DEFINES_H_
#define _IXGBEVF_DEFINES_H_
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 8e7d6c6f5c92..e7813d76527c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2018 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* ethtool support for ixgbevf */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 447ce1d5e0e3..70c75681495f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2018 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 3d9033f26eff..083041129539 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2018 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
@@ -3420,7 +3397,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
if (!err)
continue;
hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
- break;
+ goto err_setup_tx;
}
return 0;
@@ -4137,7 +4114,7 @@ out_drop:
return NETDEV_TX_OK;
}
-static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring;
@@ -4187,6 +4164,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
return -EPERM;
ether_addr_copy(hw->mac.addr, addr->sa_data);
+ ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
return 0;
@@ -4770,14 +4748,14 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
netif_device_detach(netdev);
+ if (netif_running(netdev))
+ ixgbevf_close_suspend(adapter);
+
if (state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
- if (netif_running(netdev))
- ixgbevf_close_suspend(adapter);
-
if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
pci_disable_device(pdev);
rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 2819abc454c7..6bc1953263b9 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "mbx.h"
#include "ixgbevf.h"
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 5ec947fe3d09..bfd9ae150808 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 278f73980501..68d16ae5b65a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef _IXGBEVF_REGS_H_
#define _IXGBEVF_REGS_H_
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 38d3a327c1bc..bf0577e819e1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,28 +1,5 @@
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "vf.h"
#include "ixgbevf.h"
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 194fbdaa4519..d1e9e306653b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,29 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*******************************************************************************
-
- Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2015 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Copyright(c) 1999 - 2018 Intel Corporation. */
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index ebe5c9148935..cc2f7701e71e 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -86,6 +86,7 @@ config MVPP2
depends on ARCH_MVEBU || COMPILE_TEST
depends on HAS_DMA
select MVMDIO
+ select PHYLINK
---help---
This driver supports the network interface units in the
Marvell ARMADA 375, 7K and 8K SoCs.
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 0495487f7b42..c5dac6bd2be4 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -348,10 +348,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
goto out_mdio;
}
- if (pdev->dev.of_node)
- ret = of_mdiobus_register(bus, pdev->dev.of_node);
- else
- ret = mdiobus_register(bus);
+ ret = of_mdiobus_register(bus, pdev->dev.of_node);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
goto out_mdio;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 54a038943c06..6847cd431aa0 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -29,6 +29,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
@@ -359,15 +360,23 @@
#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
+#define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4)
#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN BIT(9)
+#define MVPP2_GMAC_FC_ADV_ASM_EN BIT(10)
#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
#define MVPP2_GMAC_STATUS0 0x10
#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0)
+#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1)
+#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2)
+#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3)
+#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6)
+#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7)
+#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
@@ -379,6 +388,8 @@
#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1)
#define MVPP22_GMAC_CTRL_4_REG 0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
+#define MVPP22_CTRL4_RX_FC_EN BIT(3)
+#define MVPP22_CTRL4_TX_FC_EN BIT(4)
#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
@@ -392,6 +403,7 @@
#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
+#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
#define MVPP22_XLG_CTRL1_REG 0x104
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0
@@ -413,6 +425,7 @@
#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
+#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14)
/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG 0x1204
@@ -663,7 +676,7 @@ enum mvpp2_tag_type {
#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
-#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1)
+#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
@@ -916,6 +929,8 @@ static struct {
#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
+#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
+
/* Definitions */
/* Shared Packet Processor resources */
@@ -940,6 +955,7 @@ struct mvpp2 {
struct clk *pp_clk;
struct clk *gop_clk;
struct clk *mg_clk;
+ struct clk *mg_core_clk;
struct clk *axi_clk;
/* List of pointers to port structures */
@@ -1014,6 +1030,9 @@ struct mvpp2_port {
/* Firmware node associated to the port */
struct fwnode_handle *fwnode;
+ /* Is a PHY always connected to the port */
+ bool has_phy;
+
/* Per-port registers' base address */
void __iomem *base;
void __iomem *stats_base;
@@ -1041,12 +1060,11 @@ struct mvpp2_port {
struct mutex gather_stats_lock;
struct delayed_work stats_work;
+ struct device_node *of_node;
+
phy_interface_t phy_interface;
- struct device_node *phy_node;
+ struct phylink *phylink;
struct phy *comphy;
- unsigned int link;
- unsigned int duplex;
- unsigned int speed;
struct mvpp2_bm_pool *pool_long;
struct mvpp2_bm_pool *pool_short;
@@ -1335,6 +1353,12 @@ struct mvpp2_bm_pool {
(addr) < (txq_pcpu)->tso_headers_dma + \
(txq_pcpu)->size * TSO_HEADER_SIZE)
+/* The prototype is added here to be used in start_dev when using ACPI. This
+ * will be removed once phylink is used for all modes (dt+ACPI).
+ */
+static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+ const struct phylink_link_state *state);
+
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1
@@ -1429,7 +1453,7 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
if (port->priv->hw_version == MVPP21)
return tx_desc->pp21.buf_dma_addr;
else
- return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
+ return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
@@ -1447,7 +1471,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
} else {
u64 val = (u64)addr;
- tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
+ tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
tx_desc->pp22.buf_dma_addr_ptp |= val;
tx_desc->pp22.packet_offset = offset;
}
@@ -1507,7 +1531,7 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
if (port->priv->hw_version == MVPP21)
return rx_desc->pp21.buf_dma_addr;
else
- return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
+ return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
}
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
@@ -1516,7 +1540,7 @@ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
if (port->priv->hw_version == MVPP21)
return rx_desc->pp21.buf_cookie;
else
- return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
+ return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
}
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
@@ -1732,7 +1756,6 @@ static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
-
if (!(enable & BIT(i)))
continue;
@@ -1816,7 +1839,6 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
-
if (!(mask & BIT(i)))
continue;
@@ -2106,6 +2128,9 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
mvpp2_prs_sram_ai_update(&pe, 0,
MVPP2_PRS_SRAM_AI_MASK);
+ /* Set result info bits to 'single vlan' */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
+ MVPP2_PRS_RI_VLAN_MASK);
/* If packet is tagged continue check vid filtering */
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
} else {
@@ -4846,6 +4871,8 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
mvpp22_gop_init_rgmii(port);
break;
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
mvpp22_gop_init_sgmii(port);
break;
case PHY_INTERFACE_MODE_10GKR:
@@ -4883,7 +4910,9 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
u32 val;
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
/* Enable the GMAC link status irq for this port */
val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
@@ -4908,12 +4937,14 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
if (port->gop_id == 0) {
val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
- MVPP22_XLG_EXT_INT_MASK_GIG);
+ MVPP22_XLG_EXT_INT_MASK_GIG);
writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
}
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
@@ -4925,7 +4956,9 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
u32 val;
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
val = readl(port->base + MVPP22_GMAC_INT_MASK);
val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_MASK);
@@ -4940,6 +4973,16 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
mvpp22_gop_unmask_irq(port);
}
+/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
+ *
+ * The PHY mode used by the PPv2 driver comes from the network subsystem, while
+ * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
+ * differ.
+ *
+ * The COMPHY configures the serdes lanes regardless of the actual use of the
+ * lanes by the physical layer. This is why configurations like
+ * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
+ */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
enum phy_mode mode;
@@ -4950,8 +4993,12 @@ static int mvpp22_comphy_init(struct mvpp2_port *port)
switch (port->phy_interface) {
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
mode = PHY_MODE_SGMII;
break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ mode = PHY_MODE_2500SGMII;
+ break;
case PHY_INTERFACE_MODE_10GKR:
mode = PHY_MODE_10GKR;
break;
@@ -4966,133 +5013,6 @@ static int mvpp22_comphy_init(struct mvpp2_port *port)
return phy_power_on(port->comphy);
}
-static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
-{
- u32 val;
-
- if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
- val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
- MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
- val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
- writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
- } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
- val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
- val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
- MVPP22_CTRL4_SYNC_BYPASS_DIS |
- MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
- val &= ~MVPP22_CTRL4_DP_CLK_SEL;
- writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
- }
-
- /* The port is connected to a copper PHY */
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
-
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
- MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
- MVPP2_GMAC_AN_DUPLEX_EN;
- if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
- val |= MVPP2_GMAC_IN_BAND_AUTONEG;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-}
-
-static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
-{
- u32 val;
-
- /* Force link down */
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
- val |= MVPP2_GMAC_FORCE_LINK_DOWN;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- /* Set the GMAC in a reset state */
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- val |= MVPP2_GMAC_PORT_RESET_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
-
- /* Configure the PCS and in-band AN */
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
- } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
- val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
- }
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
-
- mvpp2_port_mii_gmac_configure_mode(port);
-
- /* Unset the GMAC reset state */
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- val &= ~MVPP2_GMAC_PORT_RESET_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
-
- /* Stop forcing link down */
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-}
-
-static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
-{
- u32 val;
-
- if (port->gop_id != 0)
- return;
-
- val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
- writel(val, port->base + MVPP22_XLG_CTRL0_REG);
-
- val = readl(port->base + MVPP22_XLG_CTRL4_REG);
- val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
- val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
- writel(val, port->base + MVPP22_XLG_CTRL4_REG);
-}
-
-static void mvpp22_port_mii_set(struct mvpp2_port *port)
-{
- u32 val;
-
- /* Only GOP port 0 has an XLG MAC */
- if (port->gop_id == 0) {
- val = readl(port->base + MVPP22_XLG_CTRL3_REG);
- val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
-
- if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
- port->phy_interface == PHY_INTERFACE_MODE_10GKR)
- val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
- else
- val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
-
- writel(val, port->base + MVPP22_XLG_CTRL3_REG);
- }
-}
-
-static void mvpp2_port_mii_set(struct mvpp2_port *port)
-{
- if (port->priv->hw_version == MVPP22)
- mvpp22_port_mii_set(port);
-
- if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII)
- mvpp2_port_mii_gmac_configure(port);
- else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
- mvpp2_port_mii_xlg_configure(port);
-}
-
-static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
-{
- u32 val;
-
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val |= MVPP2_GMAC_FC_ADV_EN;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-}
-
static void mvpp2_port_enable(struct mvpp2_port *port)
{
u32 val;
@@ -5123,8 +5043,11 @@ static void mvpp2_port_disable(struct mvpp2_port *port)
(port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
- MVPP22_XLG_CTRL0_MAC_RESET_DIS);
+ val &= ~MVPP22_XLG_CTRL0_PORT_EN;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+
+ /* Disable & reset should be done separately */
+ val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
} else {
val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
@@ -5144,18 +5067,21 @@ static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
}
/* Configure loopback port */
-static void mvpp2_port_loopback_set(struct mvpp2_port *port)
+static void mvpp2_port_loopback_set(struct mvpp2_port *port,
+ const struct phylink_link_state *state)
{
u32 val;
val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
- if (port->speed == 1000)
+ if (state->speed == 1000)
val |= MVPP2_GMAC_GMII_LB_EN_MASK;
else
val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
- if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
val |= MVPP2_GMAC_PCS_LB_EN_MASK;
else
val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
@@ -5328,10 +5254,6 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
int tx_port_num, val, queue, ptxq, lrxq;
if (port->priv->hw_version == MVPP21) {
- /* Configure port to loopback if needed */
- if (port->flags & MVPP2_F_LOOPBACK)
- mvpp2_port_loopback_set(port);
-
/* Update TX FIFO MIN Threshold */
val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
@@ -5549,7 +5471,6 @@ static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
-
/* Check if there are enough free descriptors in aggregated txq.
* If not, update the number of occupied descriptors and repeat the check.
*
@@ -5566,11 +5487,10 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
MVPP2_AGGR_TXQ_STATUS_REG(cpu));
aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
- }
-
- if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
- return -ENOMEM;
+ if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
+ return -ENOMEM;
+ }
return 0;
}
@@ -5630,7 +5550,7 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
- /* OK, the descriptor cound has been updated: check again. */
+ /* OK, the descriptor could have been updated: check again. */
if (txq_pcpu->reserved_num < num)
return -ENOMEM;
return 0;
@@ -6112,7 +6032,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
/* Calculate base address in prefetch buffer. We reserve 16 descriptors
* for each existing TXQ.
* TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
- * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS
+ * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
*/
desc_per_txq = 16;
desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
@@ -6369,7 +6289,9 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
link = true;
}
} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
val = readl(port->base + MVPP22_GMAC_INT_STAT);
if (val & MVPP22_GMAC_INT_STAT_LINK) {
event = true;
@@ -6379,6 +6301,11 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
}
}
+ if (port->phylink) {
+ phylink_mac_change(port->phylink, link);
+ goto handled;
+ }
+
if (!netif_running(dev) || !event)
goto handled;
@@ -6403,111 +6330,6 @@ handled:
return IRQ_HANDLED;
}
-static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
- struct phy_device *phydev)
-{
- u32 val;
-
- if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
- port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
- port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- port->phy_interface != PHY_INTERFACE_MODE_SGMII)
- return;
-
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX |
- MVPP2_GMAC_AN_SPEED_EN |
- MVPP2_GMAC_AN_DUPLEX_EN);
-
- if (phydev->duplex)
- val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-
- if (phydev->speed == SPEED_1000)
- val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
- else if (phydev->speed == SPEED_100)
- val |= MVPP2_GMAC_CONFIG_MII_SPEED;
-
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-}
-
-/* Adjust link */
-static void mvpp2_link_event(struct net_device *dev)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- bool link_reconfigured = false;
- u32 val;
-
- if (phydev->link) {
- if (port->phy_interface != phydev->interface && port->comphy) {
- /* disable current port for reconfiguration */
- mvpp2_interrupts_disable(port);
- netif_carrier_off(port->dev);
- mvpp2_port_disable(port);
- phy_power_off(port->comphy);
-
- /* comphy reconfiguration */
- port->phy_interface = phydev->interface;
- mvpp22_comphy_init(port);
-
- /* gop/mac reconfiguration */
- mvpp22_gop_init(port);
- mvpp2_port_mii_set(port);
-
- link_reconfigured = true;
- }
-
- if ((port->speed != phydev->speed) ||
- (port->duplex != phydev->duplex)) {
- mvpp2_gmac_set_autoneg(port, phydev);
-
- port->duplex = phydev->duplex;
- port->speed = phydev->speed;
- }
- }
-
- if (phydev->link != port->link || link_reconfigured) {
- port->link = phydev->link;
-
- if (phydev->link) {
- if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
- port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
- port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val |= (MVPP2_GMAC_FORCE_LINK_PASS |
- MVPP2_GMAC_FORCE_LINK_DOWN);
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- }
-
- mvpp2_interrupts_enable(port);
- mvpp2_port_enable(port);
-
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
- netif_carrier_on(dev);
- netif_tx_wake_all_queues(dev);
- } else {
- port->duplex = -1;
- port->speed = 0;
-
- netif_tx_stop_all_queues(dev);
- netif_carrier_off(dev);
- mvpp2_ingress_disable(port);
- mvpp2_egress_disable(port);
-
- mvpp2_port_disable(port);
- mvpp2_interrupts_disable(port);
- }
-
- phy_print_status(phydev);
- }
-}
-
static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
ktime_t interval;
@@ -6559,21 +6381,23 @@ static void mvpp2_rx_error(struct mvpp2_port *port,
{
u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
+ char *err_str = NULL;
switch (status & MVPP2_RXD_ERR_CODE_MASK) {
case MVPP2_RXD_ERR_CRC:
- netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
- status, sz);
+ err_str = "crc";
break;
case MVPP2_RXD_ERR_OVERRUN:
- netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
- status, sz);
+ err_str = "overrun";
break;
case MVPP2_RXD_ERR_RESOURCE:
- netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
- status, sz);
+ err_str = "resource";
break;
}
+ if (err_str && net_ratelimit())
+ netdev_err(port->dev,
+ "bad rx status %08x (%s error), size=%zu\n",
+ status, err_str, sz);
}
/* Handle RX checksum offload */
@@ -6778,8 +6602,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
mvpp2_txdesc_size_set(port, tx_desc, frag->size);
buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
- frag->size,
- DMA_TO_DEVICE);
+ frag->size, DMA_TO_DEVICE);
if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
mvpp2_txq_desc_put(txq);
goto cleanup;
@@ -7115,11 +6938,29 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
return rx_done;
}
-/* Set hw internals when starting port */
-static void mvpp2_start_dev(struct mvpp2_port *port)
+static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
{
- struct net_device *ndev = port->dev;
- int i;
+ u32 ctrl3;
+
+ /* comphy reconfiguration */
+ mvpp22_comphy_init(port);
+
+ /* gop reconfiguration */
+ mvpp22_gop_init(port);
+
+ /* Only GOP port 0 has an XLG MAC */
+ if (port->gop_id == 0) {
+ ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
+ ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
+
+ if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR)
+ ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
+ else
+ ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
+
+ writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
+ }
if (port->gop_id == 0 &&
(port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
@@ -7127,6 +6968,12 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
mvpp2_xlg_max_rx_size_set(port);
else
mvpp2_gmac_max_rx_size_set(port);
+}
+
+/* Set hw internals when starting port */
+static void mvpp2_start_dev(struct mvpp2_port *port)
+{
+ int i;
mvpp2_txp_max_tx_size_set(port);
@@ -7136,42 +6983,39 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
/* Enable interrupts on all CPUs */
mvpp2_interrupts_enable(port);
- if (port->priv->hw_version == MVPP22) {
- mvpp22_comphy_init(port);
- mvpp22_gop_init(port);
+ if (port->priv->hw_version == MVPP22)
+ mvpp22_mode_reconfigure(port);
+
+ if (port->phylink) {
+ phylink_start(port->phylink);
+ } else {
+ /* Phylink isn't used as of now for ACPI, so the MAC has to be
+ * configured manually when the interface is started. This will
+ * be removed as soon as the phylink ACPI support lands in.
+ */
+ struct phylink_link_state state = {
+ .interface = port->phy_interface,
+ .link = 1,
+ };
+ mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
}
- mvpp2_port_mii_set(port);
- mvpp2_port_enable(port);
- if (ndev->phydev)
- phy_start(ndev->phydev);
netif_tx_start_all_queues(port->dev);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
- struct net_device *ndev = port->dev;
int i;
- /* Stop new packets from arriving to RXQs */
- mvpp2_ingress_disable(port);
-
- mdelay(10);
-
/* Disable interrupts on all CPUs */
mvpp2_interrupts_disable(port);
for (i = 0; i < port->nqvecs; i++)
napi_disable(&port->qvecs[i].napi);
- netif_carrier_off(port->dev);
- netif_tx_stop_all_queues(port->dev);
-
- mvpp2_egress_disable(port);
- mvpp2_port_disable(port);
- if (ndev->phydev)
- phy_stop(ndev->phydev);
+ if (port->phylink)
+ phylink_stop(port->phylink);
phy_power_off(port->comphy);
}
@@ -7230,40 +7074,6 @@ static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
-static int mvpp2_phy_connect(struct mvpp2_port *port)
-{
- struct phy_device *phy_dev;
-
- /* No PHY is attached */
- if (!port->phy_node)
- return 0;
-
- phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
- port->phy_interface);
- if (!phy_dev) {
- netdev_err(port->dev, "cannot connect to phy\n");
- return -ENODEV;
- }
- phy_dev->supported &= PHY_GBIT_FEATURES;
- phy_dev->advertising = phy_dev->supported;
-
- port->link = 0;
- port->duplex = 0;
- port->speed = 0;
-
- return 0;
-}
-
-static void mvpp2_phy_disconnect(struct mvpp2_port *port)
-{
- struct net_device *ndev = port->dev;
-
- if (!ndev->phydev)
- return;
-
- phy_disconnect(ndev->phydev);
-}
-
static int mvpp2_irqs_init(struct mvpp2_port *port)
{
int err, i;
@@ -7347,6 +7157,7 @@ static int mvpp2_open(struct net_device *dev)
struct mvpp2 *priv = port->priv;
unsigned char mac_bcast[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ bool valid = false;
int err;
err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
@@ -7389,7 +7200,19 @@ static int mvpp2_open(struct net_device *dev)
goto err_cleanup_txqs;
}
- if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) {
+ /* Phylink isn't supported yet in ACPI mode */
+ if (port->of_node) {
+ err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
+ if (err) {
+ netdev_err(port->dev, "could not attach PHY (%d)\n",
+ err);
+ goto err_free_irq;
+ }
+
+ valid = true;
+ }
+
+ if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
dev->name, port);
if (err) {
@@ -7399,14 +7222,20 @@ static int mvpp2_open(struct net_device *dev)
}
mvpp22_gop_setup_irq(port);
- }
- /* In default link is down */
- netif_carrier_off(port->dev);
+ /* In default link is down */
+ netif_carrier_off(port->dev);
- err = mvpp2_phy_connect(port);
- if (err < 0)
- goto err_free_link_irq;
+ valid = true;
+ } else {
+ port->link_irq = 0;
+ }
+
+ if (!valid) {
+ netdev_err(port->dev,
+ "invalid configuration: no dt or link IRQ");
+ goto err_free_irq;
+ }
/* Unmask interrupts on all CPUs */
on_each_cpu(mvpp2_interrupts_unmask, port, 1);
@@ -7423,9 +7252,6 @@ static int mvpp2_open(struct net_device *dev)
return 0;
-err_free_link_irq:
- if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
- free_irq(port->link_irq, port);
err_free_irq:
mvpp2_irqs_deinit(port);
err_cleanup_txqs:
@@ -7439,17 +7265,17 @@ static int mvpp2_stop(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2_port_pcpu *port_pcpu;
- struct mvpp2 *priv = port->priv;
int cpu;
mvpp2_stop_dev(port);
- mvpp2_phy_disconnect(port);
/* Mask interrupts on all CPUs */
on_each_cpu(mvpp2_interrupts_mask, port, 1);
mvpp2_shared_interrupt_mask_unmask(port, true);
- if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
+ if (port->phylink)
+ phylink_disconnect_phy(port->phylink);
+ if (port->link_irq)
free_irq(port->link_irq, port);
mvpp2_irqs_deinit(port);
@@ -7532,42 +7358,18 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
- struct mvpp2_port *port = netdev_priv(dev);
const struct sockaddr *addr = p;
int err;
- if (!is_valid_ether_addr(addr->sa_data)) {
- err = -EADDRNOTAVAIL;
- goto log_error;
- }
-
- if (!netif_running(dev)) {
- err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
- if (!err)
- return 0;
- /* Reconfigure parser to accept the original MAC address */
- err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
- if (err)
- goto log_error;
- }
-
- mvpp2_stop_dev(port);
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
- if (!err)
- goto out_start;
-
- /* Reconfigure parser accept the original MAC address */
- err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
- if (err)
- goto log_error;
-out_start:
- mvpp2_start_dev(port);
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
- return 0;
-log_error:
- netdev_err(dev, "failed to change MAC address\n");
+ if (err) {
+ /* Reconfigure parser accept the original MAC address */
+ mvpp2_prs_update_mac_da(dev, dev->dev_addr);
+ netdev_err(dev, "failed to change MAC address\n");
+ }
return err;
}
@@ -7655,16 +7457,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- int ret;
+ struct mvpp2_port *port = netdev_priv(dev);
- if (!dev->phydev)
+ if (!port->phylink)
return -ENOTSUPP;
- ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
- if (!ret)
- mvpp2_link_event(dev);
-
- return ret;
+ return phylink_mii_ioctl(port->phylink, ifr, cmd);
}
static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
@@ -7711,6 +7509,16 @@ static int mvpp2_set_features(struct net_device *dev,
/* Ethtool methods */
+static int mvpp2_ethtool_nway_reset(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_nway_reset(port->phylink);
+}
+
/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *c)
@@ -7839,6 +7647,50 @@ err_out:
return err;
}
+static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return;
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
+static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -7856,18 +7708,20 @@ static const struct net_device_ops mvpp2_netdev_ops = {
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
- .nway_reset = phy_ethtool_nway_reset,
- .get_link = ethtool_op_get_link,
- .set_coalesce = mvpp2_ethtool_set_coalesce,
- .get_coalesce = mvpp2_ethtool_get_coalesce,
- .get_drvinfo = mvpp2_ethtool_get_drvinfo,
- .get_ringparam = mvpp2_ethtool_get_ringparam,
- .set_ringparam = mvpp2_ethtool_set_ringparam,
- .get_strings = mvpp2_ethtool_get_strings,
- .get_ethtool_stats = mvpp2_ethtool_get_stats,
- .get_sset_count = mvpp2_ethtool_get_sset_count,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = mvpp2_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .set_coalesce = mvpp2_ethtool_set_coalesce,
+ .get_coalesce = mvpp2_ethtool_get_coalesce,
+ .get_drvinfo = mvpp2_ethtool_get_drvinfo,
+ .get_ringparam = mvpp2_ethtool_get_ringparam,
+ .set_ringparam = mvpp2_ethtool_set_ringparam,
+ .get_strings = mvpp2_ethtool_get_strings,
+ .get_ethtool_stats = mvpp2_ethtool_get_stats,
+ .get_sset_count = mvpp2_ethtool_get_sset_count,
+ .get_pauseparam = mvpp2_ethtool_get_pause_param,
+ .set_pauseparam = mvpp2_ethtool_set_pause_param,
+ .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
+ .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -8169,18 +8023,361 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
eth_hw_addr_random(dev);
}
+static void mvpp2_phylink_validate(struct net_device *dev,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_10GKR:
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+ phylink_set(mask, 10000baseLRM_Full);
+ phylink_set(mask, 10000baseER_Full);
+ phylink_set(mask, 10000baseKR_Full);
+ /* Fall-through */
+ default:
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 10000baseT_Full);
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void mvpp22_xlg_link_state(struct mvpp2_port *port,
+ struct phylink_link_state *state)
+{
+ u32 val;
+
+ state->speed = SPEED_10000;
+ state->duplex = 1;
+ state->an_complete = 1;
+
+ val = readl(port->base + MVPP22_XLG_STATUS);
+ state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
+
+ state->pause = 0;
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_TX;
+ if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_RX;
+}
+
+static void mvpp2_gmac_link_state(struct mvpp2_port *port,
+ struct phylink_link_state *state)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
+
+ state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
+ state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
+ state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
+
+ switch (port->phy_interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ state->speed = SPEED_1000;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ state->speed = SPEED_2500;
+ break;
+ default:
+ if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
+ state->speed = SPEED_1000;
+ else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
+ }
+
+ state->pause = 0;
+ if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
+ state->pause |= MLO_PAUSE_RX;
+ if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int mvpp2_phylink_mac_link_state(struct net_device *dev,
+ struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
+ u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
+ mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
+
+ if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
+ mvpp22_xlg_link_state(port, state);
+ return 1;
+ }
+ }
+
+ mvpp2_gmac_link_state(port, state);
+ return 1;
+}
+
+static void mvpp2_mac_an_restart(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ u32 val;
+
+ if (port->phy_interface != PHY_INTERFACE_MODE_SGMII)
+ return;
+
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ /* The RESTART_AN bit is cleared by the h/w after restarting the AN
+ * process.
+ */
+ val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ u32 ctrl0, ctrl4;
+
+ ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);
+
+ if (state->pause & MLO_PAUSE_TX)
+ ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
+ if (state->pause & MLO_PAUSE_RX)
+ ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
+
+ ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
+ ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
+ MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
+
+ writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
+ writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
+}
+
+static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ u32 an, ctrl0, ctrl2, ctrl4;
+
+ an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+ ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+
+ /* Force link down */
+ an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ an |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ /* Set the GMAC in a reset state */
+ ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
+ writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
+
+ an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
+ MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
+ MVPP2_GMAC_FORCE_LINK_DOWN);
+ ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
+ ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
+
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
+ * they negotiate duplex: they are always operating with a fixed
+ * speed of 1000/2500Mbps in full duplex, so force 1000/2500
+ * speed and full duplex here.
+ */
+ ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
+ an |= MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+ } else if (!phy_interface_mode_is_rgmii(state->interface)) {
+ an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG;
+ }
+
+ if (state->duplex)
+ an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+ if (phylink_test(state->advertising, Pause))
+ an |= MVPP2_GMAC_FC_ADV_EN;
+ if (phylink_test(state->advertising, Asym_Pause))
+ an |= MVPP2_GMAC_FC_ADV_ASM_EN;
+
+ if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_1000BASEX ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ an |= MVPP2_GMAC_IN_BAND_AUTONEG;
+ ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
+
+ ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL |
+ MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
+ ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
+ MVPP22_CTRL4_DP_CLK_SEL |
+ MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+
+ if (state->pause & MLO_PAUSE_TX)
+ ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
+ if (state->pause & MLO_PAUSE_RX)
+ ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
+ } else if (phy_interface_mode_is_rgmii(state->interface)) {
+ an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS;
+
+ if (state->speed == SPEED_1000)
+ an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+ else if (state->speed == SPEED_100)
+ an |= MVPP2_GMAC_CONFIG_MII_SPEED;
+
+ ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
+ ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
+ MVPP22_CTRL4_SYNC_BYPASS_DIS |
+ MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+ }
+
+ writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
+ writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
+ writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
+ writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ /* Check for invalid configuration */
+ if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) {
+ netdev_err(dev, "Invalid mode on %s\n", dev->name);
+ return;
+ }
+
+ netif_tx_stop_all_queues(port->dev);
+ if (!port->has_phy)
+ netif_carrier_off(port->dev);
+
+ /* Make sure the port is disabled when reconfiguring the mode */
+ mvpp2_port_disable(port);
+
+ if (port->priv->hw_version == MVPP22 &&
+ port->phy_interface != state->interface) {
+ port->phy_interface = state->interface;
+
+ /* Reconfigure the serdes lanes */
+ phy_power_off(port->comphy);
+ mvpp22_mode_reconfigure(port);
+ }
+
+ /* mac (re)configuration */
+ if (state->interface == PHY_INTERFACE_MODE_10GKR)
+ mvpp2_xlg_config(port, mode, state);
+ else if (phy_interface_mode_is_rgmii(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_1000BASEX ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ mvpp2_gmac_config(port, mode, state);
+
+ if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
+ mvpp2_port_loopback_set(port, state);
+
+ /* If the port already was up, make sure it's still in the same state */
+ if (state->link || !port->has_phy) {
+ mvpp2_port_enable(port);
+
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+ if (!port->has_phy)
+ netif_carrier_on(dev);
+ netif_tx_wake_all_queues(dev);
+ }
+}
+
+static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phy)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ u32 val;
+
+ if (!phylink_autoneg_inband(mode) &&
+ interface != PHY_INTERFACE_MODE_10GKR) {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
+ if (phy_interface_mode_is_rgmii(interface))
+ val |= MVPP2_GMAC_FORCE_LINK_PASS;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
+
+ mvpp2_port_enable(port);
+
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+ netif_tx_wake_all_queues(dev);
+}
+
+static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ u32 val;
+
+ if (!phylink_autoneg_inband(mode) &&
+ interface != PHY_INTERFACE_MODE_10GKR) {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
+
+ netif_tx_stop_all_queues(dev);
+ mvpp2_egress_disable(port);
+ mvpp2_ingress_disable(port);
+
+ /* When using link interrupts to notify phylink of a MAC state change,
+ * we do not want the port to be disabled (we want to receive further
+ * interrupts, to be notified when the port will have a link later).
+ */
+ if (!port->has_phy)
+ return;
+
+ mvpp2_port_disable(port);
+}
+
+static const struct phylink_mac_ops mvpp2_phylink_ops = {
+ .validate = mvpp2_phylink_validate,
+ .mac_link_state = mvpp2_phylink_mac_link_state,
+ .mac_an_restart = mvpp2_mac_an_restart,
+ .mac_config = mvpp2_mac_config,
+ .mac_link_up = mvpp2_mac_link_up,
+ .mac_link_down = mvpp2_mac_link_down,
+};
+
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct fwnode_handle *port_fwnode,
struct mvpp2 *priv)
{
- struct device_node *phy_node;
struct phy *comphy = NULL;
struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
struct device_node *port_node = to_of_node(port_fwnode);
struct net_device *dev;
struct resource *res;
+ struct phylink *phylink;
char *mac_from = "";
unsigned int ntxqs, nrxqs;
bool has_tx_irqs;
@@ -8209,11 +8406,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
if (!dev)
return -ENOMEM;
- if (port_node)
- phy_node = of_parse_phandle(port_node, "phy", 0);
- else
- phy_node = NULL;
-
phy_mode = fwnode_get_phy_mode(port_fwnode);
if (phy_mode < 0) {
dev_err(&pdev->dev, "incorrect phy mode\n");
@@ -8246,6 +8438,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port = netdev_priv(dev);
port->dev = dev;
port->fwnode = port_fwnode;
+ port->has_phy = !!of_find_property(port_node, "phy", NULL);
port->ntxqs = ntxqs;
port->nrxqs = nrxqs;
port->priv = priv;
@@ -8276,7 +8469,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
else
port->first_rxq = port->id * priv->max_port_rxqs;
- port->phy_node = phy_node;
+ port->of_node = port_node;
port->phy_interface = phy_mode;
port->comphy = comphy;
@@ -8337,9 +8530,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mvpp2_port_periodic_xon_disable(port);
- if (priv->hw_version == MVPP21)
- mvpp2_port_fc_adv_enable(port);
-
mvpp2_port_reset(port);
port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
@@ -8383,10 +8573,23 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
+ /* Phylink isn't used w/ ACPI as of now */
+ if (port_node) {
+ phylink = phylink_create(dev, port_fwnode, phy_mode,
+ &mvpp2_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_free_port_pcpu;
+ }
+ port->phylink = phylink;
+ } else {
+ port->phylink = NULL;
+ }
+
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");
- goto err_free_port_pcpu;
+ goto err_phylink;
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
@@ -8394,6 +8597,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
return 0;
+err_phylink:
+ if (port->phylink)
+ phylink_destroy(port->phylink);
err_free_port_pcpu:
free_percpu(port->pcpu);
err_free_txq_pcpu:
@@ -8407,7 +8613,6 @@ err_free_irq:
err_deinit_qvecs:
mvpp2_queue_vectors_deinit(port);
err_free_netdev:
- of_node_put(phy_node);
free_netdev(dev);
return err;
}
@@ -8418,7 +8623,8 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
int i;
unregister_netdev(port->dev);
- of_node_put(port->phy_node);
+ if (port->phylink)
+ phylink_destroy(port->phylink);
free_percpu(port->pcpu);
free_percpu(port->stats);
for (i = 0; i < port->ntxqs; i++)
@@ -8766,18 +8972,27 @@ static int mvpp2_probe(struct platform_device *pdev)
err = clk_prepare_enable(priv->mg_clk);
if (err < 0)
goto err_gop_clk;
+
+ priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
+ if (IS_ERR(priv->mg_core_clk)) {
+ priv->mg_core_clk = NULL;
+ } else {
+ err = clk_prepare_enable(priv->mg_core_clk);
+ if (err < 0)
+ goto err_mg_clk;
+ }
}
priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
if (IS_ERR(priv->axi_clk)) {
err = PTR_ERR(priv->axi_clk);
if (err == -EPROBE_DEFER)
- goto err_gop_clk;
+ goto err_mg_core_clk;
priv->axi_clk = NULL;
} else {
err = clk_prepare_enable(priv->axi_clk);
if (err < 0)
- goto err_gop_clk;
+ goto err_mg_core_clk;
}
/* Get system's tclk rate */
@@ -8789,9 +9004,9 @@ static int mvpp2_probe(struct platform_device *pdev)
}
if (priv->hw_version == MVPP22) {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+ err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
if (err)
- goto err_mg_clk;
+ goto err_axi_clk;
/* Sadly, the BM pools all share the same register to
* store the high 32 bits of their address. So they
* must all have the same high 32 bits, which forces
@@ -8799,14 +9014,14 @@ static int mvpp2_probe(struct platform_device *pdev)
*/
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err)
- goto err_mg_clk;
+ goto err_axi_clk;
}
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
if (err < 0) {
dev_err(&pdev->dev, "failed to initialize controller\n");
- goto err_mg_clk;
+ goto err_axi_clk;
}
/* Initialize ports */
@@ -8819,7 +9034,7 @@ static int mvpp2_probe(struct platform_device *pdev)
if (priv->port_count == 0) {
dev_err(&pdev->dev, "no ports enabled\n");
err = -ENODEV;
- goto err_mg_clk;
+ goto err_axi_clk;
}
/* Statistics must be gathered regularly because some of them (like
@@ -8847,8 +9062,13 @@ err_port_probe:
mvpp2_port_remove(priv->port_list[i]);
i++;
}
-err_mg_clk:
+err_axi_clk:
clk_disable_unprepare(priv->axi_clk);
+
+err_mg_core_clk:
+ if (priv->hw_version == MVPP22)
+ clk_disable_unprepare(priv->mg_core_clk);
+err_mg_clk:
if (priv->hw_version == MVPP22)
clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
@@ -8895,6 +9115,7 @@ static int mvpp2_remove(struct platform_device *pdev)
return 0;
clk_disable_unprepare(priv->axi_clk);
+ clk_disable_unprepare(priv->mg_core_clk);
clk_disable_unprepare(priv->mg_clk);
clk_disable_unprepare(priv->pp_clk);
clk_disable_unprepare(priv->gop_clk);
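The mvpp2 hunks above convert the driver from its private PHY handling to phylink: a struct phylink_mac_ops table (validate, mac_link_state, mac_an_restart, mac_config, mac_link_up, mac_link_down), a phylink_create() call at probe time for DT ports, and phylink_destroy() on the error and remove paths. Reduced to the calls visible in this diff, the wiring pattern looks roughly like the sketch below (hypothetical "foo" names, 2018-era phylink API where the callbacks take a struct net_device *; an illustration, not the driver's exact code):

	static void foo_validate(struct net_device *dev, unsigned long *supported,
				 struct phylink_link_state *state)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

		phylink_set(mask, Autoneg);
		phylink_set(mask, 1000baseT_Full);
		/* restrict both masks to what this MAC can actually do */
		bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
		bitmap_and(state->advertising, state->advertising, mask,
			   __ETHTOOL_LINK_MODE_MASK_NBITS);
	}

	static const struct phylink_mac_ops foo_phylink_ops = {
		.validate	= foo_validate,
		/* .mac_config, .mac_link_up, .mac_link_down, ... as above */
	};

	/* at probe time: */
	phylink = phylink_create(dev, port_fwnode, phy_mode, &foo_phylink_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);
	/* on remove or error unwind: phylink_destroy(phylink); */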
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a30a2e95d13f..f11b45001cad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1027,6 +1027,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
if (!coal->tx_max_coalesced_frames_irq)
return -EINVAL;
+ if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
+ netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
+ __func__, MLX4_EN_MAX_COAL_TIME);
+ return -ERANGE;
+ }
+
+ if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
+ coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
+ netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
+ __func__, MLX4_EN_MAX_COAL_PKTS);
+ return -ERANGE;
+ }
+
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET :
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e0adac4a9a19..9670b33fc9b1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3324,12 +3324,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
MAX_TX_RINGS, GFP_KERNEL);
if (!priv->tx_ring[t]) {
err = -ENOMEM;
- goto err_free_tx;
+ goto out;
}
priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
MAX_TX_RINGS, GFP_KERNEL);
if (!priv->tx_cq[t]) {
- kfree(priv->tx_ring[t]);
err = -ENOMEM;
goto out;
}
@@ -3582,11 +3581,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
return 0;
-err_free_tx:
- while (t--) {
- kfree(priv->tx_ring[t]);
- kfree(priv->tx_cq[t]);
- }
out:
mlx4_en_destroy_netdev(dev);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 5c613c6663da..9f54ccbddea7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -593,30 +593,25 @@ static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
}
#if IS_ENABLED(CONFIG_IPV6)
-/* In IPv6 packets, besides subtracting the pseudo header checksum,
- * we also compute/add the IP header checksum which
- * is not added by the HW.
+/* In IPv6 packets, hw_checksum lacks 6 bytes from IPv6 header:
+ * 4 first bytes : priority, version, flow_lbl
+ * and 2 additional bytes : nexthdr, hop_limit.
*/
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
struct ipv6hdr *ipv6h)
{
__u8 nexthdr = ipv6h->nexthdr;
- __wsum csum_pseudo_hdr = 0;
+ __wsum temp;
if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
nexthdr == IPPROTO_HOPOPTS ||
nexthdr == IPPROTO_SCTP))
return -1;
- hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
- csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
- sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
- csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
- csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
- (__force __wsum)htons(nexthdr));
-
- skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
- skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
+ /* priority, version, flow_lbl */
+ temp = csum_add(hw_checksum, *(__wsum *)ipv6h);
+ /* nexthdr and hop_limit */
+ skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr);
return 0;
}
#endif
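The rewritten get_fixed_ipv6_csum() above leans on the fact that the Internet checksum is a ones'-complement sum of 16-bit words: words the hardware skipped (the first four bytes of the IPv6 header and the nexthdr/hop_limit pair) can simply be added back afterwards, and the result is the same as if they had been covered in the first place. A tiny standalone illustration of the folding step (plain C outside the kernel, hypothetical helper name):

	#include <stdint.h>

	/* ones'-complement add of one 16-bit word into a running sum */
	static uint16_t csum_add16(uint16_t sum, uint16_t word)
	{
		uint32_t t = (uint32_t)sum + word;

		return (uint16_t)((t & 0xffff) + (t >> 16));	/* fold the carry back in */
	}

Because this addition is associative and commutative, the order in which header words enter the sum does not matter, which is what makes the "add the missing words later" fix-up in the hunk above valid.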
@@ -775,8 +770,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ length = xdp.data_end - xdp.data;
if (xdp.data != orig_data) {
- length = xdp.data_end - xdp.data;
frags[0].page_offset = xdp.data -
xdp.data_hard_start;
va = xdp.data;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 6b6853773848..0227786308af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -694,7 +694,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
- return skb_tx_hash(dev, skb);
+ return fallback(dev, skb);
return fallback(dev, skb) % rings_p_up;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index de6b3d416148..46dcbfbe4c5e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -165,6 +165,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[36] = "QinQ VST mode support",
[37] = "sl to vl mapping table change event support",
[38] = "user MAC support",
+ [39] = "Report driver version to FW support",
};
int i;
@@ -1038,6 +1039,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
if (field32 & (1 << 7))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
+ if (field32 & (1 << 8))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW;
MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
if (field32 & (1 << 17))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
@@ -1860,6 +1863,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
+#define INIT_HCA_DRIVER_VERSION_OFFSET 0x140
+#define INIT_HCA_DRIVER_VERSION_SZ 0x40
#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
@@ -1950,6 +1955,13 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
*(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) {
+ u8 *dst = (u8 *)(inbox + INIT_HCA_DRIVER_VERSION_OFFSET / 4);
+
+ strncpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ - 1);
+ mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", dst);
+ }
+
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index bfef69235d71..0a30d81aab3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
static int msi_x = 1;
module_param(msi_x, int, 0444);
-MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
+MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number of MSI-X irqs to msi_x");
#else /* CONFIG_PCI_MSI */
@@ -1317,7 +1317,7 @@ static int mlx4_mf_unbond(struct mlx4_dev *dev)
ret = mlx4_unbond_fs_rules(dev);
if (ret)
- mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret);
+ mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
ret1 = mlx4_unbond_mac_table(dev);
if (ret1) {
mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
@@ -2815,6 +2815,9 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
dev->caps.num_eqs - dev->caps.reserved_eqs,
MAX_MSIX);
+ if (msi_x > 1)
+ nreq = min_t(int, nreq, msi_x);
+
entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
if (!entries)
goto no_msi;
@@ -2929,6 +2932,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create file for port %d\n", port);
devlink_port_unregister(&info->devlink_port);
info->port = -1;
+ return err;
}
sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
@@ -2950,9 +2954,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
&info->port_attr);
devlink_port_unregister(&info->devlink_port);
info->port = -1;
+ return err;
}
- return err;
+ return 0;
}
static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
@@ -4125,17 +4130,68 @@ static const struct pci_error_handlers mlx4_err_handler = {
.resume = mlx4_pci_resume,
};
+static int mlx4_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+
+ mlx4_err(dev, "suspend was called\n");
+ mutex_lock(&persist->interface_state_mutex);
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ mlx4_unload_one(pdev);
+ mutex_unlock(&persist->interface_state_mutex);
+
+ return 0;
+}
+
+static int mlx4_resume(struct pci_dev *pdev)
+{
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ int total_vfs;
+ int ret = 0;
+
+ mlx4_err(dev, "resume was called\n");
+ total_vfs = dev->persist->num_vfs;
+ memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+
+ mutex_lock(&persist->interface_state_mutex);
+ if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
+ ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
+ nvfs, priv, 1);
+ if (!ret) {
+ ret = restore_current_port_types(dev,
+ dev->persist->curr_port_type,
+ dev->persist->curr_port_poss_type);
+ if (ret)
+ mlx4_err(dev, "resume: could not restore original port types (%d)\n", ret);
+ }
+ }
+ mutex_unlock(&persist->interface_state_mutex);
+
+ return ret;
+}
+
static struct pci_driver mlx4_driver = {
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
.shutdown = mlx4_shutdown,
.remove = mlx4_remove_one,
+ .suspend = mlx4_suspend,
+ .resume = mlx4_resume,
.err_handler = &mlx4_err_handler,
};
static int __init mlx4_verify_params(void)
{
+ if (msi_x < 0) {
+ pr_warn("mlx4_core: bad msi_x: %d\n", msi_x);
+ return -1;
+ }
+
if ((log_num_mac < 0) || (log_num_mac > 7)) {
pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
return -1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index c68da1986e51..cb9e923e8399 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -55,8 +55,8 @@
#include "fw_qos.h"
#define DRV_NAME "mlx4_core"
-#define PFX DRV_NAME ": "
#define DRV_VERSION "4.0-0"
+#define DRV_NAME_FOR_FW "Linux," DRV_NAME "," DRV_VERSION
#define MLX4_FS_UDP_UC_EN (1 << 1)
#define MLX4_FS_TCP_UC_EN (1 << 2)
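Given the definitions just above, the driver-version string that the new INIT_HCA code in fw.c copies into the command mailbox expands to:

	DRV_NAME_FOR_FW  ==  "Linux," DRV_NAME "," DRV_VERSION  ==  "Linux,mlx4_core,4.0-0"

It is written with strncpy() limited to INIT_HCA_DRIVER_VERSION_SZ - 1 bytes, so the 0x40-byte field keeps at least one trailing byte for NUL termination (assuming, as is usual for these mailboxes, that the buffer starts out zeroed).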
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f7c81133594f..ace6545f82e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -132,6 +132,9 @@
#define MLX4_EN_TX_COAL_PKTS 16
#define MLX4_EN_TX_COAL_TIME 0x10
+#define MLX4_EN_MAX_COAL_PKTS U16_MAX
+#define MLX4_EN_MAX_COAL_TIME U16_MAX
+
#define MLX4_EN_RX_RATE_LOW 400000
#define MLX4_EN_RX_COAL_TIME_LOW 0
#define MLX4_EN_RX_RATE_HIGH 450000
@@ -552,8 +555,8 @@ struct mlx4_en_priv {
u16 rx_usecs_low;
u32 pkt_rate_high;
u16 rx_usecs_high;
- u16 sample_interval;
- u16 adaptive_rx_coal;
+ u32 sample_interval;
+ u32 adaptive_rx_coal;
u32 msg_enable;
u32 loopback_ok;
u32 validate_loopback;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 12257034131e..ee6684779d11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -86,3 +86,14 @@ config MLX5_EN_IPSEC
Build support for IPsec cryptography-offload accelaration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
+
+config MLX5_EN_TLS
+	bool "TLS cryptography-offload acceleration"
+ depends on MLX5_CORE_EN
+ depends on TLS_DEVICE
+ depends on MLX5_ACCEL
+ default n
+ ---help---
+	  Build support for TLS cryptography-offload acceleration in the NIC.
+ Note: Support for hardware with this capability needs to be selected
+ for this option to become available.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index c805769d92a9..a7135f5d5cf6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -8,10 +8,10 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \
diag/fs_tracepoint.o
-mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o
+mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
- fpga/ipsec.o
+ fpga/ipsec.o fpga/tls.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
@@ -28,4 +28,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
+
CFLAGS_tracepoint.o := -I$(src)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
new file mode 100644
index 000000000000..77ac19f38cbe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx5/device.h>
+
+#include "accel/tls.h"
+#include "mlx5_core.h"
+#include "fpga/tls.h"
+
+int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid)
+{
+ return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info,
+ start_offload_tcp_sn, p_swid);
+}
+
+void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid)
+{
+ mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL);
+}
+
+bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
+{
+ return mlx5_fpga_is_tls_device(mdev);
+}
+
+u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
+{
+ return mlx5_fpga_tls_device_caps(mdev);
+}
+
+int mlx5_accel_tls_init(struct mlx5_core_dev *mdev)
+{
+ return mlx5_fpga_tls_init(mdev);
+}
+
+void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
+{
+ mlx5_fpga_tls_cleanup(mdev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
new file mode 100644
index 000000000000..6f9c9f446ecc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5_ACCEL_TLS_H__
+#define __MLX5_ACCEL_TLS_H__
+
+#include <linux/mlx5/driver.h>
+#include <linux/tls.h>
+
+#ifdef CONFIG_MLX5_ACCEL
+
+enum {
+ MLX5_ACCEL_TLS_TX = BIT(0),
+ MLX5_ACCEL_TLS_RX = BIT(1),
+ MLX5_ACCEL_TLS_V12 = BIT(2),
+ MLX5_ACCEL_TLS_V13 = BIT(3),
+ MLX5_ACCEL_TLS_LRO = BIT(4),
+ MLX5_ACCEL_TLS_IPV6 = BIT(5),
+ MLX5_ACCEL_TLS_AES_GCM128 = BIT(30),
+ MLX5_ACCEL_TLS_AES_GCM256 = BIT(31),
+};
+
+struct mlx5_ifc_tls_flow_bits {
+ u8 src_port[0x10];
+ u8 dst_port[0x10];
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
+ u8 ipv6[0x1];
+ u8 direction_sx[0x1];
+ u8 reserved_at_2[0x1e];
+};
+
+int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid);
+void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid);
+bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
+u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
+int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
+void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
+
+#else
+
+static inline int
+mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid) { return 0; }
+static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { }
+static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
+static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
+static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
+static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
+
+#endif
+
+#endif /* __MLX5_ACCEL_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 21cd1703a862..487388aed98f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -135,6 +135,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
return cmd->cmd_buf + (idx << cmd->log_stride);
}
+static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
+{
+ int size = msg->len;
+ int blen = size - min_t(int, sizeof(msg->first.data), size);
+
+ return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
+}
+
static u8 xor8_buf(void *buf, size_t offset, int len)
{
u8 *ptr = buf;
@@ -174,10 +182,7 @@ static void calc_block_sig(struct mlx5_cmd_prot_block *block)
static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
struct mlx5_cmd_mailbox *next = msg->next;
- int size = msg->len;
- int blen = size - min_t(int, sizeof(msg->first.data), size);
- int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
- / MLX5_CMD_DATA_BLOCK_SIZE;
+ int n = mlx5_calc_cmd_blocks(msg);
int i = 0;
for (i = 0; i < n && next; i++) {
@@ -220,12 +225,9 @@ static void free_cmd(struct mlx5_cmd_work_ent *ent)
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
struct mlx5_cmd_mailbox *next = ent->out->next;
+ int n = mlx5_calc_cmd_blocks(ent->out);
int err;
u8 sig;
- int size = ent->out->len;
- int blen = size - min_t(int, sizeof(ent->out->first.data), size);
- int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
- / MLX5_CMD_DATA_BLOCK_SIZE;
int i = 0;
sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
@@ -720,9 +722,11 @@ static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
struct mlx5_cmd_mailbox *next = msg->next;
+ int n = mlx5_calc_cmd_blocks(msg);
int data_only;
u32 offset = 0;
int dump_len;
+ int i;
data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
@@ -749,7 +753,7 @@ static void dump_command(struct mlx5_core_dev *dev,
offset += sizeof(*ent->lay);
}
- while (next && offset < msg->len) {
+ for (i = 0; i < n && next; i++) {
if (data_only) {
dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
dump_buf(next->buf, dump_len, 1, offset);
@@ -1137,7 +1141,6 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *tmp, *head = NULL;
struct mlx5_cmd_prot_block *block;
struct mlx5_cmd_msg *msg;
- int blen;
int err;
int n;
int i;
@@ -1146,8 +1149,8 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
if (!msg)
return ERR_PTR(-ENOMEM);
- blen = size - min_t(int, sizeof(msg->first.data), size);
- n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;
+ msg->len = size;
+ n = mlx5_calc_cmd_blocks(msg);
for (i = 0; i < n; i++) {
tmp = alloc_cmd_box(dev, flags);
@@ -1165,7 +1168,6 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
head = tmp;
}
msg->next = head;
- msg->len = size;
return msg;
err_alloc:
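mlx5_calc_cmd_blocks(), introduced at the top of this file's diff, centralizes arithmetic that calc_chain_sig(), verify_signature() and mlx5_alloc_cmd_msg() previously open-coded: bytes that do not fit in the inline msg->first.data area are spread over fixed-size data mailboxes, and the helper returns how many are needed. A worked example, where the 16-byte inline area and 512-byte MLX5_CMD_DATA_BLOCK_SIZE are illustrative assumptions rather than values taken from this diff:

	/* msg->len = 2000: blen = 2000 - min(16, 2000) = 1984
	 *                  n    = DIV_ROUND_UP(1984, 512) = 4 mailboxes
	 * msg->len = 12 (fits inline): blen = 12 - 12 = 0, so n = 0
	 */

Note that mlx5_alloc_cmd_msg() now sets msg->len before building the mailbox chain precisely so that it, too, can call the helper.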
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index d93ff567b40d..b3820a34e773 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -235,7 +235,7 @@ const char *parse_fs_dst(struct trace_seq *p,
switch (dst->type) {
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
- trace_seq_printf(p, "vport=%u\n", dst->vport_num);
+ trace_seq_printf(p, "vport=%u\n", dst->vport.num);
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
trace_seq_printf(p, "ft=%p\n", dst->ft);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3317a4da87cb..bc91a7335c93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -55,6 +55,9 @@
struct page_pool;
+#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
+#define MLX5E_METADATA_ETHER_LEN 8
+
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
@@ -241,6 +244,7 @@ struct mlx5e_params {
bool vlan_strip_disable;
bool scatter_fcs_en;
bool rx_dim_enabled;
+ bool tx_dim_enabled;
u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
@@ -284,8 +288,6 @@ enum {
MLX5E_RQ_STATE_AM,
};
-#define MLX5E_TEST_BIT(state, nr) (state & BIT(nr))
-
struct mlx5e_cq {
/* data path - accessed per cqe */
struct mlx5_cqwq wq;
@@ -330,6 +332,8 @@ enum {
MLX5E_SQ_STATE_ENABLED,
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
+ MLX5E_SQ_STATE_AM,
+ MLX5E_SQ_STATE_TLS,
};
struct mlx5e_sq_wqe_info {
@@ -342,6 +346,7 @@ struct mlx5e_txqsq {
/* dirtied @completion */
u16 cc;
u32 dma_fifo_cc;
+ struct net_dim dim; /* Adaptive Moderation */
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
@@ -629,7 +634,6 @@ struct mlx5e_flow_table {
struct mlx5e_tc_table {
struct mlx5_flow_table *t;
- struct rhashtable_params ht_params;
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
@@ -794,6 +798,9 @@ struct mlx5e_priv {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_ipsec *ipsec;
#endif
+#ifdef CONFIG_MLX5_EN_TLS
+ struct mlx5e_tls *tls;
+#endif
};
struct mlx5e_profile {
@@ -824,6 +831,8 @@ void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi);
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
@@ -939,6 +948,18 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}
+static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi)
+{
+ struct mlx5_wq_cyc *wq;
+
+ wq = &sq->wq;
+ *pi = sq->pc & wq->sz_m1;
+ *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+ memset(*wqe, 0, sizeof(**wqe));
+}
+
static inline
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
@@ -1096,9 +1117,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
-int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv);
-
/* mlx5e generic netdev management API */
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
@@ -1111,4 +1129,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
u16 max_channels, u16 mtu);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
+void mlx5e_tx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */
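The new mlx5e_sq_fetch_wqe() helper above derives the producer index with sq->pc & wq->sz_m1. That only works if the cyclic work queue size is a power of two, which the mask-style sz_m1 field presumes: the AND is then equivalent to a modulo without the division. A small worked illustration:

	/* ring of 8 entries, sz_m1 = 7 = 0b111:
	 *   pc = 5   ->  5 & 7 = 5
	 *   pc = 8   ->  8 & 7 = 0    (wraps back to the start)
	 *   pc = 13  -> 13 & 7 = 5
	 * i.e. pi == pc % size for any monotonically increasing pc.
	 */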
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
new file mode 100644
index 000000000000..f20074dbef32
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5E_EN_ACCEL_H__
+#define __MLX5E_EN_ACCEL_H__
+
+#ifdef CONFIG_MLX5_ACCEL
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/tls_rxtx.h"
+#include "en.h"
+
+static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
+ struct mlx5e_txqsq *sq,
+ struct net_device *dev,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi)
+{
+#ifdef CONFIG_MLX5_EN_TLS
+ if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
+ skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
+ if (unlikely(!skb))
+ return NULL;
+ }
+#endif
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
+ skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
+ if (unlikely(!skb))
+ return NULL;
+ }
+#endif
+
+ return skb;
+}
+
+#endif /* CONFIG_MLX5_ACCEL */
+
+#endif /* __MLX5E_EN_ACCEL_H__ */
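mlx5e_accel_handle_tx() chains the offload-specific TX handlers, TLS first and IPsec second; each one may rewrite the skb or consume it and return NULL, in which case the caller must stop processing the packet. It is presumably invoked from the driver's xmit path along these lines (a sketch under that assumption, not a quote of en_tx.c):

	mlx5e_sq_fetch_wqe(sq, &wqe, &pi);

	skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
	if (unlikely(!skb))
		return NETDEV_TX_OK;	/* a handler consumed the skb (e.g. dropped it on error) */

	return mlx5e_sq_xmit(sq, skb, wqe, pi);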
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 1198fc1eba4c..93bf10e6508c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -45,9 +45,6 @@
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
-#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
-#define MLX5E_METADATA_ETHER_LEN 8
-
struct mlx5e_priv;
struct mlx5e_ipsec_sw_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
new file mode 100644
index 000000000000..d167845271c3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <net/ipv6.h>
+#include "en_accel/tls.h"
+#include "accel/tls.h"
+
+static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ MLX5_SET(tls_flow, flow, ipv6, 0);
+ memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
+ memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ MLX5_SET(tls_flow, flow, ipv6, 1);
+ memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+}
+#endif
+
+static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport,
+ MLX5_FLD_SZ_BYTES(tls_flow, src_port));
+ memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport,
+ MLX5_FLD_SZ_BYTES(tls_flow, dst_port));
+}
+
+static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps)
+{
+ switch (sk->sk_family) {
+ case AF_INET:
+ mlx5e_tls_set_ipv4_flow(flow, sk);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ if (!sk->sk_ipv6only &&
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
+ mlx5e_tls_set_ipv4_flow(flow, sk);
+ break;
+ }
+ if (!(caps & MLX5_ACCEL_TLS_IPV6))
+ goto error_out;
+
+ mlx5e_tls_set_ipv6_flow(flow, sk);
+ break;
+#endif
+ default:
+ goto error_out;
+ }
+
+ mlx5e_tls_set_flow_tcp_ports(flow, sk);
+ return 0;
+error_out:
+ return -EINVAL;
+}
+
+static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 caps = mlx5_accel_tls_device_caps(mdev);
+ int ret = -ENOMEM;
+ void *flow;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return -EINVAL;
+
+ flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
+ if (!flow)
+ return ret;
+
+ ret = mlx5e_tls_set_flow(flow, sk, caps);
+ if (ret)
+ goto free_flow;
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ struct mlx5e_tls_offload_context *tx_ctx =
+ mlx5e_get_tls_tx_context(tls_ctx);
+ u32 swid;
+
+ ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info,
+ start_offload_tcp_sn, &swid);
+ if (ret < 0)
+ goto free_flow;
+
+ tx_ctx->swid = htonl(swid);
+ tx_ctx->expected_seq = start_offload_tcp_sn;
+ }
+
+ return 0;
+free_flow:
+ kfree(flow);
+ return ret;
+}
+
+static void mlx5e_tls_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid);
+
+ mlx5_accel_tls_del_tx_flow(priv->mdev, swid);
+ } else {
+ netdev_err(netdev, "unsupported direction %d\n", direction);
+ }
+}
+
+static const struct tlsdev_ops mlx5e_tls_ops = {
+ .tls_dev_add = mlx5e_tls_add,
+ .tls_dev_del = mlx5e_tls_del,
+};
+
+void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+
+ if (!mlx5_accel_is_tls_device(priv->mdev))
+ return;
+
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->tlsdev_ops = &mlx5e_tls_ops;
+}
+
+int mlx5e_tls_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tls *tls = kzalloc(sizeof(*tls), GFP_KERNEL);
+
+ if (!tls)
+ return -ENOMEM;
+
+ priv->tls = tls;
+ return 0;
+}
+
+void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tls *tls = priv->tls;
+
+ if (!tls)
+ return;
+
+ kfree(tls);
+ priv->tls = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
new file mode 100644
index 000000000000..b6162178f621
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#ifndef __MLX5E_TLS_H__
+#define __MLX5E_TLS_H__
+
+#ifdef CONFIG_MLX5_EN_TLS
+
+#include <net/tls.h>
+#include "en.h"
+
+struct mlx5e_tls_sw_stats {
+ atomic64_t tx_tls_drop_metadata;
+ atomic64_t tx_tls_drop_resync_alloc;
+ atomic64_t tx_tls_drop_no_sync_data;
+ atomic64_t tx_tls_drop_bypass_required;
+};
+
+struct mlx5e_tls {
+ struct mlx5e_tls_sw_stats sw_stats;
+};
+
+struct mlx5e_tls_offload_context {
+ struct tls_offload_context base;
+ u32 expected_seq;
+ __be32 swid;
+};
+
+static inline struct mlx5e_tls_offload_context *
+mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
+{
+ BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) >
+ TLS_OFFLOAD_CONTEXT_SIZE);
+ return container_of(tls_offload_ctx(tls_ctx),
+ struct mlx5e_tls_offload_context,
+ base);
+}
+
+void mlx5e_tls_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_tls_init(struct mlx5e_priv *priv);
+void mlx5e_tls_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_tls_get_count(struct mlx5e_priv *priv);
+int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
+int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
+
+#else
+
+static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { }
+static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
+static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
+static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
+static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
+
+#endif
+
+#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
new file mode 100644
index 000000000000..ad2790fb5966
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "en_accel/tls.h"
+#include "en_accel/tls_rxtx.h"
+
+#define SYNDROME_OFFLOAD_REQUIRED 32
+#define SYNDROME_SYNC 33
+
+struct sync_info {
+ u64 rcd_sn;
+ s32 sync_len;
+ int nr_frags;
+ skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+struct mlx5e_tls_metadata {
+ /* One byte of syndrome followed by 3 bytes of swid */
+ __be32 syndrome_swid;
+ __be16 first_seq;
+ /* packet type ID field */
+ __be16 ethertype;
+} __packed;
+
+static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
+{
+ struct mlx5e_tls_metadata *pet;
+ struct ethhdr *eth;
+
+ if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
+ return -ENOMEM;
+
+ eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
+ skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
+ pet = (struct mlx5e_tls_metadata *)(eth + 1);
+
+ memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
+ 2 * ETH_ALEN);
+
+ eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
+ pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
+
+ return 0;
+}
+
+static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context,
+ u32 tcp_seq, struct sync_info *info)
+{
+ int remaining, i = 0, ret = -EINVAL;
+ struct tls_record_info *record;
+ unsigned long flags;
+ s32 sync_size;
+
+ spin_lock_irqsave(&context->base.lock, flags);
+ record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);
+
+ if (unlikely(!record))
+ goto out;
+
+ sync_size = tcp_seq - tls_record_start_seq(record);
+ info->sync_len = sync_size;
+ if (unlikely(sync_size < 0)) {
+ if (tls_record_is_start_marker(record))
+ goto done;
+
+ goto out;
+ }
+
+ remaining = sync_size;
+ while (remaining > 0) {
+ info->frags[i] = record->frags[i];
+ __skb_frag_ref(&info->frags[i]);
+ remaining -= skb_frag_size(&info->frags[i]);
+
+ if (remaining < 0)
+ skb_frag_size_add(&info->frags[i], remaining);
+
+ i++;
+ }
+ info->nr_frags = i;
+done:
+ ret = 0;
+out:
+ spin_unlock_irqrestore(&context->base.lock, flags);
+ return ret;
+}
+
+static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
+ struct sk_buff *nskb, u32 tcp_seq,
+ int headln, __be64 rcd_sn)
+{
+ struct mlx5e_tls_metadata *pet;
+ u8 syndrome = SYNDROME_SYNC;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ int data_len, mss;
+
+ nskb->dev = skb->dev;
+ skb_reset_mac_header(nskb);
+ skb_set_network_header(nskb, skb_network_offset(skb));
+ skb_set_transport_header(nskb, skb_transport_offset(skb));
+ memcpy(nskb->data, skb->data, headln);
+ memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));
+
+ iph = ip_hdr(nskb);
+ iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
+ th = tcp_hdr(nskb);
+ data_len = nskb->len - headln;
+ tcp_seq -= data_len;
+ th->seq = htonl(tcp_seq);
+
+ mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
+ skb_shinfo(nskb)->gso_size = 0;
+ if (data_len > mss) {
+ skb_shinfo(nskb)->gso_size = mss;
+ skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
+ }
+ skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
+
+ pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
+ memcpy(pet, &syndrome, sizeof(syndrome));
+ pet->first_seq = htons(tcp_seq);
+
+ /* MLX5 devices don't care about the checksum partial start, offset
+ * and pseudo header
+ */
+ nskb->ip_summed = CHECKSUM_PARTIAL;
+
+ nskb->xmit_more = 1;
+ nskb->queue_mapping = skb->queue_mapping;
+}
+
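+/* Handle a packet whose TCP sequence does not match what the device expects:
+ * pass it through untouched if its payload needs no offload, drop it if no
+ * sync data can be found, or transmit a resync packet carrying the missing
+ * record bytes ahead of it.
+ */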
+static struct sk_buff *
+mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
+ struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi,
+ struct mlx5e_tls *tls)
+{
+ u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
+ struct sync_info info;
+ struct sk_buff *nskb;
+ int linear_len = 0;
+ int headln;
+ int i;
+
+ sq->stats.tls_ooo++;
+
+ if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
+ /* We might get here if a retransmission reaches the driver
+ * after the relevant record is acked.
+ * It should be safe to drop the packet in this case
+ */
+ atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
+ goto err_out;
+ }
+
+ if (unlikely(info.sync_len < 0)) {
+ u32 payload;
+
+ headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ payload = skb->len - headln;
+ if (likely(payload <= -info.sync_len))
+ /* SKB payload doesn't require offload */
+ return skb;
+
+ atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
+ goto err_out;
+ }
+
+ if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
+ atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
+ goto err_out;
+ }
+
+ headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ linear_len += headln + sizeof(info.rcd_sn);
+ nskb = alloc_skb(linear_len, GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
+ goto err_out;
+ }
+
+ context->expected_seq = tcp_seq + skb->len - headln;
+ skb_put(nskb, linear_len);
+ for (i = 0; i < info.nr_frags; i++)
+ skb_shinfo(nskb)->frags[i] = info.frags[i];
+
+ skb_shinfo(nskb)->nr_frags = info.nr_frags;
+ nskb->data_len = info.sync_len;
+ nskb->len += info.sync_len;
+ sq->stats.tls_resync_bytes += nskb->len;
+ mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
+ cpu_to_be64(info.rcd_sn));
+ mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
+ mlx5e_sq_fetch_wqe(sq, wqe, pi);
+ return skb;
+
+err_out:
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
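+/* TLS TX offload entry point on the transmit path: returns the skb to send
+ * (possibly prefixed with a metadata header), or NULL if it had to be
+ * dropped. Packets from non-offloaded sockets and pure ACKs pass through
+ * untouched.
+ */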
+struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_tls_offload_context *context;
+ struct tls_context *tls_ctx;
+ u32 expected_seq;
+ int datalen;
+ u32 skb_seq;
+
+ if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+ goto out;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (!datalen)
+ goto out;
+
+ tls_ctx = tls_get_ctx(skb->sk);
+ if (unlikely(tls_ctx->netdev != netdev))
+ goto out;
+
+ skb_seq = ntohl(tcp_hdr(skb)->seq);
+ context = mlx5e_get_tls_tx_context(tls_ctx);
+ expected_seq = context->expected_seq;
+
+ if (unlikely(expected_seq != skb_seq)) {
+ skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
+ goto out;
+ }
+
+ if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
+ atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ goto out;
+ }
+
+ context->expected_seq = skb_seq + datalen;
+out:
+ return skb;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
new file mode 100644
index 000000000000..405dfd302225
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5E_TLS_RXTX_H__
+#define __MLX5E_TLS_RXTX_H__
+
+#ifdef CONFIG_MLX5_EN_TLS
+
+#include <linux/skbuff.h>
+#include "en.h"
+
+struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi);
+
+#endif /* CONFIG_MLX5_EN_TLS */
+
+#endif /* __MLX5E_TLS_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
new file mode 100644
index 000000000000..01468ec27446
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/ethtool.h>
+#include <net/sock.h>
+
+#include "en.h"
+#include "accel/tls.h"
+#include "fpga/sdk.h"
+#include "en_accel/tls.h"
+
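+/* Software counters for packets dropped on the TLS TX path, reported
+ * through the ethtool statistics groups.
+ */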
+static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
+};
+
+#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
+ atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
+
+#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
+
+int mlx5e_tls_get_count(struct mlx5e_priv *priv)
+{
+ if (!priv->tls)
+ return 0;
+
+ return NUM_TLS_SW_COUNTERS;
+}
+
+int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+ unsigned int i, idx = 0;
+
+ if (!priv->tls)
+ return 0;
+
+ for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_tls_sw_stats_desc[i].format);
+
+ return NUM_TLS_SW_COUNTERS;
+}
+
+int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
+{
+ int i, idx = 0;
+
+ if (!priv->tls)
+ return 0;
+
+ for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
+ mlx5e_tls_sw_stats_desc, i);
+
+ return NUM_TLS_SW_COUNTERS;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 610d485c4b03..f64b5e78519b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -565,7 +565,7 @@ static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
err = mlx5_modify_rule_destination(rule, &dst, NULL);
if (err)
netdev_warn(priv->netdev,
- "Failed to modfiy aRFS rule destination to rq=%d\n", rxq);
+ "Failed to modify aRFS rule destination to rq=%d\n", rxq);
}
static void arfs_handle_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 3d46ef48d5b8..c641d5656b2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1007,12 +1007,14 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
mutex_lock(&priv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto out;
-
new_channels.params = priv->channels.params;
mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
+
/* Skip if tx_min_inline is the same */
if (new_channels.params.tx_min_inline_mode ==
priv->channels.params.tx_min_inline_mode)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
index 602851ab5b14..d67adf70a97b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -33,16 +33,30 @@
#include <linux/net_dim.h>
#include "en.h"
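+/* Apply the newly chosen moderation profile to the CQ and restart the DIM
+ * measurement cycle; shared by the RX and TX DIM work handlers.
+ */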
+static void
+mlx5e_complete_dim_work(struct net_dim *dim, struct net_dim_cq_moder moder,
+ struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq)
+{
+ mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts);
+ dim->state = NET_DIM_START_MEASURE;
+}
+
void mlx5e_rx_dim_work(struct work_struct *work)
{
- struct net_dim *dim = container_of(work, struct net_dim,
- work);
+ struct net_dim *dim = container_of(work, struct net_dim, work);
struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
- struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
- dim->profile_ix);
+ struct net_dim_cq_moder cur_moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
- mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
- cur_profile.usec, cur_profile.pkts);
+ mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq);
+}
- dim->state = NET_DIM_START_MEASURE;
+void mlx5e_tx_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim, work);
+ struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
+ struct net_dim_cq_moder cur_moder =
+ net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+
+ mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 37fd0245b6c1..2b786c4d3dab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -389,14 +389,20 @@ static int mlx5e_set_channels(struct net_device *dev,
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal)
{
+ struct net_dim_cq_moder *rx_moder, *tx_moder;
+
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -EOPNOTSUPP;
- coal->rx_coalesce_usecs = priv->channels.params.rx_cq_moderation.usec;
- coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
- coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec;
- coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
- coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
+ rx_moder = &priv->channels.params.rx_cq_moderation;
+ coal->rx_coalesce_usecs = rx_moder->usec;
+ coal->rx_max_coalesced_frames = rx_moder->pkts;
+ coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
+
+ tx_moder = &priv->channels.params.tx_cq_moderation;
+ coal->tx_coalesce_usecs = tx_moder->usec;
+ coal->tx_max_coalesced_frames = tx_moder->pkts;
+ coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled;
return 0;
}
@@ -438,6 +444,7 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal)
{
+ struct net_dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
int err = 0;
@@ -463,11 +470,15 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
new_channels.params = priv->channels.params;
- new_channels.params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
- new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
- new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
- new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
- new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ rx_moder = &new_channels.params.rx_cq_moderation;
+ rx_moder->usec = coal->rx_coalesce_usecs;
+ rx_moder->pkts = coal->rx_max_coalesced_frames;
+ new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+
+ tx_moder = &new_channels.params.tx_cq_moderation;
+ tx_moder->usec = coal->tx_coalesce_usecs;
+ tx_moder->pkts = coal->tx_max_coalesced_frames;
+ new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -475,7 +486,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
}
/* we are opened */
- reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
+ reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) ||
+ (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled);
+
if (!reset) {
mlx5e_set_priv_channels_coalesce(priv, coal);
priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index f64dda2bed31..76cc10e44080 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -277,7 +277,6 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
- mlx5e_vport_context_update_vlans(priv);
if (priv->fs.vlan.active_cvlans_rule[vid]) {
mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
priv->fs.vlan.active_cvlans_rule[vid] = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index f10037419978..b5a7580b12fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -42,7 +42,9 @@
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/tls.h"
#include "accel/ipsec.h"
+#include "accel/tls.h"
#include "vxlan.h"
struct mlx5e_rq_param {
@@ -745,23 +747,24 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
-static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
- unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
+ unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
struct mlx5e_channel *c = rq->channel;
struct mlx5_wq_ll *wq = &rq->wq;
u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
- while (time_before(jiffies, exp_time)) {
+ do {
if (wq->cur_sz >= min_wqes)
return 0;
msleep(20);
- }
+ } while (time_before(jiffies, exp_time));
+
+ netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
+ c->ix, rq->rqn, wq->cur_sz, min_wqes);
- netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
- rq->rqn, wq->cur_sz, min_wqes);
return -ETIMEDOUT;
}
@@ -817,7 +820,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
goto err_destroy_rq;
if (params->rx_dim_enabled)
- c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
+ __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
return 0;
@@ -1014,6 +1017,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
+ if (mlx5_accel_is_tls_device(c->priv->mdev))
+ set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
@@ -1025,6 +1030,9 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
+ INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
+ sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
+
sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
return 0;
@@ -1188,6 +1196,9 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
if (tx_rate)
mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
+ if (params->tx_dim_enabled)
+ sq->state |= BIT(MLX5E_SQ_STATE_AM);
+
return 0;
err_free_txqsq:
@@ -1931,7 +1942,6 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
- param->wq.linear = 1;
}
static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
@@ -2119,13 +2129,11 @@ static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
int err = 0;
int i;
- for (i = 0; i < chs->num; i++) {
- err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
- if (err)
- break;
- }
+ for (i = 0; i < chs->num; i++)
+ err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
+ err ? 0 : 20000);
- return err;
+ return err ? -ETIMEDOUT : 0;
}
static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
@@ -3128,22 +3136,23 @@ out:
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *cls_flower)
+ struct tc_cls_flower_offload *cls_flower,
+ int flags)
{
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- return mlx5e_configure_flower(priv, cls_flower);
+ return mlx5e_configure_flower(priv, cls_flower, flags);
case TC_CLSFLOWER_DESTROY:
- return mlx5e_delete_flower(priv, cls_flower);
+ return mlx5e_delete_flower(priv, cls_flower, flags);
case TC_CLSFLOWER_STATS:
- return mlx5e_stats_flower(priv, cls_flower);
+ return mlx5e_stats_flower(priv, cls_flower, flags);
default:
return -EOPNOTSUPP;
}
}
-int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
+static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
{
struct mlx5e_priv *priv = cb_priv;
@@ -3152,7 +3161,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(priv, type_data);
+ return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
default:
return -EOPNOTSUPP;
}
@@ -4084,18 +4093,48 @@ static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
- params->tx_cq_moderation.cq_period_mode = cq_period_mode;
+ struct net_dim_cq_moder moder;
- params->tx_cq_moderation.pkts =
- MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
- params->tx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ moder.cq_period_mode = cq_period_mode;
+ moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
+
+ return moder;
+}
+static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
+{
+ struct net_dim_cq_moder moder;
+
+ moder.cq_period_mode = cq_period_mode;
+ moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- params->tx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
+ moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
+
+ return moder;
+}
+
+static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
+{
+ return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
+ NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ if (params->tx_dim_enabled) {
+ u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
+
+ params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
+ } else {
+ params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
+ }
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
params->tx_cq_moderation.cq_period_mode ==
@@ -4104,28 +4143,12 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
- params->rx_cq_moderation.cq_period_mode = cq_period_mode;
-
- params->rx_cq_moderation.pkts =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
- params->rx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
-
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- params->rx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
-
if (params->rx_dim_enabled) {
- switch (cq_period_mode) {
- case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
- params->rx_cq_moderation =
- net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE);
- break;
- case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
- default:
- params->rx_cq_moderation =
- net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE);
- }
+ u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
+
+ params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
+ } else {
+ params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
}
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
@@ -4189,6 +4212,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
@@ -4355,6 +4379,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
#endif
mlx5e_ipsec_build_netdev(priv);
+ mlx5e_tls_build_netdev(priv);
}
static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
@@ -4396,12 +4421,16 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
+ err = mlx5e_tls_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_vxlan_init(priv);
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
+ mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_vxlan_cleanup(priv);
}
@@ -4433,7 +4462,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
goto err_destroy_direct_tirs;
}
- err = mlx5e_tc_init(priv);
+ err = mlx5e_tc_nic_init(priv);
if (err)
goto err_destroy_flow_steering;
@@ -4454,7 +4483,7 @@ err_destroy_indirect_rqts:
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
- mlx5e_tc_cleanup(priv);
+ mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index d8f68e4d1018..c3034f58aa33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -66,18 +66,36 @@ static const struct counter_desc sw_rep_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};
-#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
+struct vport_stats {
+ u64 vport_rx_packets;
+ u64 vport_tx_packets;
+ u64 vport_rx_bytes;
+ u64 vport_tx_bytes;
+};
+
+static const struct counter_desc vport_rep_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
+ { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
+ { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
+};
+
+#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
+#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
static void mlx5e_rep_get_strings(struct net_device *dev,
u32 stringset, uint8_t *data)
{
- int i;
+ int i, j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+ for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
strcpy(data + (i * ETH_GSTRING_LEN),
sw_rep_stats_desc[i].format);
+ for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
+ strcpy(data + (i * ETH_GSTRING_LEN),
+ vport_rep_stats_desc[j].format);
break;
}
}
@@ -140,7 +158,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int i;
+ int i, j;
if (!data)
return;
@@ -148,18 +166,23 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_rep_update_sw_counters(priv);
+ mlx5e_rep_update_hw_counters(priv);
mutex_unlock(&priv->state_lock);
- for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+ for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
sw_rep_stats_desc, i);
+
+ for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
+ data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
+ vport_rep_stats_desc, j);
}
static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return NUM_VPORT_REP_COUNTERS;
+ return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
default:
return -EOPNOTSUPP;
}
@@ -681,8 +704,8 @@ static int mlx5e_rep_open(struct net_device *dev)
goto unlock;
if (!mlx5_modify_vport_admin_state(priv->mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
netif_carrier_on(dev);
unlock:
@@ -699,8 +722,8 @@ static int mlx5e_rep_close(struct net_device *dev)
mutex_lock(&priv->state_lock);
mlx5_modify_vport_admin_state(priv->mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
ret = mlx5e_close_locked(dev);
mutex_unlock(&priv->state_lock);
return ret;
@@ -723,15 +746,31 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *cls_flower)
+ struct tc_cls_flower_offload *cls_flower, int flags)
{
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- return mlx5e_configure_flower(priv, cls_flower);
+ return mlx5e_configure_flower(priv, cls_flower, flags);
case TC_CLSFLOWER_DESTROY:
- return mlx5e_delete_flower(priv, cls_flower);
+ return mlx5e_delete_flower(priv, cls_flower, flags);
case TC_CLSFLOWER_STATS:
- return mlx5e_stats_flower(priv, cls_flower);
+ return mlx5e_stats_flower(priv, cls_flower, flags);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct mlx5e_priv *priv = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
default:
return -EOPNOTSUPP;
}
@@ -747,7 +786,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(priv, type_data);
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
default:
return -EOPNOTSUPP;
}
@@ -877,13 +916,14 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
};
static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+ struct mlx5e_params *params, u16 mtu)
{
u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
+ params->sw_mtu = mtu;
params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
@@ -931,7 +971,7 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
priv->channels.params.num_channels = profile->max_nch(mdev);
- mlx5e_build_rep_params(mdev, &priv->channels.params);
+ mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
mlx5e_build_rep_netdev(netdev);
mlx5e_timestamp_init(priv);
@@ -964,14 +1004,8 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
}
rpriv->vport_rx_rule = flow_rule;
- err = mlx5e_tc_init(priv);
- if (err)
- goto err_del_flow_rule;
-
return 0;
-err_del_flow_rule:
- mlx5_del_flow_rules(rpriv->vport_rx_rule);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
@@ -983,7 +1017,6 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- mlx5e_tc_cleanup(priv);
mlx5_del_flow_rules(rpriv->vport_rx_rule);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_direct_rqts(priv);
@@ -1041,8 +1074,15 @@ mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
if (err)
goto err_remove_sqs;
+ /* init shared tc flow table */
+ err = mlx5e_tc_esw_init(&rpriv->tc_ht);
+ if (err)
+ goto err_neigh_cleanup;
+
return 0;
+err_neigh_cleanup:
+ mlx5e_rep_neigh_cleanup(rpriv);
err_remove_sqs:
mlx5e_remove_sqs_fwd_rules(priv);
return err;
@@ -1057,9 +1097,8 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_remove_sqs_fwd_rules(priv);
- /* clean (and re-init) existing uplink offloaded TC rules */
- mlx5e_tc_cleanup(priv);
- mlx5e_tc_init(priv);
+ /* clean uplink offloaded TC rules, delete shared tc flow table */
+ mlx5e_tc_esw_cleanup(&rpriv->tc_ht);
mlx5e_rep_neigh_cleanup(rpriv);
}
@@ -1106,7 +1145,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
upriv = netdev_priv(uplink_rpriv->netdev);
- err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
+ err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev,
upriv);
if (err)
goto err_neigh_cleanup;
@@ -1121,7 +1160,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
return 0;
err_egdev_cleanup:
- tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
+ tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
upriv);
err_neigh_cleanup:
@@ -1150,7 +1189,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
REP_ETH);
upriv = netdev_priv(uplink_rpriv->netdev);
- tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
+ tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
upriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index b9b481f2833a..844d32d5c29f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -59,6 +59,7 @@ struct mlx5e_rep_priv {
struct net_device *netdev;
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
+ struct rhashtable tc_ht; /* valid for uplink rep */
};
static inline
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7bbf0db27a01..53f72923b164 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -450,7 +450,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
struct mlx5_wq_ll *wq = &rq->wq;
int err;
- if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return false;
if (mlx5_wq_ll_is_full(wq))
@@ -508,7 +508,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
struct mlx5_cqe64 *cqe;
- if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
@@ -525,7 +525,7 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
- if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return false;
mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq);
@@ -681,11 +681,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
struct sk_buff *skb)
{
+ u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
struct net_device *netdev = rq->netdev;
- int lro_num_seg;
skb->mac_len = ETH_HLEN;
- lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
@@ -808,9 +807,9 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
}
/* returns true if packet was consumed by xdp */
-static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di,
- void *va, u16 *rx_headroom, u32 *len)
+static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *di,
+ void *va, u16 *rx_headroom, u32 *len)
{
struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
struct xdp_buff xdp;
@@ -1133,7 +1132,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5_cqe64 *cqe;
int work_done = 0;
- if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
if (cq->decmprs_left)
@@ -1186,7 +1185,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
sq = container_of(cq, struct mlx5e_xdpsq, cq);
- if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 707976482c09..027f54ac1ca2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -290,7 +290,7 @@ static int mlx5e_test_loopback(struct mlx5e_priv *priv)
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
netdev_err(priv->netdev,
- "\tCan't perform loobpack test while device is down\n");
+ "\tCan't perform loopback test while device is down\n");
return -ENODEV;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index b08c94422907..e17919c0af08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -32,6 +32,7 @@
#include "en.h"
#include "en_accel/ipsec.h"
+#include "en_accel/tls.h"
static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
@@ -43,6 +44,12 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
+
+#ifdef CONFIG_MLX5_EN_TLS
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
+#endif
+
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
@@ -161,6 +168,10 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
s->tx_csum_none += sq_stats->csum_none;
s->tx_csum_partial += sq_stats->csum_partial;
+#ifdef CONFIG_MLX5_EN_TLS
+ s->tx_tls_ooo += sq_stats->tls_ooo;
+ s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
+#endif
}
}
@@ -1065,6 +1076,22 @@ static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
mlx5e_ipsec_update_stats(priv);
}
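+/* Hook the TLS software counters into the generic ethtool stats groups. */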
+static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
+{
+ return mlx5e_tls_get_count(priv);
+}
+
+static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
+}
+
+static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ return idx + mlx5e_tls_get_stats(priv, data + idx);
+}
+
static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
@@ -1268,6 +1295,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
.update_stats = mlx5e_grp_ipsec_update_stats,
},
{
+ .get_num_stats = mlx5e_grp_tls_get_num_stats,
+ .fill_strings = mlx5e_grp_tls_fill_strings,
+ .fill_stats = mlx5e_grp_tls_fill_stats,
+ },
+ {
.get_num_stats = mlx5e_grp_channels_get_num_stats,
.fill_strings = mlx5e_grp_channels_fill_strings,
.fill_stats = mlx5e_grp_channels_fill_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 53111a2df587..a36e6a87066b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -93,6 +93,11 @@ struct mlx5e_sw_stats {
u64 rx_cache_waive;
u64 ch_eq_rearm;
+#ifdef CONFIG_MLX5_EN_TLS
+ u64 tx_tls_ooo;
+ u64 tx_tls_resync_bytes;
+#endif
+
/* Special handling counters */
u64 link_down_events_phy;
};
@@ -194,6 +199,10 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
+#ifdef CONFIG_MLX5_EN_TLS
+ u64 tls_ooo;
+ u64 tls_resync_bytes;
+#endif
/* less likely accessed in data path */
u64 csum_none;
u64 stopped;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 4197001f9801..674f1d7d2737 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -58,19 +58,25 @@ struct mlx5_nic_flow_attr {
u32 flow_tag;
u32 mod_hdr_id;
u32 hairpin_tirn;
+ u8 match_level;
struct mlx5_flow_table *hairpin_ft;
};
+#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
+
enum {
- MLX5E_TC_FLOW_ESWITCH = BIT(0),
- MLX5E_TC_FLOW_NIC = BIT(1),
- MLX5E_TC_FLOW_OFFLOADED = BIT(2),
- MLX5E_TC_FLOW_HAIRPIN = BIT(3),
- MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4),
+ MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS,
+ MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS,
+ MLX5E_TC_FLOW_ESWITCH = BIT(MLX5E_TC_FLOW_BASE),
+ MLX5E_TC_FLOW_NIC = BIT(MLX5E_TC_FLOW_BASE + 1),
+ MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2),
+ MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3),
+ MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
};
struct mlx5e_tc_flow {
struct rhash_head node;
+ struct mlx5e_priv *priv;
u64 cookie;
u8 flags;
struct mlx5_flow_handle *rule;
@@ -97,7 +103,7 @@ enum {
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
-#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
+#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
@@ -753,7 +759,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
table_created = true;
}
- parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ if (attr->match_level != MLX5_MATCH_NONE)
+ parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
&flow_act, dest, dest_ix);
@@ -789,7 +797,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
mlx5_del_flow_rules(flow->rule);
mlx5_fc_destroy(priv->mdev, counter);
- if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
+ if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
@@ -836,6 +844,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
+ attr->out_mdev = out_priv->mdev;
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
@@ -982,6 +991,8 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
}
}
}
+ if (neigh_used)
+ break;
}
if (neigh_used) {
@@ -1190,7 +1201,7 @@ vxlan_match_offload_err:
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
- u8 *min_inline)
+ u8 *match_level)
{
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
@@ -1199,7 +1210,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
u16 addr_type = 0;
u8 ip_proto = 0;
- *min_inline = MLX5_INLINE_MODE_L2;
+ *match_level = MLX5_MATCH_NONE;
if (f->dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
@@ -1249,54 +1260,6 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
inner_headers);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->key);
-
- struct flow_dissector_key_control *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->mask);
- addr_type = key->addr_type;
-
- if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
- key->flags & FLOW_DIS_IS_FRAGMENT);
-
- /* the HW doesn't need L3 inline to match on frag=no */
- if (key->flags & FLOW_DIS_IS_FRAGMENT)
- *min_inline = MLX5_INLINE_MODE_IP;
- }
- }
-
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
- ip_proto = key->ip_proto;
-
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
- ntohs(mask->n_proto));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ntohs(key->n_proto));
-
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
- mask->ip_proto);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
- key->ip_proto);
-
- if (mask->ip_proto)
- *min_inline = MLX5_INLINE_MODE_IP;
- }
-
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_dissector_key_eth_addrs *key =
skb_flow_dissector_target(f->dissector,
@@ -1320,6 +1283,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
key->src);
+
+ if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+ *match_level = MLX5_MATCH_L2;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -1340,9 +1306,79 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
+
+ *match_level = MLX5_MATCH_L2;
}
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+ ntohs(mask->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ntohs(key->n_proto));
+
+ if (mask->n_proto)
+ *match_level = MLX5_MATCH_L2;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ f->key);
+
+ struct flow_dissector_key_control *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ f->mask);
+ addr_type = key->addr_type;
+
+ /* the HW doesn't support frag first/later */
+ if (mask->flags & FLOW_DIS_FIRST_FRAG)
+ return -EOPNOTSUPP;
+
+ if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+ key->flags & FLOW_DIS_IS_FRAGMENT);
+
+ /* the HW doesn't need L3 inline to match on frag=no */
+ if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
+ *match_level = MLX5_INLINE_MODE_L2;
+ /* *** L2 attributes parsing up to here *** */
+ else
+ *match_level = MLX5_INLINE_MODE_IP;
+ }
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+ ip_proto = key->ip_proto;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+ mask->ip_proto);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ key->ip_proto);
+
+ if (mask->ip_proto)
+ *match_level = MLX5_MATCH_L3;
+ }
+
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_dissector_key_ipv4_addrs *key =
skb_flow_dissector_target(f->dissector,
@@ -1367,7 +1403,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
&key->dst, sizeof(key->dst));
if (mask->src || mask->dst)
- *min_inline = MLX5_INLINE_MODE_IP;
+ *match_level = MLX5_MATCH_L3;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
@@ -1396,7 +1432,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
- *min_inline = MLX5_INLINE_MODE_IP;
+ *match_level = MLX5_MATCH_L3;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
@@ -1424,9 +1460,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
if (mask->tos || mask->ttl)
- *min_inline = MLX5_INLINE_MODE_IP;
+ *match_level = MLX5_MATCH_L3;
}
+ /* *** L3 attributes parsing up to here *** */
+
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_dissector_key_ports *key =
skb_flow_dissector_target(f->dissector,
@@ -1467,7 +1505,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
if (mask->src || mask->dst)
- *min_inline = MLX5_INLINE_MODE_TCP_UDP;
+ *match_level = MLX5_MATCH_L4;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
@@ -1486,7 +1524,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
ntohs(key->flags));
if (mask->flags)
- *min_inline = MLX5_INLINE_MODE_TCP_UDP;
+ *match_level = MLX5_MATCH_L4;
}
return 0;
@@ -1501,23 +1539,28 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep;
- u8 min_inline;
+ u8 match_level;
int err;
- err = __parse_cls_flower(priv, spec, f, &min_inline);
+ err = __parse_cls_flower(priv, spec, f, &match_level);
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
if (rep->vport != FDB_UPLINK_VPORT &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
- esw->offloads.inline_mode < min_inline)) {
+ esw->offloads.inline_mode < match_level)) {
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
- min_inline, esw->offloads.inline_mode);
+ match_level, esw->offloads.inline_mode);
return -EOPNOTSUPP;
}
}
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ flow->esw_attr->match_level = match_level;
+ else
+ flow->nic_attr->match_level = match_level;
+
return err;
}
@@ -1574,7 +1617,6 @@ struct mlx5_fields {
static struct mlx5_fields fields[] = {
OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
- OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0),
OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
@@ -1760,12 +1802,12 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
err = -EOPNOTSUPP; /* can't be all optimistic */
if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
- printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
+ netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n");
goto out_err;
}
if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
- printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
+ netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd);
goto out_err;
}
@@ -1789,8 +1831,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
cmd_masks = &masks[cmd];
if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
- printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
- cmd);
+ netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
16, 1, cmd_masks, sizeof(zero_masks), true);
err = -EOPNOTSUPP;
@@ -1864,7 +1905,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
}
ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
- if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ if (modify_ip_header && ip_proto != IPPROTO_TCP &&
+ ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
pr_info("can't offload re-write of ip proto %d\n", ip_proto);
return false;
}
@@ -1912,21 +1954,21 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
const struct tc_action *a;
LIST_HEAD(actions);
+ u32 action = 0;
int err;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- attr->action = 0;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a)) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.flow_counter))
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
@@ -1936,13 +1978,13 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (err)
return err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
continue;
}
if (is_tcf_csum(a)) {
- if (csum_offload_supported(priv, attr->action,
+ if (csum_offload_supported(priv, action,
tcf_csum_update_flags(a)))
continue;
@@ -1956,8 +1998,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
same_hw_devs(priv, netdev_priv(peer_dev))) {
parse_attr->mirred_ifindex = peer_dev->ifindex;
flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else {
netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
peer_dev->name);
@@ -1976,13 +2018,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
attr->flow_tag = mark;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
continue;
}
return -EINVAL;
}
+ attr->action = action;
if (!actions_match_supported(priv, exts, parse_attr, flow))
return -EOPNOTSUPP;
@@ -2039,6 +2082,20 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
return 0;
}
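+/* Forwarding between two mlx5 netdevs can be offloaded when the device
+ * supports a merged eswitch, both netdevs sit on the same physical HW and
+ * the peer is an eswitch manager in switchdev mode.
+ */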
+static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
+ struct net_device *peer_netdev)
+{
+ struct mlx5e_priv *peer_priv;
+
+ peer_priv = netdev_priv(peer_netdev);
+
+ return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
+ (priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
+ same_hw_devs(priv, peer_priv) &&
+ MLX5_VPORT_MANAGER(peer_priv->mdev) &&
+ (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
+}
+
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct net_device **out_dev,
@@ -2454,34 +2511,36 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
const struct tc_action *a;
LIST_HEAD(actions);
bool encap = false;
- int err = 0;
+ u32 action = 0;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
- memset(attr, 0, sizeof(*attr));
attr->in_rep = rpriv->rep;
+ attr->in_mdev = priv->mdev;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a)) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
if (is_tcf_pedit(a)) {
+ int err;
+
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
parse_attr);
if (err)
return err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
continue;
}
if (is_tcf_csum(a)) {
- if (csum_offload_supported(priv, attr->action,
+ if (csum_offload_supported(priv, action,
tcf_csum_update_flags(a)))
continue;
@@ -2495,19 +2554,21 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
out_dev = tcf_mirred_dev(a);
if (switchdev_port_same_parent_id(priv->netdev,
- out_dev)) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ out_dev) ||
+ is_merged_eswitch_dev(priv, out_dev)) {
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
+ attr->out_mdev = out_priv->mdev;
} else if (encap) {
parse_attr->mirred_ifindex = out_dev->ifindex;
parse_attr->tun_info = *info;
attr->parse_attr = parse_attr;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
/* attr->out_rep is resolved when we handle encap */
} else {
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
@@ -2528,9 +2589,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_vlan(a)) {
if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
attr->vlan_vid = tcf_vlan_push_vid(a);
if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
attr->vlan_prio = tcf_vlan_push_prio(a);
@@ -2548,34 +2609,74 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_tunnel_release(a)) {
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
continue;
}
return -EINVAL;
}
+ attr->action = action;
if (!actions_match_supported(priv, exts, parse_attr, flow))
return -EOPNOTSUPP;
- return err;
+ return 0;
+}
+
+static void get_flags(int flags, u8 *flow_flags)
+{
+ u8 __flow_flags = 0;
+
+ if (flags & MLX5E_TC_INGRESS)
+ __flow_flags |= MLX5E_TC_FLOW_INGRESS;
+ if (flags & MLX5E_TC_EGRESS)
+ __flow_flags |= MLX5E_TC_FLOW_EGRESS;
+
+ *flow_flags = __flow_flags;
+}
+
+static const struct rhashtable_params tc_ht_params = {
+ .head_offset = offsetof(struct mlx5e_tc_flow, node),
+ .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
+ .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
+ .automatic_shrinking = true,
+};
+
+static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ return &uplink_rpriv->tc_ht;
+ } else
+ return &priv->fs.tc.ht;
}
int mlx5e_configure_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f, int flags)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct rhashtable *tc_ht = get_tc_ht(priv);
struct mlx5e_tc_flow *flow;
int attr_size, err = 0;
u8 flow_flags = 0;
+ get_flags(flags, &flow_flags);
+
+ flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ if (flow) {
+ netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
+ return 0;
+ }
+
if (esw && esw->mode == SRIOV_OFFLOADS) {
- flow_flags = MLX5E_TC_FLOW_ESWITCH;
+ flow_flags |= MLX5E_TC_FLOW_ESWITCH;
attr_size = sizeof(struct mlx5_esw_flow_attr);
} else {
- flow_flags = MLX5E_TC_FLOW_NIC;
+ flow_flags |= MLX5E_TC_FLOW_NIC;
attr_size = sizeof(struct mlx5_nic_flow_attr);
}
@@ -2588,6 +2689,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
flow->cookie = f->cookie;
flow->flags = flow_flags;
+ flow->priv = priv;
err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
if (err < 0)
@@ -2618,8 +2720,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
!(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
kvfree(parse_attr);
- err = rhashtable_insert_fast(&tc->ht, &flow->node,
- tc->ht_params);
+ err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
if (err) {
mlx5e_tc_del_flow(priv, flow);
kfree(flow);
@@ -2633,18 +2734,28 @@ err_free:
return err;
}
+#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
+#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)
+
+static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
+{
+ if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
+ return true;
+
+ return false;
+}
+
int mlx5e_delete_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f, int flags)
{
+ struct rhashtable *tc_ht = get_tc_ht(priv);
struct mlx5e_tc_flow *flow;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
- flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
- tc->ht_params);
- if (!flow)
+ flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ if (!flow || !same_flow_direction(flow, flags))
return -EINVAL;
- rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
+ rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
mlx5e_tc_del_flow(priv, flow);
@@ -2654,18 +2765,17 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
}
int mlx5e_stats_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f, int flags)
{
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct rhashtable *tc_ht = get_tc_ht(priv);
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
u64 bytes;
u64 packets;
u64 lastuse;
- flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
- tc->ht_params);
- if (!flow)
+ flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ if (!flow || !same_flow_direction(flow, flags))
return -EINVAL;
if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
@@ -2682,41 +2792,43 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
return 0;
}
-static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
- .head_offset = offsetof(struct mlx5e_tc_flow, node),
- .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
- .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
- .automatic_shrinking = true,
-};
-
-int mlx5e_tc_init(struct mlx5e_priv *priv)
+int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
hash_init(tc->mod_hdr_tbl);
hash_init(tc->hairpin_tbl);
- tc->ht_params = mlx5e_tc_flow_ht_params;
- return rhashtable_init(&tc->ht, &tc->ht_params);
+ return rhashtable_init(&tc->ht, &tc_ht_params);
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
struct mlx5e_tc_flow *flow = ptr;
- struct mlx5e_priv *priv = arg;
+ struct mlx5e_priv *priv = flow->priv;
mlx5e_tc_del_flow(priv, flow);
kfree(flow);
}
-void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
+void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
- rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
+ rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
if (!IS_ERR_OR_NULL(tc->t)) {
mlx5_destroy_flow_table(tc->t);
tc->t = NULL;
}
}
+
+int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
+{
+ return rhashtable_init(tc_ht, &tc_ht_params);
+}
+
+void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
+{
+ rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index c14c263a739b..59e52b845beb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -38,16 +38,26 @@
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
#ifdef CONFIG_MLX5_ESWITCH
-int mlx5e_tc_init(struct mlx5e_priv *priv);
-void mlx5e_tc_cleanup(struct mlx5e_priv *priv);
+
+enum {
+ MLX5E_TC_INGRESS = BIT(0),
+ MLX5E_TC_EGRESS = BIT(1),
+ MLX5E_TC_LAST_EXPORTED_BIT = 1,
+};
+
+int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
+void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
+void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
int mlx5e_configure_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f);
+ struct tc_cls_flower_offload *f, int flags);
int mlx5e_delete_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f);
+ struct tc_cls_flower_offload *f, int flags);
int mlx5e_stats_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f);
+ struct tc_cls_flower_offload *f, int flags);
struct mlx5e_encap_entry;
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -64,8 +74,8 @@ static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
}
#else /* CONFIG_MLX5_ESWITCH */
-static inline int mlx5e_tc_init(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_tc_cleanup(struct mlx5e_priv *priv) {}
+static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return 0; }
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 20297108528a..2d3f17da5f5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -35,12 +35,21 @@
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
-#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/en_accel.h"
#include "lib/clock.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
+
+#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
MLX5E_SQ_NOPS_ROOM)
+#else
+/* TLS offload requires MLX5E_SQ_STOP_ROOM to have
+ * enough room for a resync SKB, a normal SKB and a NOP
+ */
+#define MLX5E_SQ_STOP_ROOM (2 * MLX5_SEND_WQE_MAX_WQEBBS +\
+ MLX5E_SQ_NOPS_ROOM)
+#endif
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
struct mlx5e_sq_dma *dma)
@@ -255,7 +264,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
- return -ENOMEM;
+ goto dma_unmap_wqe_err;
dseg->addr = cpu_to_be64(dma_addr);
dseg->lkey = sq->mkey_be;
@@ -273,7 +282,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
- return -ENOMEM;
+ goto dma_unmap_wqe_err;
dseg->addr = cpu_to_be64(dma_addr);
dseg->lkey = sq->mkey_be;
@@ -285,6 +294,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
return num_dma;
+
+dma_unmap_wqe_err:
+ mlx5e_dma_unmap_wqe_err(sq, num_dma);
+ return -ENOMEM;
}
static inline void
@@ -325,8 +338,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
}
-static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi)
+netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi)
{
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
@@ -380,17 +393,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
(struct mlx5_wqe_data_seg *)cseg + ds_cnt);
if (unlikely(num_dma < 0))
- goto dma_unmap_wqe_err;
+ goto err_drop;
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
num_bytes, num_dma, wi, cseg);
return NETDEV_TX_OK;
-dma_unmap_wqe_err:
+err_drop:
sq->stats.dropped++;
- mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -399,21 +410,19 @@ dma_unmap_wqe_err:
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = sq->pc & wq->sz_m1;
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5e_tx_wqe *wqe;
+ struct mlx5e_txqsq *sq;
+ u16 pi;
- memset(wqe, 0, sizeof(*wqe));
+ sq = priv->txq2sq[skb_get_queue_mapping(skb)];
+ mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
-#ifdef CONFIG_MLX5_EN_IPSEC
- if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
- skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
- if (unlikely(!skb))
- return NETDEV_TX_OK;
- }
+#ifdef CONFIG_MLX5_ACCEL
+ /* might send skbs and update wqe and pi */
+ skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
+ if (unlikely(!skb))
+ return NETDEV_TX_OK;
#endif
-
return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
@@ -441,7 +450,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_txqsq, cq);
- if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
@@ -645,17 +654,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
(struct mlx5_wqe_data_seg *)cseg + ds_cnt);
if (unlikely(num_dma < 0))
- goto dma_unmap_wqe_err;
+ goto err_drop;
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
num_bytes, num_dma, wi, cseg);
return NETDEV_TX_OK;
-dma_unmap_wqe_err:
+err_drop:
sq->stats.dropped++;
- mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index f292bb346985..5d6f9ce2bf80 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -44,6 +44,30 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
return cpumask_test_cpu(current_cpu, aff);
}
+static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
+{
+ struct net_dim_sample dim_sample;
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
+ return;
+
+ net_dim_sample(sq->cq.event_ctr, sq->stats.packets, sq->stats.bytes,
+ &dim_sample);
+ net_dim(&sq->dim, dim_sample);
+}
+
+static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
+{
+ struct net_dim_sample dim_sample;
+
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
+ return;
+
+ net_dim_sample(rq->cq.event_ctr, rq->stats.packets, rq->stats.bytes,
+ &dim_sample);
+ net_dim(&rq->dim, dim_sample);
+}
+
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -75,18 +99,13 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (unlikely(!napi_complete_done(napi, work_done)))
return work_done;
- for (i = 0; i < c->num_tc; i++)
+ for (i = 0; i < c->num_tc; i++) {
+ mlx5e_handle_tx_dim(&c->sq[i]);
mlx5e_cq_arm(&c->sq[i].cq);
-
- if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) {
- struct net_dim_sample dim_sample;
- net_dim_sample(c->rq.cq.event_ctr,
- c->rq.stats.packets,
- c->rq.stats.bytes,
- &dim_sample);
- net_dim(&c->rq.dim, dim_sample);
}
+ mlx5e_handle_rx_dim(&c->rq);
+
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c1c94974e16b..1814f803bd2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -34,6 +34,9 @@
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
#include "mlx5_core.h"
#include "fpga/core.h"
#include "eswitch.h"
@@ -923,3 +926,28 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
+
+/* This function should only be called after mlx5_cmd_force_teardown_hca */
+void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_eq *eq;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (dev->rmap) {
+ free_irq_cpu_rmap(dev->rmap);
+ dev->rmap = NULL;
+ }
+#endif
+ list_for_each_entry(eq, &table->comp_eqs_list, list)
+ free_irq(eq->irqn, eq);
+
+ free_irq(table->pages_eq.irqn, &table->pages_eq);
+ free_irq(table->async_eq.irqn, &table->async_eq);
+ free_irq(table->cmd_eq.irqn, &table->cmd_eq);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ if (MLX5_CAP_GEN(dev, pg))
+ free_irq(table->pfault_eq.irqn, &table->pfault_eq);
+#endif
+ pci_free_irq_vectors(dev->pdev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 332bc56306bf..09f0e11c6ffc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -192,7 +192,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport_num = vport;
+ dest.vport.num = vport;
esw_debug(esw->dev,
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
@@ -2175,26 +2175,35 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
memset(vf_stats, 0, sizeof(*vf_stats));
vf_stats->rx_packets =
MLX5_GET_CTR(out, received_eth_unicast.packets) +
+ MLX5_GET_CTR(out, received_ib_unicast.packets) +
MLX5_GET_CTR(out, received_eth_multicast.packets) +
+ MLX5_GET_CTR(out, received_ib_multicast.packets) +
MLX5_GET_CTR(out, received_eth_broadcast.packets);
vf_stats->rx_bytes =
MLX5_GET_CTR(out, received_eth_unicast.octets) +
+ MLX5_GET_CTR(out, received_ib_unicast.octets) +
MLX5_GET_CTR(out, received_eth_multicast.octets) +
+ MLX5_GET_CTR(out, received_ib_multicast.octets) +
MLX5_GET_CTR(out, received_eth_broadcast.octets);
vf_stats->tx_packets =
MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+ MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+ MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
vf_stats->tx_bytes =
MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+ MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
+ MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
vf_stats->multicast =
- MLX5_GET_CTR(out, received_eth_multicast.packets);
+ MLX5_GET_CTR(out, received_eth_multicast.packets) +
+ MLX5_GET_CTR(out, received_ib_multicast.packets);
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 4cd773fa55e3..f47a14e31b7d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -227,9 +227,18 @@ enum {
SET_VLAN_INSERT = BIT(1)
};
+enum mlx5_flow_match_level {
+ MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE,
+ MLX5_MATCH_L2 = MLX5_INLINE_MODE_L2,
+ MLX5_MATCH_L3 = MLX5_INLINE_MODE_IP,
+ MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP,
+};
+
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_eswitch_rep *out_rep;
+ struct mlx5_core_dev *out_mdev;
+ struct mlx5_core_dev *in_mdev;
int action;
__be16 vlan_proto;
@@ -238,6 +247,7 @@ struct mlx5_esw_flow_attr {
bool vlan_handled;
u32 encap_id;
u32 mod_hdr_id;
+ u8 match_level;
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 35e256eb2f6e..b9ea464bcfa9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -71,7 +71,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport_num = attr->out_rep->vport;
+ dest[i].vport.num = attr->out_rep->vport;
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+ dest[i].vport.vhca_id =
+ MLX5_CAP_GEN(attr->out_mdev, vhca_id);
+ dest[i].vport.vhca_id_valid = 1;
+ }
i++;
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
@@ -88,11 +93,23 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ MLX5_SET(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(attr->in_mdev, vhca_id));
+
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
+
+ if (attr->match_level == MLX5_MATCH_NONE)
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ else
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS;
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
@@ -343,7 +360,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport_num = vport;
+ dest.vport.num = vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
@@ -387,7 +404,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dmac_c[0] = 0x01;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport_num = 0;
+ dest.vport.num = 0;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
@@ -663,7 +680,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
esw->offloads.vport_rx_group = g;
out:
- kfree(flow_group_in);
+ kvfree(flow_group_in);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 82405ed84725..3e2355c8df3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -53,6 +53,7 @@ struct mlx5_fpga_device {
} conn_res;
struct mlx5_fpga_ipsec *ipsec;
+ struct mlx5_fpga_tls *tls;
};
#define mlx5_fpga_dbg(__adev, format, ...) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 0f5da499a223..3c4f1f326e13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -43,9 +43,6 @@
#include "fpga/sdk.h"
#include "fpga/core.h"
-#define SBU_QP_QUEUE_SIZE 8
-#define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 * 1000)
-
enum mlx5_fpga_ipsec_cmd_status {
MLX5_FPGA_IPSEC_CMD_PENDING,
MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
@@ -258,7 +255,7 @@ static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{
struct mlx5_fpga_ipsec_cmd_context *context = ctx;
unsigned long timeout =
- msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC);
+ msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
int res;
res = wait_for_completion_timeout(&context->complete, timeout);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
index baa537e54a49..a0573cc2fc9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
@@ -41,6 +41,8 @@
* DOC: Innova SDK
* This header defines the in-kernel API for Innova FPGA client drivers.
*/
+#define SBU_QP_QUEUE_SIZE 8
+#define MLX5_FPGA_CMD_TIMEOUT_MSEC (60 * 1000)
enum mlx5_fpga_access_type {
MLX5_FPGA_ACCESS_TYPE_I2C = 0x0,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
new file mode 100644
index 000000000000..21048013826c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -0,0 +1,562 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx5/device.h>
+#include "fpga/tls.h"
+#include "fpga/cmd.h"
+#include "fpga/sdk.h"
+#include "fpga/core.h"
+#include "accel/tls.h"
+
+struct mlx5_fpga_tls_command_context;
+
+typedef void (*mlx5_fpga_tls_command_complete)
+ (struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_tls_command_context *ctx,
+ struct mlx5_fpga_dma_buf *resp);
+
+struct mlx5_fpga_tls_command_context {
+ struct list_head list;
+ /* There is no guarantee on the order between the TX completion
+ * and the command response.
+ * The TX completion is going to touch cmd->buf even in
+ * the case of successful transmission.
+ * So instead of requiring separate allocations for cmd
+ * and cmd->buf we've decided to use a reference counter
+ */
+ refcount_t ref;
+ struct mlx5_fpga_dma_buf buf;
+ mlx5_fpga_tls_command_complete complete;
+};
+
+static void
+mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
+{
+ if (refcount_dec_and_test(&ctx->ref))
+ kfree(ctx);
+}
+
+static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_dma_buf *resp)
+{
+ struct mlx5_fpga_conn *conn = fdev->tls->conn;
+ struct mlx5_fpga_tls_command_context *ctx;
+ struct mlx5_fpga_tls *tls = fdev->tls;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tls->pending_cmds_lock, flags);
+ ctx = list_first_entry(&tls->pending_cmds,
+ struct mlx5_fpga_tls_command_context, list);
+ list_del(&ctx->list);
+ spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
+ ctx->complete(conn, fdev, ctx, resp);
+}
+
+static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_dma_buf *buf,
+ u8 status)
+{
+ struct mlx5_fpga_tls_command_context *ctx =
+ container_of(buf, struct mlx5_fpga_tls_command_context, buf);
+
+ mlx5_fpga_tls_put_command_ctx(ctx);
+
+ if (unlikely(status))
+ mlx5_fpga_tls_cmd_complete(fdev, NULL);
+}
+
+static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_tls_command_context *cmd,
+ mlx5_fpga_tls_command_complete complete)
+{
+ struct mlx5_fpga_tls *tls = fdev->tls;
+ unsigned long flags;
+ int ret;
+
+ refcount_set(&cmd->ref, 2);
+ cmd->complete = complete;
+ cmd->buf.complete = mlx5_fpga_cmd_send_complete;
+
+ spin_lock_irqsave(&tls->pending_cmds_lock, flags);
+ /* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
+ * to make sure commands are inserted into the tls->pending_cmds list
+ * and the command QP in the same order.
+ */
+ ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
+ if (likely(!ret))
+ list_add_tail(&cmd->list, &tls->pending_cmds);
+ else
+ complete(tls->conn, fdev, cmd, NULL);
+ spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
+}
+
+/* Start of context identifiers range (inclusive) */
+#define SWID_START 0
+/* End of context identifiers range (exclusive) */
+#define SWID_END BIT(24)
+
+static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
+ void *ptr)
+{
+ int ret;
+
+ /* TLS metadata format is 1 byte for syndrome followed
+ * by 3 bytes of swid (software ID)
+ * swid must not exceed 3 bytes.
+ * See tls_rxtx.c:insert_pet() for details
+ */
+ BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(idr_spinlock);
+ ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
+ spin_unlock_irq(idr_spinlock);
+ idr_preload_end();
+
+ return ret;
+}
+
+static void mlx5_fpga_tls_release_swid(struct idr *idr,
+ spinlock_t *idr_spinlock, u32 swid)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(idr_spinlock, flags);
+ idr_remove(idr, swid);
+ spin_unlock_irqrestore(idr_spinlock, flags);
+}
+
+struct mlx5_teardown_stream_context {
+ struct mlx5_fpga_tls_command_context cmd;
+ u32 swid;
+};
+
+static void
+mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_tls_command_context *cmd,
+ struct mlx5_fpga_dma_buf *resp)
+{
+ struct mlx5_teardown_stream_context *ctx =
+ container_of(cmd, struct mlx5_teardown_stream_context, cmd);
+
+ if (resp) {
+ u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
+
+ if (syndrome)
+ mlx5_fpga_err(fdev,
+ "Teardown stream failed with syndrome = %d",
+ syndrome);
+ else
+ mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
+ &fdev->tls->idr_spinlock,
+ ctx->swid);
+ }
+ mlx5_fpga_tls_put_command_ctx(cmd);
+}
+
+static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
+{
+ memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
+ MLX5_BYTE_OFF(tls_flow, ipv6));
+
+ MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
+ MLX5_SET(tls_cmd, cmd, direction_sx,
+ MLX5_GET(tls_flow, flow, direction_sx));
+}
+
+void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, void *flow,
+ u32 swid, gfp_t flags)
+{
+ struct mlx5_teardown_stream_context *ctx;
+ struct mlx5_fpga_dma_buf *buf;
+ void *cmd;
+
+ ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
+ if (!ctx)
+ return;
+
+ buf = &ctx->cmd.buf;
+ cmd = (ctx + 1);
+ MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
+ MLX5_SET(tls_cmd, cmd, swid, swid);
+
+ mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+ kfree(flow);
+
+ buf->sg[0].data = cmd;
+ buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
+
+ ctx->swid = swid;
+ mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+ mlx5_fpga_tls_teardown_completion);
+}
+
+void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
+ gfp_t flags)
+{
+ struct mlx5_fpga_tls *tls = mdev->fpga->tls;
+ void *flow;
+
+ rcu_read_lock();
+ flow = idr_find(&tls->tx_idr, swid);
+ rcu_read_unlock();
+
+ if (!flow) {
+ mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
+ swid);
+ return;
+ }
+
+ mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
+}
+
+enum mlx5_fpga_setup_stream_status {
+ MLX5_FPGA_CMD_PENDING,
+ MLX5_FPGA_CMD_SEND_FAILED,
+ MLX5_FPGA_CMD_RESPONSE_RECEIVED,
+ MLX5_FPGA_CMD_ABANDONED,
+};
+
+struct mlx5_setup_stream_context {
+ struct mlx5_fpga_tls_command_context cmd;
+ atomic_t status;
+ u32 syndrome;
+ struct completion comp;
+};
+
+static void
+mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_tls_command_context *cmd,
+ struct mlx5_fpga_dma_buf *resp)
+{
+ struct mlx5_setup_stream_context *ctx =
+ container_of(cmd, struct mlx5_setup_stream_context, cmd);
+ int status = MLX5_FPGA_CMD_SEND_FAILED;
+ void *tls_cmd = ctx + 1;
+
+ /* If we failed to send the command, resp == NULL */
+ if (resp) {
+ ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
+ status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
+ }
+
+ status = atomic_xchg_release(&ctx->status, status);
+ if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
+ complete(&ctx->comp);
+ return;
+ }
+
+ mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
+ ctx->syndrome);
+
+ if (!ctx->syndrome) {
+ /* The process was killed while waiting for the context to be
+ * added, and the add completed successfully.
+ * We need to destroy the HW context, and we can't reuse
+ * the command context because we might not have received
+ * the tx completion yet.
+ */
+ mlx5_fpga_tls_del_tx_flow(fdev->mdev,
+ MLX5_GET(tls_cmd, tls_cmd, swid),
+ GFP_ATOMIC);
+ }
+
+ mlx5_fpga_tls_put_command_ctx(cmd);
+}
+
+static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
+ struct mlx5_setup_stream_context *ctx)
+{
+ struct mlx5_fpga_dma_buf *buf;
+ void *cmd = ctx + 1;
+ int status, ret = 0;
+
+ buf = &ctx->cmd.buf;
+ buf->sg[0].data = cmd;
+ buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
+ MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);
+
+ init_completion(&ctx->comp);
+ atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
+ ctx->syndrome = -1;
+
+ mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+ mlx5_fpga_tls_setup_completion);
+ wait_for_completion_killable(&ctx->comp);
+
+ status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
+ if (unlikely(status == MLX5_FPGA_CMD_PENDING))
+ /* ctx is going to be released in mlx5_fpga_tls_setup_completion */
+ return -EINTR;
+
+ if (unlikely(ctx->syndrome))
+ ret = -ENOMEM;
+
+ mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
+ return ret;
+}
+
+static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
+ struct mlx5_fpga_dma_buf *buf)
+{
+ struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg;
+
+ mlx5_fpga_tls_cmd_complete(fdev, buf);
+}
+
+bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
+{
+ if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
+ return false;
+
+ if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
+ MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
+ return false;
+
+ if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
+ MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
+ return false;
+
+ if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
+ return false;
+
+ return true;
+}
+
+static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
+ u32 *p_caps)
+{
+ int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
+ u32 caps = 0;
+ void *buf;
+
+ buf = kzalloc(cap_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
+ if (err)
+ goto out;
+
+ if (MLX5_GET(tls_extended_cap, buf, tx))
+ caps |= MLX5_ACCEL_TLS_TX;
+ if (MLX5_GET(tls_extended_cap, buf, rx))
+ caps |= MLX5_ACCEL_TLS_RX;
+ if (MLX5_GET(tls_extended_cap, buf, tls_v12))
+ caps |= MLX5_ACCEL_TLS_V12;
+ if (MLX5_GET(tls_extended_cap, buf, tls_v13))
+ caps |= MLX5_ACCEL_TLS_V13;
+ if (MLX5_GET(tls_extended_cap, buf, lro))
+ caps |= MLX5_ACCEL_TLS_LRO;
+ if (MLX5_GET(tls_extended_cap, buf, ipv6))
+ caps |= MLX5_ACCEL_TLS_IPV6;
+
+ if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
+ caps |= MLX5_ACCEL_TLS_AES_GCM128;
+ if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
+ caps |= MLX5_ACCEL_TLS_AES_GCM256;
+
+ *p_caps = caps;
+ err = 0;
+out:
+ kfree(buf);
+ return err;
+}
+
+int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ struct mlx5_fpga_conn_attr init_attr = {0};
+ struct mlx5_fpga_conn *conn;
+ struct mlx5_fpga_tls *tls;
+ int err = 0;
+
+ if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
+ return 0;
+
+ tls = kzalloc(sizeof(*tls), GFP_KERNEL);
+ if (!tls)
+ return -ENOMEM;
+
+ err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
+ if (err)
+ goto error;
+
+ if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 |
+ MLX5_ACCEL_TLS_AES_GCM128))) {
+ err = -ENOTSUPP;
+ goto error;
+ }
+
+ init_attr.rx_size = SBU_QP_QUEUE_SIZE;
+ init_attr.tx_size = SBU_QP_QUEUE_SIZE;
+ init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
+ init_attr.cb_arg = fdev;
+ conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
+ if (IS_ERR(conn)) {
+ err = PTR_ERR(conn);
+ mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
+ err);
+ goto error;
+ }
+
+ tls->conn = conn;
+ spin_lock_init(&tls->pending_cmds_lock);
+ INIT_LIST_HEAD(&tls->pending_cmds);
+
+ idr_init(&tls->tx_idr);
+ spin_lock_init(&tls->idr_spinlock);
+ fdev->tls = tls;
+ return 0;
+
+error:
+ kfree(tls);
+ return err;
+}
+
+void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+
+ if (!fdev || !fdev->tls)
+ return;
+
+ mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
+ kfree(fdev->tls);
+ fdev->tls = NULL;
+}
+
+static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
+ struct tls_crypto_info *info,
+ __be64 *rcd_sn)
+{
+ struct tls12_crypto_info_aes_gcm_128 *crypto_info =
+ (struct tls12_crypto_info_aes_gcm_128 *)info;
+
+ memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
+ TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+
+ memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
+ crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
+ crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+
+ /* in AES-GCM 128 we need to write the key twice */
+ memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
+ TLS_CIPHER_AES_GCM_128_KEY_SIZE,
+ crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+
+ MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
+}
+
+static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
+ struct tls_crypto_info *crypto_info)
+{
+ __be64 rcd_sn;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
+ return -EINVAL;
+ mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info, u32 swid,
+ u32 tcp_sn)
+{
+ u32 caps = mlx5_fpga_tls_device_caps(mdev);
+ struct mlx5_setup_stream_context *ctx;
+ int ret = -ENOMEM;
+ size_t cmd_size;
+ void *cmd;
+
+ cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
+ ctx = kzalloc(cmd_size, GFP_KERNEL);
+ if (!ctx)
+ goto out;
+
+ cmd = ctx + 1;
+ ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
+ if (ret)
+ goto free_ctx;
+
+ mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+
+ MLX5_SET(tls_cmd, cmd, swid, swid);
+ MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);
+
+ return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);
+
+free_ctx:
+ kfree(ctx);
+out:
+ return ret;
+}
+
+int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid)
+{
+ struct mlx5_fpga_tls *tls = mdev->fpga->tls;
+ int ret = -ENOMEM;
+ u32 swid;
+
+ ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow);
+ if (ret < 0)
+ return ret;
+
+ swid = ret;
+ MLX5_SET(tls_flow, flow, direction_sx, 1);
+
+ ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
+ start_offload_tcp_sn);
+ if (ret && ret != -EINTR)
+ goto free_swid;
+
+ *p_swid = swid;
+ return 0;
+free_swid:
+ mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
new file mode 100644
index 000000000000..800a214e4e49
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __MLX5_FPGA_TLS_H__
+#define __MLX5_FPGA_TLS_H__
+
+#include <linux/mlx5/driver.h>
+
+#include <net/tls.h>
+#include "fpga/core.h"
+
+struct mlx5_fpga_tls {
+ struct list_head pending_cmds;
+ spinlock_t pending_cmds_lock; /* Protects pending_cmds */
+ u32 caps;
+ struct mlx5_fpga_conn *conn;
+
+ struct idr tx_idr;
+ spinlock_t idr_spinlock; /* protects the IDR */
+};
+
+int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid);
+
+void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
+ gfp_t flags);
+
+bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
+int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
+void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev);
+
+static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
+{
+ return mdev->fpga->tls->caps;
+}
+
+#endif /* __MLX5_FPGA_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index ef5afd7c9325..5a00deff5457 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -372,6 +372,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
if (dst->dest_attr.type ==
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
id = dst->dest_attr.ft->id;
+ } else if (dst->dest_attr.type ==
+ MLX5_FLOW_DESTINATION_TYPE_VPORT) {
+ id = dst->dest_attr.vport.num;
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id_valid,
+ dst->dest_attr.vport.vhca_id_valid);
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dst->dest_attr.vport.vhca_id);
} else {
id = dst->dest_attr.tir_num;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index de51e7c39bc8..806e95523f9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -187,6 +187,7 @@ static void del_sw_ns(struct fs_node *node);
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2);
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
struct mlx5_flow_destination *dest);
@@ -481,7 +482,8 @@ static void del_sw_hw_rule(struct fs_node *node)
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
--fte->dests_size) {
- modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+ modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
update_fte = true;
goto out;
@@ -1372,6 +1374,8 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
struct mlx5_core_dev *dev = get_dev(&ft->node);
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
void *match_criteria_addr;
+ u8 src_esw_owner_mask_on;
+ void *misc;
int err;
u32 *in;
@@ -1384,6 +1388,14 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
fg->max_ftes - 1);
+
+ misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
+ misc_parameters);
+ src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, in,
+ source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
+
match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
in, match_criteria);
memcpy(match_criteria_addr, fg->mask.match_criteria,
@@ -1404,7 +1416,7 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
{
if (d1->type == d2->type) {
if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
- d1->vport_num == d2->vport_num) ||
+ d1->vport.num == d2->vport.num) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
@@ -2351,23 +2363,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
static int init_root_ns(struct mlx5_flow_steering *steering)
{
+ int err;
+
steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
if (!steering->root_ns)
- goto cleanup;
+ return -ENOMEM;
- if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
- goto cleanup;
+ err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
+ if (err)
+ goto out_err;
set_prio_attrs(steering->root_ns);
-
- if (create_anchor_flow_table(steering))
- goto cleanup;
+ err = create_anchor_flow_table(steering);
+ if (err)
+ goto out_err;
return 0;
-cleanup:
- mlx5_cleanup_fs(steering->dev);
- return -ENOMEM;
+out_err:
+ cleanup_root_ns(steering->root_ns);
+ steering->root_ns = NULL;
+ return err;
}
static void clean_tree(struct fs_node *node)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 63a8ea31601c..615005e63819 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -60,6 +60,7 @@
#include "fpga/core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec.h"
+#include "accel/tls.h"
#include "lib/clock.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
@@ -1190,6 +1191,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_ipsec_start;
}
+ err = mlx5_accel_tls_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "TLS device start failed %d\n", err);
+ goto err_tls_start;
+ }
+
err = mlx5_init_fs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1231,6 +1238,9 @@ err_sriov:
mlx5_cleanup_fs(dev);
err_fs:
+ mlx5_accel_tls_cleanup(dev);
+
+err_tls_start:
mlx5_accel_ipsec_cleanup(dev);
err_ipsec_start:
@@ -1306,6 +1316,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_sriov_detach(dev);
mlx5_cleanup_fs(dev);
mlx5_accel_ipsec_cleanup(dev);
+ mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
@@ -1587,6 +1598,14 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
mlx5_enter_error_state(dev, true);
+ /* Some platforms requiring freeing the IRQ's in the shutdown
+ * flow. If they aren't freed they can't be allocated after
+ * kexec. There is no need to cleanup the mlx5_core software
+ * contexts.
+ */
+ mlx5_irq_clear_affinity_hints(dev);
+ mlx5_core_eq_free_irqs(dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 7d001fe6e631..023882d9a22e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -128,6 +128,8 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
u32 *out, int outlen);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
void mlx5_stop_eqs(struct mlx5_core_dev *dev);
+/* This function should only be called after mlx5_cmd_force_teardown_hca */
+void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
void mlx5_cq_tasklet_cb(unsigned long data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index b9736f505bdf..f4f02f775c93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -123,8 +123,8 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
write_unlock_irqrestore(&table->lock, flags);
if (!deleted_mkey) {
- mlx5_core_warn(dev, "failed radix tree delete of mkey 0x%x\n",
- mlx5_base_mkey(mkey->key));
+ mlx5_core_dbg(dev, "failed radix tree delete of mkey 0x%x\n",
+ mlx5_base_mkey(mkey->key));
return -ENOENT;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 02d6c5b5d502..4ca07bfb6b14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -407,21 +407,21 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
case MLX5_CMD_OP_RST2INIT_QP:
if (MBOX_ALLOC(mbox, rst2init_qp))
return -ENOMEM;
- MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
- break;
+ MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
case MLX5_CMD_OP_INIT2RTR_QP:
if (MBOX_ALLOC(mbox, init2rtr_qp))
return -ENOMEM;
- MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
- break;
+ MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
case MLX5_CMD_OP_RTR2RTS_QP:
if (MBOX_ALLOC(mbox, rtr2rts_qp))
return -ENOMEM;
- MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
- break;
+ MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc);
+ break;
case MLX5_CMD_OP_RTS2RTS_QP:
if (MBOX_ALLOC(mbox, rts2rts_qp))
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 177e076b8d17..719cecb182c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -511,7 +511,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.system_image_guid);
- kfree(out);
+ kvfree(out);
return 0;
}
@@ -531,7 +531,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.node_guid);
- kfree(out);
+ kvfree(out);
return 0;
}
@@ -587,7 +587,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.qkey_violation_counter);
- kfree(out);
+ kvfree(out);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index fca90b94596d..f3dfa0ca3c5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -38,7 +38,6 @@
#include <linux/mlx5/qp.h>
struct mlx5_wq_param {
- int linear;
int buf_numa_node;
int db_numa_node;
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 479511cf79bc..8da91b023b13 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -424,10 +424,15 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
/* cmd_mbox_query_aq_cap_log_max_cq_sz
- * Log (base 2) of max CQEs allowed on CQ.
+ * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv0 and CQEv1.
*/
MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+/* cmd_mbox_query_aq_cap_log_max_cqv2_sz
+ * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv2.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cqv2_sz, 0x08, 16, 8);
+
/* cmd_mbox_query_aq_cap_max_num_cqs
* Maximum number of CQs.
*/
@@ -662,6 +667,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
+/* cmd_mbox_config_set_cqe_version
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);
+
/* cmd_mbox_config_profile_max_vepa_channels
* Maximum number of VEPA channels per port (0 through 16)
* 0 - multi-channel VEPA is disabled
@@ -841,6 +852,14 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
0x60, 0, 8, 0x08, 0x00, false);
+/* cmd_mbox_config_profile_cqe_version
+ * CQE version:
+ * 0: CQE version is 0
+ * 1: CQE version is either 1 or 2
+ * CQE ver 1 or 2 is configured by Completion Queue Context field cqe_ver.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, cqe_version, 0xB0, 0, 8);
+
/* ACCESS_REG - Access EMAD Supported Register
* ----------------------------------
* OpMod == 0 (N/A), INMmod == 0 (N/A)
@@ -1032,11 +1051,15 @@ static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
}
-/* cmd_mbox_sw2hw_cq_cv
+enum mlxsw_cmd_mbox_sw2hw_cq_cqe_ver {
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2,
+};
+
+/* cmd_mbox_sw2hw_cq_cqe_ver
* CQE Version.
- * 0 - CQE Version 0, 1 - CQE Version 1
*/
-MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cqe_ver, 0x00, 28, 4);
/* cmd_mbox_sw2hw_cq_c_eqn
* Event Queue this CQ reports completion events to.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 93ea56620a24..a38faec45b30 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1100,11 +1100,11 @@ err_emad_init:
err_alloc_lag_mapping:
mlxsw_ports_fini(mlxsw_core);
err_ports_init:
- mlxsw_bus->fini(bus_priv);
-err_bus_init:
if (!reload)
devlink_resources_unregister(devlink, NULL);
err_register_resources:
+ mlxsw_bus->fini(bus_priv);
+err_bus_init:
if (!reload)
devlink_free(devlink);
err_devlink_alloc:
@@ -1714,15 +1714,16 @@ EXPORT_SYMBOL(mlxsw_core_port_fini);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv, struct net_device *dev,
- bool split, u32 split_group)
+ u32 port_number, bool split,
+ u32 split_port_subnumber)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
mlxsw_core_port->port_driver_priv = port_driver_priv;
- if (split)
- devlink_port_split_set(devlink_port, split_group);
+ devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ port_number, split, split_port_subnumber);
devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
@@ -1762,6 +1763,17 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
+int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
+ u8 local_port, char *name, size_t len)
+{
+ struct mlxsw_core_port *mlxsw_core_port =
+ &mlxsw_core->ports[local_port];
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ return devlink_port_get_phys_port_name(devlink_port, name, len);
+}
+EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name);
+
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 092d39399f3c..4eac7fbd07d5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -201,13 +201,16 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port);
void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv, struct net_device *dev,
- bool split, u32 split_group);
+ u32 port_number, bool split,
+ u32 split_port_subnumber);
void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv);
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
+int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
+ u8 local_port, char *name, size_t len);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
bool mlxsw_core_schedule_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 3a9381977d6d..db794a1a3a7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -117,6 +117,7 @@ struct mlxsw_pci_queue {
struct {
u32 comp_sdq_count;
u32 comp_rdq_count;
+ enum mlxsw_pci_cqe_v v;
} cq;
struct {
u32 ev_cmd_count;
@@ -155,6 +156,8 @@ struct mlxsw_pci {
} cmd;
struct mlxsw_bus_info bus_info;
const struct pci_device_id *id;
+ enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
+ u8 num_sdq_cqs; /* Number of CQs used for SDQs */
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -202,24 +205,6 @@ static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
return owner_bit != !!(q->consumer_counter & q->count);
}
-static char *
-mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
- u32 (*get_elem_owner_func)(const char *))
-{
- struct mlxsw_pci_queue_elem_info *elem_info;
- char *elem;
- bool owner_bit;
-
- elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- elem = elem_info->elem;
- owner_bit = get_elem_owner_func(elem);
- if (mlxsw_pci_elem_hw_owned(q, owner_bit))
- return NULL;
- q->consumer_counter++;
- rmb(); /* make sure we read owned bit before the rest of elem */
- return elem;
-}
-
static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
enum mlxsw_pci_queue_type q_type)
@@ -494,6 +479,17 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
}
}
+static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ q->u.cq.v = mlxsw_pci->max_cqe_ver;
+
+ /* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
+ if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
+ q->num < mlxsw_pci->num_sdq_cqs)
+ q->u.cq.v = MLXSW_PCI_CQE_V1;
+}
+
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
@@ -505,10 +501,16 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
for (i = 0; i < q->count; i++) {
char *elem = mlxsw_pci_queue_elem_get(q, i);
- mlxsw_pci_cqe_owner_set(elem, 1);
+ mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
}
- mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+ if (q->u.cq.v == MLXSW_PCI_CQE_V1)
+ mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
+ else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
+ mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
+
mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
@@ -559,7 +561,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
- char *cqe)
+ enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_elem_info *elem_info;
@@ -579,10 +581,11 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
- if (mlxsw_pci_cqe_lag_get(cqe)) {
+ if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
rx_info.is_lag = true;
- rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe);
- rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe);
+ rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
+ rx_info.lag_port_index =
+ mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
} else {
rx_info.is_lag = false;
rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
@@ -591,7 +594,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
- if (mlxsw_pci_cqe_crc_get(cqe))
+ if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
byte_count -= ETH_FCS_LEN;
skb_put(skb, byte_count);
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
@@ -608,7 +611,18 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
- return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *elem;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ elem = elem_info->elem;
+ owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
+ if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+ return NULL;
+ q->consumer_counter++;
+ rmb(); /* make sure we read owned bit before the rest of elem */
+ return elem;
}
static void mlxsw_pci_cq_tasklet(unsigned long data)
@@ -621,8 +635,8 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
- u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
- u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+ u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
+ u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
if (sendq) {
struct mlxsw_pci_queue *sdq;
@@ -636,7 +650,7 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
- wqe_counter, cqe);
+ wqe_counter, q->u.cq.v, cqe);
q->u.cq.comp_rdq_count++;
}
if (++items == credits)
@@ -648,6 +662,18 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
}
}
+static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
+{
+ return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
+ MLXSW_PCI_CQE01_COUNT;
+}
+
+static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
+{
+ return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
+ MLXSW_PCI_CQE01_SIZE;
+}
+
static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
@@ -696,7 +722,18 @@ static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
- return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *elem;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ elem = elem_info->elem;
+ owner_bit = mlxsw_pci_eqe_owner_get(elem);
+ if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+ return NULL;
+ q->consumer_counter++;
+ rmb(); /* make sure we read owned bit before the rest of elem */
+ return elem;
}
static void mlxsw_pci_eq_tasklet(unsigned long data)
@@ -749,11 +786,15 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
struct mlxsw_pci_queue_ops {
const char *name;
enum mlxsw_pci_queue_type type;
+ void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q);
int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q);
void (*fini)(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q);
void (*tasklet)(unsigned long data);
+ u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
+ u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
u16 elem_count;
u8 elem_size;
};
@@ -776,11 +817,12 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
.type = MLXSW_PCI_QUEUE_TYPE_CQ,
+ .pre_init = mlxsw_pci_cq_pre_init,
.init = mlxsw_pci_cq_init,
.fini = mlxsw_pci_cq_fini,
.tasklet = mlxsw_pci_cq_tasklet,
- .elem_count = MLXSW_PCI_CQE_COUNT,
- .elem_size = MLXSW_PCI_CQE_SIZE
+ .elem_count_f = mlxsw_pci_cq_elem_count,
+ .elem_size_f = mlxsw_pci_cq_elem_size
};
static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
@@ -800,10 +842,15 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
int i;
int err;
- spin_lock_init(&q->lock);
q->num = q_num;
- q->count = q_ops->elem_count;
- q->elem_size = q_ops->elem_size;
+ if (q_ops->pre_init)
+ q_ops->pre_init(mlxsw_pci, q);
+
+ spin_lock_init(&q->lock);
+ q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
+ q_ops->elem_count;
+ q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
+ q_ops->elem_size;
q->type = q_ops->type;
q->pci = mlxsw_pci;
@@ -832,7 +879,7 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
elem_info->elem =
- __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+ __mlxsw_pci_queue_elem_get(q, q->elem_size, i);
}
mlxsw_cmd_mbox_zero(mbox);
@@ -912,6 +959,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
u8 rdq_log2sz;
u8 num_cqs;
u8 cq_log2sz;
+ u8 cqv2_log2sz;
u8 num_eqs;
u8 eq_log2sz;
int err;
@@ -927,6 +975,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+ cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
@@ -938,12 +987,16 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
(1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
- (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+ (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
+ (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
+ (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
(1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
return -EINVAL;
}
+ mlxsw_pci->num_sdq_cqs = num_sdqs;
+
err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
num_eqs);
if (err) {
@@ -1184,6 +1237,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
&profile->swid_config[i]);
+ if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
+ mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
+ }
+
return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}
@@ -1378,6 +1436,21 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_query_resources;
+ if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
+ MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
+ mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
+ else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
+ MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
+ mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
+ else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
+ MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
+ mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
+ } else {
+ dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
+ goto err_cqe_v_check;
+ }
+
err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
if (err)
goto err_config_profile;
@@ -1400,6 +1473,7 @@ err_request_eq_irq:
mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
+err_cqe_v_check:
err_query_resources:
err_boardinfo:
mlxsw_pci_fw_area_fini(mlxsw_pci);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index fb082ad21b00..963155f6a17a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -82,10 +82,12 @@
#define MLXSW_PCI_AQ_PAGES 8
#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
-#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
-#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
+#define MLXSW_PCI_CQE2_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE2_SIZE)
#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80
@@ -126,10 +128,48 @@ MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
*/
MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
+enum mlxsw_pci_cqe_v {
+ MLXSW_PCI_CQE_V0,
+ MLXSW_PCI_CQE_V1,
+ MLXSW_PCI_CQE_V2,
+};
+
+#define mlxsw_pci_cqe_item_helpers(name, v0, v1, v2) \
+static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
+{ \
+ switch (v) { \
+ default: \
+ case MLXSW_PCI_CQE_V0: \
+ return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
+ case MLXSW_PCI_CQE_V1: \
+ return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
+ case MLXSW_PCI_CQE_V2: \
+ return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
+ } \
+} \
+static inline void mlxsw_pci_cqe_##name##_set(enum mlxsw_pci_cqe_v v, \
+ char *cqe, u32 val) \
+{ \
+ switch (v) { \
+ default: \
+ case MLXSW_PCI_CQE_V0: \
+ mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
+ break; \
+ case MLXSW_PCI_CQE_V1: \
+ mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
+ break; \
+ case MLXSW_PCI_CQE_V2: \
+ mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
+ break; \
+ } \
+}
+
/* pci_cqe_lag
* Packet arrives from a port which is a LAG
*/
-MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+MLXSW_ITEM32(pci, cqe0, lag, 0x00, 23, 1);
+MLXSW_ITEM32(pci, cqe12, lag, 0x00, 24, 1);
+mlxsw_pci_cqe_item_helpers(lag, 0, 12, 12);
/* pci_cqe_system_port/lag_id
* When lag=0: System port on which the packet was received
@@ -138,8 +178,12 @@ MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
* bits [3:0] sub_port on which the packet was received
*/
MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
-MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12);
-MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4);
+MLXSW_ITEM32(pci, cqe0, lag_id, 0x00, 4, 12);
+MLXSW_ITEM32(pci, cqe12, lag_id, 0x00, 0, 16);
+mlxsw_pci_cqe_item_helpers(lag_id, 0, 12, 12);
+MLXSW_ITEM32(pci, cqe0, lag_subport, 0x00, 0, 4);
+MLXSW_ITEM32(pci, cqe12, lag_subport, 0x00, 16, 8);
+mlxsw_pci_cqe_item_helpers(lag_subport, 0, 12, 12);
/* pci_cqe_wqe_counter
* WQE count of the WQEs completed on the associated dqn
@@ -162,28 +206,38 @@ MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9);
 * Length includes CRC. Indicates the length field includes
* the packet's CRC.
*/
-MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+MLXSW_ITEM32(pci, cqe0, crc, 0x0C, 8, 1);
+MLXSW_ITEM32(pci, cqe12, crc, 0x0C, 9, 1);
+mlxsw_pci_cqe_item_helpers(crc, 0, 12, 12);
/* pci_cqe_e
* CQE with Error.
*/
-MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+MLXSW_ITEM32(pci, cqe0, e, 0x0C, 7, 1);
+MLXSW_ITEM32(pci, cqe12, e, 0x00, 27, 1);
+mlxsw_pci_cqe_item_helpers(e, 0, 12, 12);
/* pci_cqe_sr
* 1 - Send Queue
* 0 - Receive Queue
*/
-MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+MLXSW_ITEM32(pci, cqe0, sr, 0x0C, 6, 1);
+MLXSW_ITEM32(pci, cqe12, sr, 0x00, 26, 1);
+mlxsw_pci_cqe_item_helpers(sr, 0, 12, 12);
/* pci_cqe_dqn
* Descriptor Queue (DQ) Number.
*/
-MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
+MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
+mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
/* pci_cqe_owner
* Ownership bit.
*/
-MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+MLXSW_ITEM32(pci, cqe01, owner, 0x0C, 0, 1);
+MLXSW_ITEM32(pci, cqe2, owner, 0x1C, 0, 1);
+mlxsw_pci_cqe_item_helpers(owner, 01, 01, 2);
/* pci_eqe_event_type
* Event type.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6218231e379e..3f4d7e22cece 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -6833,6 +6833,12 @@ enum mlxsw_reg_mpat_span_type {
*/
MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH = 0x0,
+ /* Remote SPAN Ethernet VLAN.
+ * The packet is forwarded to the monitoring port on the monitoring
+ * VLAN.
+ */
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH = 0x1,
+
/* Encapsulated Remote SPAN Ethernet L3 GRE.
* The packet is encapsulated with GRE header.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 087aad52c195..fd9299ccec72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -43,6 +43,9 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
+ MLXSW_RES_ID_CQE_V0,
+ MLXSW_RES_ID_CQE_V1,
+ MLXSW_RES_ID_CQE_V2,
MLXSW_RES_ID_COUNTER_POOL_SIZE,
MLXSW_RES_ID_MAX_SPAN,
MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
@@ -81,6 +84,9 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
+ [MLXSW_RES_ID_CQE_V0] = 0x2210,
+ [MLXSW_RES_ID_CQE_V1] = 0x2211,
+ [MLXSW_RES_ID_CQE_V2] = 0x2212,
[MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
[MLXSW_RES_ID_MAX_SPAN] = 0x2420,
[MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index ca38a30fbe91..bb252b36994d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -441,29 +441,29 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
-int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
- u8 state)
+enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- enum mlxsw_reg_spms_state spms_state;
- char *spms_pl;
- int err;
-
switch (state) {
case BR_STATE_FORWARDING:
- spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
- break;
+ return MLXSW_REG_SPMS_STATE_FORWARDING;
case BR_STATE_LEARNING:
- spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
- break;
+ return MLXSW_REG_SPMS_STATE_LEARNING;
case BR_STATE_LISTENING: /* fall-through */
case BR_STATE_DISABLED: /* fall-through */
case BR_STATE_BLOCKING:
- spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
- break;
+ return MLXSW_REG_SPMS_STATE_DISCARDING;
default:
BUG();
}
+}
+
+int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u8 state)
+{
+ enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spms_pl;
+ int err;
spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
if (!spms_pl)
@@ -1238,21 +1238,10 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
size_t len)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- u8 module = mlxsw_sp_port->mapping.module;
- u8 width = mlxsw_sp_port->mapping.width;
- u8 lane = mlxsw_sp_port->mapping.lane;
- int err;
-
- if (!mlxsw_sp_port->split)
- err = snprintf(name, len, "p%d", module + 1);
- else
- err = snprintf(name, len, "p%ds%d", module + 1,
- lane / width);
-
- if (err >= len)
- return -EINVAL;
- return 0;
+ return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core,
+ mlxsw_sp_port->local_port,
+ name, len);
}
static struct mlxsw_sp_port_mall_tc_entry *
@@ -2927,8 +2916,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
- mlxsw_sp_port, dev, mlxsw_sp_port->split,
- module);
+ mlxsw_sp_port, dev, module + 1,
+ mlxsw_sp_port->split, lane / width);
mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
return 0;
@@ -3666,6 +3655,15 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_lag_init;
}
+ /* Initialize SPAN before router and switchdev, so that those components
+ * can call mlxsw_sp_span_respin().
+ */
+ err = mlxsw_sp_span_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
+ goto err_span_init;
+ }
+
err = mlxsw_sp_switchdev_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
@@ -3684,15 +3682,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_afa_init;
}
- err = mlxsw_sp_span_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
- goto err_span_init;
- }
-
- /* Initialize router after SPAN is initialized, so that the FIB and
- * neighbor event handlers can issue SPAN respin.
- */
err = mlxsw_sp_router_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
@@ -3739,14 +3728,14 @@ err_acl_init:
err_netdev_notifier:
mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
- mlxsw_sp_span_fini(mlxsw_sp);
-err_span_init:
mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
+ mlxsw_sp_span_fini(mlxsw_sp);
+err_span_init:
mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
@@ -3768,10 +3757,10 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_acl_fini(mlxsw_sp);
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
mlxsw_sp_router_fini(mlxsw_sp);
- mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 804d4d2c8031..4a519d8edec8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -364,6 +364,7 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index, u32 maxrate);
+enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 stp_state);
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
u8 state);
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index b85b15851841..77b2adb29341 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4705,17 +4705,17 @@ static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
* are trapped to the CPU, so no need to program specific routes
* for them.
*/
- if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
+ if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
return true;
/* Multicast routes aren't supported, so ignore them. Neighbour
* Discovery packets are specifically trapped.
*/
- if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
+ if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
return true;
/* Cloned routes are irrelevant in the forwarding path. */
- if (rt->rt6i_flags & RTF_CACHE)
+ if (rt->fib6_flags & RTF_CACHE)
return true;
return false;
@@ -4759,7 +4759,7 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
{
/* RTF_CACHE routes are ignored */
- return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
+ return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
}
static struct fib6_info *
@@ -4784,16 +4784,16 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
/* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
* virtual router.
*/
- if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
+ if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
continue;
- if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
+ if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
break;
- if (rt->rt6i_metric < nrt->rt6i_metric)
+ if (rt->fib6_metric < nrt->fib6_metric)
continue;
- if (rt->rt6i_metric == nrt->rt6i_metric &&
+ if (rt->fib6_metric == nrt->fib6_metric &&
mlxsw_sp_fib6_rt_can_mp(rt))
return fib6_entry;
- if (rt->rt6i_metric > nrt->rt6i_metric)
+ if (rt->fib6_metric > nrt->fib6_metric)
break;
}
@@ -4899,7 +4899,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
- return rt->rt6i_flags & RTF_GATEWAY ||
+ return rt->fib6_flags & RTF_GATEWAY ||
mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
@@ -5092,9 +5092,9 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
* local, which will cause them to be trapped with a lower
* priority than packets that need to be locally received.
*/
- if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
+ if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
- else if (rt->rt6i_flags & RTF_REJECT)
+ else if (rt->fib6_flags & RTF_REJECT)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
@@ -5175,18 +5175,18 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
- if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
+ if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
continue;
- if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
+ if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
break;
- if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
+ if (replace && rt->fib6_metric == nrt->fib6_metric) {
if (mlxsw_sp_fib6_rt_can_mp(rt) ==
mlxsw_sp_fib6_rt_can_mp(nrt))
return fib6_entry;
if (mlxsw_sp_fib6_rt_can_mp(nrt))
fallback = fallback ?: fib6_entry;
}
- if (rt->rt6i_metric > nrt->rt6i_metric)
+ if (rt->fib6_metric > nrt->fib6_metric)
return fallback ?: fib6_entry;
}
@@ -5215,7 +5215,7 @@ mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
list_for_each_entry(last, &fib_node->entry_list, common.list) {
struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);
- if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
+ if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
break;
fib6_entry = last;
}
@@ -5275,22 +5275,22 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib *fib;
struct mlxsw_sp_vr *vr;
- vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
+ vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
if (!vr)
return NULL;
fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
- fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
- sizeof(rt->rt6i_dst.addr),
- rt->rt6i_dst.plen);
+ fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen);
if (!fib_node)
return NULL;
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
- if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
- rt->rt6i_metric == iter_rt->rt6i_metric &&
+ if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
+ rt->fib6_metric == iter_rt->fib6_metric &&
mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
return fib6_entry;
}
@@ -5325,16 +5325,16 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
if (mlxsw_sp->router->aborted)
return 0;
- if (rt->rt6i_src.plen)
+ if (rt->fib6_src.plen)
return -EINVAL;
if (mlxsw_sp_fib6_rt_should_ignore(rt))
return 0;
- fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
- &rt->rt6i_dst.addr,
- sizeof(rt->rt6i_dst.addr),
- rt->rt6i_dst.plen,
+ fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
+ &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen,
MLXSW_SP_L3_PROTO_IPV6);
if (IS_ERR(fib_node))
return PTR_ERR(fib_node);
@@ -5725,6 +5725,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_ADD:
replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
err = mlxsw_sp_router_fib6_add(mlxsw_sp,
@@ -5831,6 +5832,7 @@ static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
@@ -5882,24 +5884,24 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
switch (info->family) {
case AF_INET:
if (!fib4_rule_default(rule) && !rule->l3mdev)
- err = -1;
+ err = -EOPNOTSUPP;
break;
case AF_INET6:
if (!fib6_rule_default(rule) && !rule->l3mdev)
- err = -1;
+ err = -EOPNOTSUPP;
break;
case RTNL_FAMILY_IPMR:
if (!ipmr_rule_default(rule) && !rule->l3mdev)
- err = -1;
+ err = -EOPNOTSUPP;
break;
case RTNL_FAMILY_IP6MR:
if (!ip6mr_rule_default(rule) && !rule->l3mdev)
- err = -1;
+ err = -EOPNOTSUPP;
break;
}
if (err < 0)
- NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. Aborting offload");
+ NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
return err;
}
@@ -5926,8 +5928,15 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
case FIB_EVENT_RULE_DEL:
err = mlxsw_sp_router_fib_rule_event(event, info,
router->mlxsw_sp);
- if (!err)
- return NOTIFY_DONE;
+ if (!err || info->extack)
+ return notifier_from_errno(err);
+ break;
+ case FIB_EVENT_ENTRY_ADD:
+ if (router->aborted) {
+ NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
+ return notifier_from_errno(-EINVAL);
+ }
+ break;
}
fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 65a77708ff61..da3f7f527360 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -32,6 +32,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/if_bridge.h>
#include <linux/list.h>
#include <net/arp.h>
#include <net/gre.h>
@@ -39,8 +40,9 @@
#include <net/ip6_tunnel.h>
#include "spectrum.h"
-#include "spectrum_span.h"
#include "spectrum_ipip.h"
+#include "spectrum_span.h"
+#include "spectrum_switchdev.h"
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
@@ -135,14 +137,14 @@ struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
const void *pkey,
- struct net_device *l3edev,
+ struct net_device *dev,
unsigned char dmac[ETH_ALEN])
{
- struct neighbour *neigh = neigh_lookup(tbl, pkey, l3edev);
+ struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
int err = 0;
if (!neigh) {
- neigh = neigh_create(tbl, pkey, l3edev);
+ neigh = neigh_create(tbl, pkey, dev);
if (IS_ERR(neigh))
return PTR_ERR(neigh);
}
@@ -167,8 +169,97 @@ mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
return 0;
}
+static struct net_device *
+mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
+ unsigned char *dmac,
+ u16 *p_vid)
+{
+ struct bridge_vlan_info vinfo;
+ struct net_device *edev;
+ u16 vid = *p_vid;
+
+ if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
+ return NULL;
+ if (!vid ||
+ br_vlan_get_info(br_dev, vid, &vinfo) ||
+ !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return NULL;
+
+ edev = br_fdb_find_port(br_dev, dmac, vid);
+ if (!edev)
+ return NULL;
+
+ if (br_vlan_get_info(edev, vid, &vinfo))
+ return NULL;
+ if (!(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED))
+ *p_vid = vid;
+ return edev;
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
+ unsigned char *dmac)
+{
+ return br_fdb_find_port(br_dev, dmac, 0);
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
+ unsigned char dmac[ETH_ALEN],
+ u16 *p_vid)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+ enum mlxsw_reg_spms_state spms_state;
+ struct net_device *dev = NULL;
+ struct mlxsw_sp_port *port;
+ u8 stp_state;
+
+ if (br_vlan_enabled(br_dev))
+ dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
+ else if (!*p_vid)
+ dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
+ if (!dev)
+ return NULL;
+
+ port = mlxsw_sp_port_dev_lower_find(dev);
+ if (!port)
+ return NULL;
+
+ bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
+ if (!bridge_port)
+ return NULL;
+
+ stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
+ spms_state = mlxsw_sp_stp_spms_state(stp_state);
+ if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
+ return NULL;
+
+ return dev;
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
+ u16 *p_vid)
+{
+ *p_vid = vlan_dev_vlan_id(vlan_dev);
+ return vlan_dev_real_dev(vlan_dev);
+}
+
+static struct net_device *
+mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(lag_dev, dev, iter)
+ if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev))
+ return dev;
+
+ return NULL;
+}
+
static __maybe_unused int
-mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev,
+mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
union mlxsw_sp_l3addr saddr,
union mlxsw_sp_l3addr daddr,
union mlxsw_sp_l3addr gw,
@@ -177,21 +268,51 @@ mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev,
struct mlxsw_sp_span_parms *sparmsp)
{
unsigned char dmac[ETH_ALEN];
+ u16 vid = 0;
if (mlxsw_sp_l3addr_is_zero(gw))
gw = daddr;
- if (!l3edev || !mlxsw_sp_port_dev_check(l3edev) ||
- mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac))
- return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+ if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
+ goto unoffloadable;
+
+ if (is_vlan_dev(edev))
+ edev = mlxsw_sp_span_entry_vlan(edev, &vid);
- sparmsp->dest_port = netdev_priv(l3edev);
+ if (netif_is_bridge_master(edev)) {
+ edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
+ if (!edev)
+ goto unoffloadable;
+ }
+
+ if (is_vlan_dev(edev)) {
+ if (vid || !(edev->flags & IFF_UP))
+ goto unoffloadable;
+ edev = mlxsw_sp_span_entry_vlan(edev, &vid);
+ }
+
+ if (netif_is_lag_master(edev)) {
+ if (!(edev->flags & IFF_UP))
+ goto unoffloadable;
+ edev = mlxsw_sp_span_entry_lag(edev);
+ if (!edev)
+ goto unoffloadable;
+ }
+
+ if (!mlxsw_sp_port_dev_check(edev))
+ goto unoffloadable;
+
+ sparmsp->dest_port = netdev_priv(edev);
sparmsp->ttl = ttl;
memcpy(sparmsp->dmac, dmac, ETH_ALEN);
- memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN);
+ memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
sparmsp->saddr = saddr;
sparmsp->daddr = daddr;
+ sparmsp->vid = vid;
return 0;
+
+unoffloadable:
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
#if IS_ENABLED(CONFIG_NET_IPGRE)
@@ -268,9 +389,10 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
 /* Create a new port analyzer entry for local_port. */
mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
- sparms.dmac, false);
+ sparms.dmac, !!sparms.vid);
mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
sparms.ttl, sparms.smac,
be32_to_cpu(sparms.saddr.addr4),
@@ -368,9 +490,10 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
 /* Create a new port analyzer entry for local_port. */
mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
- sparms.dmac, false);
+ sparms.dmac, !!sparms.vid);
mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
sparms.saddr.addr6,
sparms.daddr.addr6);
@@ -394,6 +517,61 @@ struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
};
#endif
+static bool
+mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
+{
+ return is_vlan_dev(dev) &&
+ mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
+}
+
+static int
+mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ struct net_device *real_dev;
+ u16 vid;
+
+ if (!(to_dev->flags & IFF_UP))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
+ sparmsp->dest_port = netdev_priv(real_dev);
+ sparmsp->vid = vid;
+ return 0;
+}
+
+static int
+mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
+ mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
+ .can_handle = mlxsw_sp_span_vlan_can_handle,
+ .parms = mlxsw_sp_span_entry_vlan_parms,
+ .configure = mlxsw_sp_span_entry_vlan_configure,
+ .deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
+};
+
static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
&mlxsw_sp_span_entry_ops_phys,
@@ -403,6 +581,7 @@ struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
#if IS_ENABLED(CONFIG_IPV6_GRE)
&mlxsw_sp_span_entry_ops_gretap6,
#endif
+ &mlxsw_sp_span_entry_ops_vlan,
};
static int
@@ -766,7 +945,7 @@ int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
if (!span_entry)
- return -ENOENT;
+ return -ENOBUFS;
netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
span_entry->id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 4b87ec20e658..14a6de904db1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -63,6 +63,7 @@ struct mlxsw_sp_span_parms {
unsigned char smac[ETH_ALEN];
union mlxsw_sp_l3addr daddr;
union mlxsw_sp_l3addr saddr;
+ u16 vid;
};
struct mlxsw_sp_span_entry_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index c11c9a635866..8c9cf8ee9398 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -49,7 +49,9 @@
#include <linux/netlink.h>
#include <net/switchdev.h>
+#include "spectrum_span.h"
#include "spectrum_router.h"
+#include "spectrum_switchdev.h"
#include "spectrum.h"
#include "core.h"
#include "reg.h"
@@ -239,7 +241,7 @@ __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
return NULL;
}
-static struct mlxsw_sp_bridge_port *
+struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
struct net_device *brport_dev)
{
@@ -922,6 +924,9 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
break;
}
+ if (switchdev_trans_ph_commit(trans))
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+
return err;
}
@@ -1646,18 +1651,57 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+struct mlxsw_sp_span_respin_work {
+ struct work_struct work;
+ struct mlxsw_sp *mlxsw_sp;
+};
+
+static void mlxsw_sp_span_respin_work(struct work_struct *work)
+{
+ struct mlxsw_sp_span_respin_work *respin_work =
+ container_of(work, struct mlxsw_sp_span_respin_work, work);
+
+ rtnl_lock();
+ mlxsw_sp_span_respin(respin_work->mlxsw_sp);
+ rtnl_unlock();
+ kfree(respin_work);
+}
+
+static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_span_respin_work *respin_work;
+
+ respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
+ if (!respin_work)
+ return;
+
+ INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
+ respin_work->mlxsw_sp = mlxsw_sp;
+
+ mlxsw_core_schedule_work(&respin_work->work);
+}
+
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct switchdev_trans *trans)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ const struct switchdev_obj_port_vlan *vlan;
int err = 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);
+
+ if (switchdev_trans_ph_commit(trans)) {
+ /* The event is emitted before the changes are actually
+ * applied to the bridge. Therefore schedule the respin
+ * call for later, so that the respin logic sees the
+ * updated bridge state.
+ */
+ mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+ }
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
@@ -1718,13 +1762,11 @@ __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *dev = mlxsw_sp_port->dev;
int err;
- if (bridge_port->bridge_device->multicast_enabled) {
- if (bridge_port->bridge_device->multicast_enabled) {
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid,
- false);
- if (err)
- netdev_err(dev, "Unable to remove port from SMID\n");
- }
+ if (bridge_port->bridge_device->multicast_enabled &&
+ !bridge_port->mrouter) {
+ err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
+ if (err)
+ netdev_err(dev, "Unable to remove port from SMID\n");
}
err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
@@ -1808,6 +1850,8 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
break;
}
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+
return err;
}
@@ -2224,6 +2268,8 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
if (err)
break;
@@ -2233,10 +2279,20 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
break;
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
+ case SWITCHDEV_FDB_DEL_TO_BRIDGE:
+ /* These events are only used to potentially update an existing
+ * SPAN mirror.
+ */
+ break;
}
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+
out:
rtnl_unlock();
kfree(switchdev_work->fdb_info.addr);
@@ -2265,7 +2321,9 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
switch (event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
- case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
+ case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
+ case SWITCHDEV_FDB_DEL_TO_BRIDGE:
memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info));
switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
@@ -2297,6 +2355,12 @@ static struct notifier_block mlxsw_sp_switchdev_notifier = {
.notifier_call = mlxsw_sp_switchdev_event,
};
+u8
+mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
+{
+ return bridge_port->stp_state;
+}
+
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
new file mode 100644
index 000000000000..bc44d5effc28
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/netdevice.h>
+
+struct mlxsw_sp_bridge;
+struct mlxsw_sp_bridge_port;
+
+struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
+ struct net_device *brport_dev);
+
+u8 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index a655c5850aa6..3922c1cfe5f5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -417,13 +417,10 @@ static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
size_t len)
{
struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- int err;
-
- err = snprintf(name, len, "p%d", mlxsw_sx_port->mapping.module + 1);
- if (err >= len)
- return -EINVAL;
- return 0;
+ return mlxsw_core_port_get_phys_port_name(mlxsw_sx_port->mlxsw_sx->core,
+ mlxsw_sx_port->local_port,
+ name, len);
}
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
@@ -1149,7 +1146,7 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
}
mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
- mlxsw_sx_port, dev, false, 0);
+ mlxsw_sx_port, dev, module + 1, false, 0);
mlxsw_sx->ports[local_port] = mlxsw_sx_port;
return 0;
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
new file mode 100644
index 000000000000..36c84625d54e
--- /dev/null
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: (GPL-2.0 OR MIT)
+config NET_VENDOR_MICROSEMI
+ bool "Microsemi devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Microsemi devices.
+
+if NET_VENDOR_MICROSEMI
+
+config MSCC_OCELOT_SWITCH
+ tristate "Ocelot switch driver"
+ depends on NET_SWITCHDEV
+ depends on HAS_IOMEM
+ select PHYLIB
+ select REGMAP_MMIO
+ help
+ This driver supports the Ocelot network switch device.
+
+config MSCC_OCELOT_SWITCH_OCELOT
+ tristate "Ocelot switch driver on Ocelot"
+ depends on MSCC_OCELOT_SWITCH
+ help
+ This driver supports the Ocelot network switch device as present on
+ the Ocelot SoCs.
+
+endif # NET_VENDOR_MICROSEMI
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
new file mode 100644
index 000000000000..cb52a3b128ae
--- /dev/null
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: (GPL-2.0 OR MIT)
+obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o
+mscc_ocelot_common-y := ocelot.o ocelot_io.o
+mscc_ocelot_common-y += ocelot_regs.o
+obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_board.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
new file mode 100644
index 000000000000..c8c74aa548d9
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -0,0 +1,1333 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/skbuff.h>
+#include <net/arp.h>
+#include <net/netevent.h>
+#include <net/rtnetlink.h>
+#include <net/switchdev.h>
+
+#include "ocelot.h"
+
+/* MAC table entry types.
+ * ENTRYTYPE_NORMAL is subject to aging.
+ * ENTRYTYPE_LOCKED is not subject to aging.
+ * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
+ * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
+ */
+enum macaccess_entry_type {
+ ENTRYTYPE_NORMAL = 0,
+ ENTRYTYPE_LOCKED,
+ ENTRYTYPE_MACv4,
+ ENTRYTYPE_MACv6,
+};
+
+struct ocelot_mact_entry {
+ u8 mac[ETH_ALEN];
+ u16 vid;
+ enum macaccess_entry_type type;
+};
+
+static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
+{
+ unsigned int val, timeout = 10;
+
+ /* Wait for the issued mac table command to be completed, or timeout.
+ * When the command read from ANA_TABLES_MACACCESS is
+ * MACACCESS_CMD_IDLE, the issued command completed successfully.
+ */
+ do {
+ val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
+ val &= ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M;
+ } while (val != MACACCESS_CMD_IDLE && timeout--);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void ocelot_mact_select(struct ocelot *ocelot,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid)
+{
+ u32 macl = 0, mach = 0;
+
+ /* Set the MAC address to handle and the associated VLAN in a format
+ * understood by the hardware.
+ */
+ mach |= vid << 16;
+ mach |= mac[0] << 8;
+ mach |= mac[1] << 0;
+ macl |= mac[2] << 24;
+ macl |= mac[3] << 16;
+ macl |= mac[4] << 8;
+ macl |= mac[5] << 0;
+
+ ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA);
+ ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA);
+
+}
+
+static int ocelot_mact_learn(struct ocelot *ocelot, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ ocelot_mact_select(ocelot, mac, vid);
+
+ /* Issue a write command */
+ ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
+ ANA_TABLES_MACACCESS_DEST_IDX(port) |
+ ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN),
+ ANA_TABLES_MACACCESS);
+
+ return ocelot_mact_wait_for_completion(ocelot);
+}
+
+static int ocelot_mact_forget(struct ocelot *ocelot,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid)
+{
+ ocelot_mact_select(ocelot, mac, vid);
+
+ /* Issue a forget command */
+ ocelot_write(ocelot,
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET),
+ ANA_TABLES_MACACCESS);
+
+ return ocelot_mact_wait_for_completion(ocelot);
+}
+
+static void ocelot_mact_init(struct ocelot *ocelot)
+{
+ /* Configure the learning mode entries attributes:
+ * - Do not copy the frame to the CPU extraction queues.
+ * - Use the vlan and mac for the dmac lookup.
+ */
+ ocelot_rmw(ocelot, 0,
+ ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS
+ | ANA_AGENCTRL_LEARN_FWD_KILL
+ | ANA_AGENCTRL_LEARN_IGNORE_VLAN,
+ ANA_AGENCTRL);
+
+ /* Clear the MAC table */
+ ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
+}
+
+static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
+{
+ unsigned int val, timeout = 10;
+
+ /* Wait for the issued vlan table command to be completed, or timeout.
+ * When the command read from ANA_TABLES_VLANACCESS is
+ * ANA_TABLES_VLANACCESS_CMD_IDLE, the issued command completed successfully.
+ */
+ do {
+ val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
+ val &= ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M;
+ } while (val != ANA_TABLES_VLANACCESS_CMD_IDLE && timeout--);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void ocelot_vlan_init(struct ocelot *ocelot)
+{
+ /* Clear VLAN table, by default all ports are members of all VLANs */
+ ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
+ ANA_TABLES_VLANACCESS);
+ ocelot_vlant_wait_for_completion(ocelot);
+}
+
+/* Watermark encode
+ * Bit 8: Unit; 0:1, 1:16
+ * Bit 7-0: Value to be multiplied with unit
+ */
+static u16 ocelot_wm_enc(u16 value)
+{
+ if (value >= BIT(8))
+ return BIT(8) | (value / 16);
+
+ return value;
+}
+
+static void ocelot_port_adjust_link(struct net_device *dev)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ u8 p = port->chip_port;
+ int speed, atop_wm, mode = 0;
+
+ switch (dev->phydev->speed) {
+ case SPEED_10:
+ speed = OCELOT_SPEED_10;
+ break;
+ case SPEED_100:
+ speed = OCELOT_SPEED_100;
+ break;
+ case SPEED_1000:
+ speed = OCELOT_SPEED_1000;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
+ break;
+ case SPEED_2500:
+ speed = OCELOT_SPEED_2500;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
+ break;
+ default:
+ netdev_err(dev, "Unsupported PHY speed: %d\n",
+ dev->phydev->speed);
+ return;
+ }
+
+ phy_print_status(dev->phydev);
+
+ if (!dev->phydev->link)
+ return;
+
+ /* Only full duplex supported for now */
+ ocelot_port_writel(port, DEV_MAC_MODE_CFG_FDX_ENA |
+ mode, DEV_MAC_MODE_CFG);
+
+ /* Set MAC IFG Gaps
+ * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
+ * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
+ */
+ ocelot_port_writel(port, DEV_MAC_IFG_CFG_TX_IFG(5), DEV_MAC_IFG_CFG);
+
+ /* Load seed (0) and set MAC HDX late collision */
+ ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
+ DEV_MAC_HDX_CFG_SEED_LOAD,
+ DEV_MAC_HDX_CFG);
+ mdelay(1);
+ ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
+ DEV_MAC_HDX_CFG);
+
+ /* Disable HDX fast control */
+ ocelot_port_writel(port, DEV_PORT_MISC_HDX_FAST_DIS, DEV_PORT_MISC);
+
+ /* SGMII only for now */
+ ocelot_port_writel(port, PCS1G_MODE_CFG_SGMII_MODE_ENA, PCS1G_MODE_CFG);
+ ocelot_port_writel(port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
+
+ /* Enable PCS */
+ ocelot_port_writel(port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
+
+ /* No aneg on SGMII */
+ ocelot_port_writel(port, 0, PCS1G_ANEG_CFG);
+
+ /* No loopback */
+ ocelot_port_writel(port, 0, PCS1G_LB_CFG);
+
+ /* Set Max Length and maximum tags allowed */
+ ocelot_port_writel(port, VLAN_ETH_FRAME_LEN, DEV_MAC_MAXLEN_CFG);
+ ocelot_port_writel(port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
+ DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
+ DEV_MAC_TAGS_CFG);
+
+ /* Enable MAC module */
+ ocelot_port_writel(port, DEV_MAC_ENA_CFG_RX_ENA |
+ DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
+
+ /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
+ * reset */
+ ocelot_port_writel(port, DEV_CLOCK_CFG_LINK_SPEED(speed),
+ DEV_CLOCK_CFG);
+
+ /* Set SMAC of Pause frame (00:00:00:00:00:00) */
+ ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
+ ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_LOW_CFG);
+
+ /* No PFC */
+ ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
+ ANA_PFC_PFC_CFG, p);
+
+ /* Set Pause WM hysteresis
+ * 152 = 6 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ
+ * 101 = 4 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ
+ */
+ ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
+ SYS_PAUSE_CFG_PAUSE_STOP(101) |
+ SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, p);
+
+ /* Core: Enable port for frame transfer */
+ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, p);
+
+ /* Flow control */
+ ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
+ SYS_MAC_FC_CFG_RX_FC_ENA | SYS_MAC_FC_CFG_TX_FC_ENA |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
+ SYS_MAC_FC_CFG_FC_LINK_SPEED(speed),
+ SYS_MAC_FC_CFG, p);
+ ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, p);
+
+ /* Tail dropping watermark */
+ atop_wm = (ocelot->shared_queue_sz - 9 * VLAN_ETH_FRAME_LEN) / OCELOT_BUFFER_CELL_SZ;
+ ocelot_write_rix(ocelot, ocelot_wm_enc(9 * VLAN_ETH_FRAME_LEN),
+ SYS_ATOP, p);
+ ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
+}
+
+static int ocelot_port_open(struct net_device *dev)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ int err;
+
+ /* Enable receiving frames on the port, and activate auto-learning of
+ * MAC addresses.
+ */
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
+ ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port),
+ ANA_PORT_PORT_CFG, port->chip_port);
+
+ err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link,
+ PHY_INTERFACE_MODE_NA);
+ if (err) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return err;
+ }
+
+ dev->phydev = port->phy;
+
+ phy_attached_info(port->phy);
+ phy_start(port->phy);
+ return 0;
+}
+
+static int ocelot_port_stop(struct net_device *dev)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+
+ phy_disconnect(port->phy);
+
+ dev->phydev = NULL;
+
+ ocelot_port_writel(port, 0, DEV_MAC_ENA_CFG);
+ ocelot_rmw_rix(port->ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, port->chip_port);
+ return 0;
+}
+
+/* Generate the IFH for frame injection
+ *
+ * The IFH is a 128-bit value
+ * bit 127: bypass the analyzer processing
+ * bit 56-67: destination mask
+ * bit 28-29: pop_cnt: 3 disables all rewriting of the frame
+ * bit 20-27: cpu extraction queue mask
+ * bit 16: tag type 0: C-tag, 1: S-tag
+ * bit 0-11: VID
+ */
+static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
+{
+ ifh[0] = IFH_INJ_BYPASS;
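+ /* The destination port mask straddles the boundary between IFH words
+ * 1 and 2, so split it into its high and low bytes.
+ */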
+ ifh[1] = (0xff00 & info->port) >> 8;
+ ifh[2] = (0xff & info->port) << 24;
+ ifh[3] = IFH_INJ_POP_CNT_DISABLE | (info->cpuq << 20) |
+ (info->tag_type << 16) | info->vid;
+
+ return 0;
+}
+
+static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ u32 val, ifh[IFH_LEN];
+ struct frame_info info = {};
+ u8 grp = 0; /* Send everything on CPU group 0 */
+ unsigned int i, count, last;
+
+ val = ocelot_read(ocelot, QS_INJ_STATUS);
+ if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
+ (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp))))
+ return NETDEV_TX_BUSY;
+
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+ QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
+
+ info.port = BIT(port->chip_port);
+ info.cpuq = 0xff;
+ ocelot_gen_ifh(ifh, &info);
+
+ for (i = 0; i < IFH_LEN; i++)
+ ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+
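+ /* Copy the frame data into the injection FIFO one 32-bit word at a
+ * time: 'count' is the number of words covering the skb data and
+ * 'last' the number of valid bytes in the final word.
+ */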
+ count = (skb->len + 3) / 4;
+ last = skb->len % 4;
+ for (i = 0; i < count; i++)
+ ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
+
+ /* Add padding */
+ while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
+ ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
+ i++;
+ }
+
+ /* Indicate EOF and valid bytes in last word */
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+ QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
+ QS_INJ_CTRL_EOF,
+ QS_INJ_CTRL, grp);
+
+ /* Add dummy CRC */
+ ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
+ skb_tx_timestamp(skb);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void ocelot_mact_mc_reset(struct ocelot_port *port)
+{
+ struct ocelot *ocelot = port->ocelot;
+ struct netdev_hw_addr *ha, *n;
+
+ /* Free and forget all the MAC addresses stored in the port private mc
+ * list. These are mc addresses that were previously added by calling
+ * ocelot_mact_mc_add().
+ */
+ list_for_each_entry_safe(ha, n, &port->mc, list) {
+ ocelot_mact_forget(ocelot, ha->addr, port->pvid);
+ list_del(&ha->list);
+ kfree(ha);
+ }
+}
+
+static int ocelot_mact_mc_add(struct ocelot_port *port,
+ struct netdev_hw_addr *hw_addr)
+{
+ struct ocelot *ocelot = port->ocelot;
+ struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL);
+
+ if (!ha)
+ return -ENOMEM;
+
+ memcpy(ha, hw_addr, sizeof(*ha));
+ list_add_tail(&ha->list, &port->mc);
+
+ ocelot_mact_learn(ocelot, PGID_CPU, ha->addr, port->pvid,
+ ENTRYTYPE_LOCKED);
+
+ return 0;
+}
+
+static void ocelot_set_rx_mode(struct net_device *dev)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ struct netdev_hw_addr *ha;
+ int i;
+ u32 val;
+
+ /* This doesn't handle promiscuous mode because the bridge core is
+ * setting IFF_PROMISC on all slave interfaces and all frames would be
+ * forwarded to the CPU port.
+ */
+ val = GENMASK(ocelot->num_phys_ports - 1, 0);
+ for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++)
+ ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
+
+ /* Handle the device multicast addresses. First remove all the
+ * previously installed addresses and then add the latest ones to the
+ * mac table.
+ */
+ ocelot_mact_mc_reset(port);
+ netdev_for_each_mc_addr(ha, dev)
+ ocelot_mact_mc_add(port, ha);
+}
+
+static int ocelot_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = snprintf(buf, len, "p%d", port->chip_port);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ const struct sockaddr *addr = p;
+
+ /* Learn the new net device MAC address in the mac table. */
+ ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, port->pvid,
+ ENTRYTYPE_LOCKED);
+ /* Then forget the previous one. */
+ ocelot_mact_forget(ocelot, dev->dev_addr, port->pvid);
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+ return 0;
+}
+
+static void ocelot_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+
+ /* Configure the port to read the stats from */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port->chip_port),
+ SYS_STAT_CFG);
+
+ /* Get Rx stats */
+ stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
+ stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
+ ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
+ ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
+ ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
+ ocelot_read(ocelot, SYS_COUNT_RX_64) +
+ ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
+ ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
+ ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
+ ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
+ ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
+ stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
+ stats->rx_dropped = dev->stats.rx_dropped;
+
+ /* Get Tx stats */
+ stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
+ stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
+ ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
+ ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
+ ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
+ ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
+ ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX);
+ stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) +
+ ocelot_read(ocelot, SYS_COUNT_TX_AGING);
+ stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
+}
+
+static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid, u16 flags)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+
+ return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
+ ENTRYTYPE_NORMAL);
+}
+
+static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+
+ return ocelot_mact_forget(ocelot, addr, vid);
+}
+
+struct ocelot_dump_ctx {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+};
+
+static int ocelot_fdb_do_dump(struct ocelot_mact_entry *entry,
+ struct ocelot_dump_ctx *dump)
+{
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ if (dump->idx < dump->cb->args[2])
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dump->dev->ifindex;
+ ndm->ndm_state = NUD_REACHABLE;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac))
+ goto nla_put_failure;
+
+ if (entry->vid && nla_put_u16(dump->skb, NDA_VLAN, entry->vid))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
+}
+
+static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col,
+ struct ocelot_mact_entry *entry)
+{
+ struct ocelot *ocelot = port->ocelot;
+ char mac[ETH_ALEN];
+ u32 val, dst, macl, mach;
+
+ /* Set row and column to read from */
+ ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
+ ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col);
+
+ /* Issue a read command */
+ ocelot_write(ocelot,
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
+ ANA_TABLES_MACACCESS);
+
+ if (ocelot_mact_wait_for_completion(ocelot))
+ return -ETIMEDOUT;
+
+ /* Read the entry flags */
+ val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
+ if (!(val & ANA_TABLES_MACACCESS_VALID))
+ return -EINVAL;
+
+ /* If the entry read has another port configured as its destination,
+ * do not report it.
+ */
+ dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
+ if (dst != port->chip_port)
+ return -EINVAL;
+
+ /* Get the entry's MAC address and VLAN id */
+ macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA);
+ mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA);
+
+ mac[0] = (mach >> 8) & 0xff;
+ mac[1] = (mach >> 0) & 0xff;
+ mac[2] = (macl >> 24) & 0xff;
+ mac[3] = (macl >> 16) & 0xff;
+ mac[4] = (macl >> 8) & 0xff;
+ mac[5] = (macl >> 0) & 0xff;
+
+ entry->vid = (mach >> 16) & 0xfff;
+ ether_addr_copy(entry->mac, mac);
+
+ return 0;
+}
+
+static int ocelot_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev, int *idx)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ int i, j, ret = 0;
+ struct ocelot_dump_ctx dump = {
+ .dev = dev,
+ .skb = skb,
+ .cb = cb,
+ .idx = *idx,
+ };
+
+ struct ocelot_mact_entry entry;
+
+ /* Loop through all the mac table entries. There are 1024 rows of 4
+ * entries each.
+ */
+ for (i = 0; i < 1024; i++) {
+ for (j = 0; j < 4; j++) {
+ ret = ocelot_mact_read(port, i, j, &entry);
+ /* If the entry is invalid (wrong port, invalid...),
+ * skip it.
+ */
+ if (ret == -EINVAL)
+ continue;
+ else if (ret)
+ goto end;
+
+ ret = ocelot_fdb_do_dump(&entry, &dump);
+ if (ret)
+ goto end;
+ }
+ }
+
+end:
+ *idx = dump.idx;
+ return ret;
+}
+
+static const struct net_device_ops ocelot_port_netdev_ops = {
+ .ndo_open = ocelot_port_open,
+ .ndo_stop = ocelot_port_stop,
+ .ndo_start_xmit = ocelot_port_xmit,
+ .ndo_set_rx_mode = ocelot_set_rx_mode,
+ .ndo_get_phys_port_name = ocelot_port_get_phys_port_name,
+ .ndo_set_mac_address = ocelot_port_set_mac_address,
+ .ndo_get_stats64 = ocelot_get_stats64,
+ .ndo_fdb_add = ocelot_fdb_add,
+ .ndo_fdb_del = ocelot_fdb_del,
+ .ndo_fdb_dump = ocelot_fdb_dump,
+};
+
+static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct ocelot_port *port = netdev_priv(netdev);
+ struct ocelot *ocelot = port->ocelot;
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ocelot->num_stats; i++)
+ memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
+ ETH_GSTRING_LEN);
+}
+
+static void ocelot_check_stats(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
+ int i, j;
+
+ mutex_lock(&ocelot->stats_lock);
+
+ for (i = 0; i < ocelot->num_phys_ports; i++) {
+ /* Configure the port to read the stats from */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
+
+ for (j = 0; j < ocelot->num_stats; j++) {
+ u32 val;
+ unsigned int idx = i * ocelot->num_stats + j;
+
+ val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
+ ocelot->stats_layout[j].offset);
+
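+ /* The hardware counters are only 32 bits wide: detect a
+ * wrap-around by comparing against the stored low word and
+ * carry it into the upper half of the 64-bit software
+ * counter.
+ */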
+ if (val < (ocelot->stats[idx] & U32_MAX))
+ ocelot->stats[idx] += (u64)1 << 32;
+
+ ocelot->stats[idx] = (ocelot->stats[idx] &
+ ~(u64)U32_MAX) + val;
+ }
+ }
+
+ cancel_delayed_work(&ocelot->stats_work);
+ queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+ OCELOT_STATS_CHECK_DELAY);
+
+ mutex_unlock(&ocelot->stats_lock);
+}
+
+static void ocelot_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ int i;
+
+ /* check and update now */
+ ocelot_check_stats(&ocelot->stats_work.work);
+
+ /* Copy all counters */
+ for (i = 0; i < ocelot->num_stats; i++)
+ *data++ = ocelot->stats[port->chip_port * ocelot->num_stats + i];
+}
+
+static int ocelot_get_sset_count(struct net_device *dev, int sset)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+ return ocelot->num_stats;
+}
+
+static const struct ethtool_ops ocelot_ethtool_ops = {
+ .get_strings = ocelot_get_strings,
+ .get_ethtool_stats = ocelot_get_ethtool_stats,
+ .get_sset_count = ocelot_get_sset_count,
+};
+
+static int ocelot_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(ocelot->base_mac);
+ memcpy(&attr->u.ppid.id, &ocelot->base_mac,
+ attr->u.ppid.id_len);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
+ struct switchdev_trans *trans,
+ u8 state)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ u32 port_cfg;
+ int port, i;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (!(BIT(ocelot_port->chip_port) & ocelot->bridge_mask))
+ return 0;
+
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG,
+ ocelot_port->chip_port);
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ ocelot->bridge_fwd_mask |= BIT(ocelot_port->chip_port);
+ /* Fallthrough */
+ case BR_STATE_LEARNING:
+ port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
+ break;
+
+ default:
+ port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA;
+ ocelot->bridge_fwd_mask &= ~BIT(ocelot_port->chip_port);
+ break;
+ }
+
+ ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG,
+ ocelot_port->chip_port);
+
+ /* Apply FWD mask. The loop is needed to add/remove the current port as
+ * a source for the other ports.
+ */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (ocelot->bridge_fwd_mask & BIT(port)) {
+ unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(port);
+
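+ /* If the port is part of a LAG, remove the other members
+ * of that LAG from its forwarding mask.
+ */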
+ for (i = 0; i < ocelot->num_phys_ports; i++) {
+ unsigned long bond_mask = ocelot->lags[i];
+
+ if (!bond_mask)
+ continue;
+
+ if (bond_mask & BIT(port)) {
+ mask &= ~bond_mask;
+ break;
+ }
+ }
+
+ ocelot_write_rix(ocelot,
+ BIT(ocelot->num_phys_ports) | mask,
+ ANA_PGID_PGID, PGID_SRC + port);
+ } else {
+ /* Only the CPU port; this is compatible with link
+ * aggregation.
+ */
+ ocelot_write_rix(ocelot,
+ BIT(ocelot->num_phys_ports),
+ ANA_PGID_PGID, PGID_SRC + port);
+ }
+ }
+
+ return 0;
+}
+
+static void ocelot_port_attr_ageing_set(struct ocelot_port *ocelot_port,
+ unsigned long ageing_clock_t)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
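+ /* Entries are aged out after two AGE_PERIOD intervals, so program
+ * half of the requested ageing time.
+ */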
+ ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(ageing_time / 2),
+ ANA_AUTOAGE);
+}
+
+static void ocelot_port_attr_mc_set(struct ocelot_port *port, bool mc)
+{
+ struct ocelot *ocelot = port->ocelot;
+ u32 val = ocelot_read_gix(ocelot, ANA_PORT_CPU_FWD_CFG,
+ port->chip_port);
+
+ if (mc)
+ val |= ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
+ else
+ val &= ~(ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA);
+
+ ocelot_write_gix(ocelot, val, ANA_PORT_CPU_FWD_CFG, port->chip_port);
+}
+
+static int ocelot_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct ocelot_port *ocelot_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ocelot_port_attr_stp_state_set(ocelot_port, trans,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct ocelot_multicast *mc;
+
+ list_for_each_entry(mc, &ocelot->multicast, list) {
+ if (ether_addr_equal(mc->addr, addr) && mc->vid == vid)
+ return mc;
+ }
+
+ return NULL;
+}
+
+static int ocelot_port_obj_add_mdb(struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ struct ocelot_multicast *mc;
+ unsigned char addr[ETH_ALEN];
+ u16 vid = mdb->vid;
+ bool new = false;
+
+ if (!vid)
+ vid = 1;
+
+ mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
+ if (!mc) {
+ mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ memcpy(mc->addr, mdb->addr, ETH_ALEN);
+ mc->vid = vid;
+
+ list_add_tail(&mc->list, &ocelot->multicast);
+ new = true;
+ }
+
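+ /* For ENTRYTYPE_MACv4 entries, the destination port mask is encoded
+ * in the first three bytes of the MAC address written to the table.
+ */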
+ memcpy(addr, mc->addr, ETH_ALEN);
+ addr[0] = 0;
+
+ if (!new) {
+ addr[2] = mc->ports & 0xff;
+ addr[1] = mc->ports >> 8;
+ ocelot_mact_forget(ocelot, addr, vid);
+ }
+
+ mc->ports |= BIT(port->chip_port);
+ addr[2] = mc->ports & 0xff;
+ addr[1] = mc->ports >> 8;
+
+ return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4);
+}
+
+static int ocelot_port_obj_del_mdb(struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ struct ocelot_multicast *mc;
+ unsigned char addr[ETH_ALEN];
+ u16 vid = mdb->vid;
+
+ if (!vid)
+ vid = 1;
+
+ mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
+ if (!mc)
+ return -ENOENT;
+
+ memcpy(addr, mc->addr, ETH_ALEN);
+ addr[2] = mc->ports & 0xff;
+ addr[1] = mc->ports >> 8;
+ addr[0] = 0;
+ ocelot_mact_forget(ocelot, addr, vid);
+
+ mc->ports &= ~BIT(port->chip_port);
+ if (!mc->ports) {
+ list_del(&mc->list);
+ devm_kfree(ocelot->dev, mc);
+ return 0;
+ }
+
+ addr[2] = mc->ports & 0xff;
+ addr[1] = mc->ports >> 8;
+
+ return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4);
+}
+
+static int ocelot_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans)
+{
+ int ret = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
+ trans);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int ocelot_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ int ret = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static const struct switchdev_ops ocelot_port_switchdev_ops = {
+ .switchdev_port_attr_get = ocelot_port_attr_get,
+ .switchdev_port_attr_set = ocelot_port_attr_set,
+ .switchdev_port_obj_add = ocelot_port_obj_add,
+ .switchdev_port_obj_del = ocelot_port_obj_del,
+};
+
+static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
+ struct net_device *bridge)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ if (!ocelot->bridge_mask) {
+ ocelot->hw_bridge_dev = bridge;
+ } else {
+ if (ocelot->hw_bridge_dev != bridge)
+ /* Adding the port to a second bridge is not
+ * supported.
+ */
+ return -ENODEV;
+ }
+
+ ocelot->bridge_mask |= BIT(ocelot_port->chip_port);
+
+ return 0;
+}
+
+static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port,
+ struct net_device *bridge)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ ocelot->bridge_mask &= ~BIT(ocelot_port->chip_port);
+
+ if (!ocelot->bridge_mask)
+ ocelot->hw_bridge_dev = NULL;
+}
+
+/* Checks if the net_device instance given to us originates from our driver. */
+static bool ocelot_netdevice_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &ocelot_port_netdev_ops;
+}
+
+static int ocelot_netdevice_port_event(struct net_device *dev,
+ unsigned long event,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct ocelot_port *ocelot_port = netdev_priv(dev);
+ int err = 0;
+
+ if (!ocelot_netdevice_dev_check(dev))
+ return 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ err = ocelot_port_bridge_join(ocelot_port,
+ info->upper_dev);
+ else
+ ocelot_port_bridge_leave(ocelot_port,
+ info->upper_dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int ocelot_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret = 0;
+
+ if (netif_is_lag_master(dev)) {
+ struct net_device *slave;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(dev, slave, iter) {
+ ret = ocelot_netdevice_port_event(slave, event, info);
+ if (ret)
+ goto notify;
+ }
+ } else {
+ ret = ocelot_netdevice_port_event(dev, event, info);
+ }
+
+notify:
+ return notifier_from_errno(ret);
+}
+
+struct notifier_block ocelot_netdevice_nb __read_mostly = {
+ .notifier_call = ocelot_netdevice_event,
+};
+EXPORT_SYMBOL(ocelot_netdevice_nb);
+
+int ocelot_probe_port(struct ocelot *ocelot, u8 port,
+ void __iomem *regs,
+ struct phy_device *phy)
+{
+ struct ocelot_port *ocelot_port;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct ocelot_port));
+ if (!dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, ocelot->dev);
+ ocelot_port = netdev_priv(dev);
+ ocelot_port->dev = dev;
+ ocelot_port->ocelot = ocelot;
+ ocelot_port->regs = regs;
+ ocelot_port->chip_port = port;
+ ocelot_port->phy = phy;
+ INIT_LIST_HEAD(&ocelot_port->mc);
+ ocelot->ports[port] = ocelot_port;
+
+ dev->netdev_ops = &ocelot_port_netdev_ops;
+ dev->ethtool_ops = &ocelot_ethtool_ops;
+ dev->switchdev_ops = &ocelot_port_switchdev_ops;
+
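+ /* Derive the port MAC address from the switch base MAC address by
+ * adding the port number to the last octet.
+ */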
+ memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
+ dev->dev_addr[ETH_ALEN - 1] += port;
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
+ ENTRYTYPE_LOCKED);
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(ocelot->dev, "register_netdev failed\n");
+ goto err_register_netdev;
+ }
+
+ return 0;
+
+err_register_netdev:
+ free_netdev(dev);
+ return err;
+}
+EXPORT_SYMBOL(ocelot_probe_port);
+
+int ocelot_init(struct ocelot *ocelot)
+{
+ u32 port;
+ int i, cpu = ocelot->num_phys_ports;
+ char queue_name[32];
+
+ ocelot->stats = devm_kcalloc(ocelot->dev,
+ ocelot->num_phys_ports * ocelot->num_stats,
+ sizeof(u64), GFP_KERNEL);
+ if (!ocelot->stats)
+ return -ENOMEM;
+
+ mutex_init(&ocelot->stats_lock);
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(ocelot->dev));
+ ocelot->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!ocelot->stats_queue)
+ return -ENOMEM;
+
+ ocelot_mact_init(ocelot);
+ ocelot_vlan_init(ocelot);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ /* Clear all counters (5 groups) */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f),
+ SYS_STAT_CFG);
+ }
+
+ /* Only use S-Tag */
+ ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG);
+
+ /* Aggregation mode */
+ ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA |
+ ANA_AGGR_CFG_AC_DMAC_ENA |
+ ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, ANA_AGGR_CFG);
+
+ /* Set MAC age time to default value. The entry is aged after
+ * 2*AGE_PERIOD
+ */
+ ocelot_write(ocelot,
+ ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ),
+ ANA_AUTOAGE);
+
+ /* Disable learning for frames discarded by VLAN ingress filtering */
+ regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1);
+
+ /* Setup frame ageing - fixed value "2 sec" - in 6.5 us units */
+ ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA |
+ SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
+
+ /* Setup flooding PGIDs */
+ ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+ ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST(PGID_UC),
+ ANA_FLOODING, 0);
+ ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
+ ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
+ ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC),
+ ANA_FLOODING_IPMC);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ /* Transmit the frame to the local port. */
+ ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
+ /* Do not forward BPDU frames to the front ports. */
+ ocelot_write_gix(ocelot,
+ ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
+ ANA_PORT_CPU_FWD_BPDU_CFG,
+ port);
+ /* Ensure bridging is disabled */
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
+ }
+
+ /* Configure and enable the CPU port. */
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
+ ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
+ ANA_PORT_PORT_CFG, cpu);
+
+ /* Allow broadcast MAC frames. */
+ for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) {
+ u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
+
+ ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
+ }
+ ocelot_write_rix(ocelot,
+ ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+ ANA_PGID_PGID, PGID_MC);
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
+
+ /* CPU port Injection/Extraction configuration */
+ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, cpu);
+ ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(1) |
+ SYS_PORT_MODE_INCL_INJ_HDR(1), SYS_PORT_MODE, cpu);
+ /* Allow manual injection via DEVCPU_QS registers, and byte-swap these
+ * registers' endianness.
+ */
+ ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP |
+ QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0);
+ ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP |
+ QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0);
+ ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) |
+ ANA_CPUQ_CFG_CPUQ_LRN(2) |
+ ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) |
+ ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) |
+ ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) |
+ ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) |
+ ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) |
+ ANA_CPUQ_CFG_CPUQ_IGMP(6) |
+ ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG);
+ for (i = 0; i < 16; i++)
+ ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) |
+ ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
+ ANA_CPUQ_8021_CFG, i);
+
+ INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats);
+ queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+ OCELOT_STATS_CHECK_DELAY);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_init);
+
+void ocelot_deinit(struct ocelot *ocelot)
+{
+ destroy_workqueue(ocelot->stats_queue);
+ mutex_destroy(&ocelot->stats_lock);
+}
+EXPORT_SYMBOL(ocelot_deinit);
+
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
new file mode 100644
index 000000000000..097bd12a10d4
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -0,0 +1,572 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_H_
+#define _MSCC_OCELOT_H_
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "ocelot_ana.h"
+#include "ocelot_dev.h"
+#include "ocelot_hsio.h"
+#include "ocelot_qsys.h"
+#include "ocelot_rew.h"
+#include "ocelot_sys.h"
+#include "ocelot_qs.h"
+
+#define PGID_AGGR 64
+#define PGID_SRC 80
+
+/* Reserved PGIDs */
+#define PGID_CPU (PGID_AGGR - 5)
+#define PGID_UC (PGID_AGGR - 4)
+#define PGID_MC (PGID_AGGR - 3)
+#define PGID_MCIPV4 (PGID_AGGR - 2)
+#define PGID_MCIPV6 (PGID_AGGR - 1)
+
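+/* Size in bytes of one cell of the shared packet buffer */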
+#define OCELOT_BUFFER_CELL_SZ 60
+
+#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
+
+#define IFH_LEN 4
+
+struct frame_info {
+ u32 len;
+ u16 port;
+ u16 vid;
+ u8 cpuq;
+ u8 tag_type;
+};
+
+#define IFH_INJ_BYPASS BIT(31)
+#define IFH_INJ_POP_CNT_DISABLE (3 << 28)
+
+#define IFH_TAG_TYPE_C 0
+#define IFH_TAG_TYPE_S 1
+
+#define OCELOT_SPEED_2500 0
+#define OCELOT_SPEED_1000 1
+#define OCELOT_SPEED_100 2
+#define OCELOT_SPEED_10 3
+
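+/* Each register enum value encodes its target block in the bits above
+ * TARGET_OFFSET and an index into the per-target address map in the bits
+ * covered by REG_MASK.
+ */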
+#define TARGET_OFFSET 24
+#define REG_MASK GENMASK(TARGET_OFFSET - 1, 0)
+#define REG(reg, offset) [reg & REG_MASK] = offset
+
+enum ocelot_target {
+ ANA = 1,
+ QS,
+ QSYS,
+ REW,
+ SYS,
+ HSIO,
+ TARGET_MAX,
+};
+
+enum ocelot_reg {
+ ANA_ADVLEARN = ANA << TARGET_OFFSET,
+ ANA_VLANMASK,
+ ANA_PORT_B_DOMAIN,
+ ANA_ANAGEFIL,
+ ANA_ANEVENTS,
+ ANA_STORMLIMIT_BURST,
+ ANA_STORMLIMIT_CFG,
+ ANA_ISOLATED_PORTS,
+ ANA_COMMUNITY_PORTS,
+ ANA_AUTOAGE,
+ ANA_MACTOPTIONS,
+ ANA_LEARNDISC,
+ ANA_AGENCTRL,
+ ANA_MIRRORPORTS,
+ ANA_EMIRRORPORTS,
+ ANA_FLOODING,
+ ANA_FLOODING_IPMC,
+ ANA_SFLOW_CFG,
+ ANA_PORT_MODE,
+ ANA_CUT_THRU_CFG,
+ ANA_PGID_PGID,
+ ANA_TABLES_ANMOVED,
+ ANA_TABLES_MACHDATA,
+ ANA_TABLES_MACLDATA,
+ ANA_TABLES_STREAMDATA,
+ ANA_TABLES_MACACCESS,
+ ANA_TABLES_MACTINDX,
+ ANA_TABLES_VLANACCESS,
+ ANA_TABLES_VLANTIDX,
+ ANA_TABLES_ISDXACCESS,
+ ANA_TABLES_ISDXTIDX,
+ ANA_TABLES_ENTRYLIM,
+ ANA_TABLES_PTP_ID_HIGH,
+ ANA_TABLES_PTP_ID_LOW,
+ ANA_TABLES_STREAMACCESS,
+ ANA_TABLES_STREAMTIDX,
+ ANA_TABLES_SEQ_HISTORY,
+ ANA_TABLES_SEQ_MASK,
+ ANA_TABLES_SFID_MASK,
+ ANA_TABLES_SFIDACCESS,
+ ANA_TABLES_SFIDTIDX,
+ ANA_MSTI_STATE,
+ ANA_OAM_UPM_LM_CNT,
+ ANA_SG_ACCESS_CTRL,
+ ANA_SG_CONFIG_REG_1,
+ ANA_SG_CONFIG_REG_2,
+ ANA_SG_CONFIG_REG_3,
+ ANA_SG_CONFIG_REG_4,
+ ANA_SG_CONFIG_REG_5,
+ ANA_SG_GCL_GS_CONFIG,
+ ANA_SG_GCL_TI_CONFIG,
+ ANA_SG_STATUS_REG_1,
+ ANA_SG_STATUS_REG_2,
+ ANA_SG_STATUS_REG_3,
+ ANA_PORT_VLAN_CFG,
+ ANA_PORT_DROP_CFG,
+ ANA_PORT_QOS_CFG,
+ ANA_PORT_VCAP_CFG,
+ ANA_PORT_VCAP_S1_KEY_CFG,
+ ANA_PORT_VCAP_S2_CFG,
+ ANA_PORT_PCP_DEI_MAP,
+ ANA_PORT_CPU_FWD_CFG,
+ ANA_PORT_CPU_FWD_BPDU_CFG,
+ ANA_PORT_CPU_FWD_GARP_CFG,
+ ANA_PORT_CPU_FWD_CCM_CFG,
+ ANA_PORT_PORT_CFG,
+ ANA_PORT_POL_CFG,
+ ANA_PORT_PTP_CFG,
+ ANA_PORT_PTP_DLY1_CFG,
+ ANA_PORT_PTP_DLY2_CFG,
+ ANA_PORT_SFID_CFG,
+ ANA_PFC_PFC_CFG,
+ ANA_PFC_PFC_TIMER,
+ ANA_IPT_OAM_MEP_CFG,
+ ANA_IPT_IPT,
+ ANA_PPT_PPT,
+ ANA_FID_MAP_FID_MAP,
+ ANA_AGGR_CFG,
+ ANA_CPUQ_CFG,
+ ANA_CPUQ_CFG2,
+ ANA_CPUQ_8021_CFG,
+ ANA_DSCP_CFG,
+ ANA_DSCP_REWR_CFG,
+ ANA_VCAP_RNG_TYPE_CFG,
+ ANA_VCAP_RNG_VAL_CFG,
+ ANA_VRAP_CFG,
+ ANA_VRAP_HDR_DATA,
+ ANA_VRAP_HDR_MASK,
+ ANA_DISCARD_CFG,
+ ANA_FID_CFG,
+ ANA_POL_PIR_CFG,
+ ANA_POL_CIR_CFG,
+ ANA_POL_MODE_CFG,
+ ANA_POL_PIR_STATE,
+ ANA_POL_CIR_STATE,
+ ANA_POL_STATE,
+ ANA_POL_FLOWC,
+ ANA_POL_HYST,
+ ANA_POL_MISC_CFG,
+ QS_XTR_GRP_CFG = QS << TARGET_OFFSET,
+ QS_XTR_RD,
+ QS_XTR_FRM_PRUNING,
+ QS_XTR_FLUSH,
+ QS_XTR_DATA_PRESENT,
+ QS_XTR_CFG,
+ QS_INJ_GRP_CFG,
+ QS_INJ_WR,
+ QS_INJ_CTRL,
+ QS_INJ_STATUS,
+ QS_INJ_ERR,
+ QS_INH_DBG,
+ QSYS_PORT_MODE = QSYS << TARGET_OFFSET,
+ QSYS_SWITCH_PORT_MODE,
+ QSYS_STAT_CNT_CFG,
+ QSYS_EEE_CFG,
+ QSYS_EEE_THRES,
+ QSYS_IGR_NO_SHARING,
+ QSYS_EGR_NO_SHARING,
+ QSYS_SW_STATUS,
+ QSYS_EXT_CPU_CFG,
+ QSYS_PAD_CFG,
+ QSYS_CPU_GROUP_MAP,
+ QSYS_QMAP,
+ QSYS_ISDX_SGRP,
+ QSYS_TIMED_FRAME_ENTRY,
+ QSYS_TFRM_MISC,
+ QSYS_TFRM_PORT_DLY,
+ QSYS_TFRM_TIMER_CFG_1,
+ QSYS_TFRM_TIMER_CFG_2,
+ QSYS_TFRM_TIMER_CFG_3,
+ QSYS_TFRM_TIMER_CFG_4,
+ QSYS_TFRM_TIMER_CFG_5,
+ QSYS_TFRM_TIMER_CFG_6,
+ QSYS_TFRM_TIMER_CFG_7,
+ QSYS_TFRM_TIMER_CFG_8,
+ QSYS_RED_PROFILE,
+ QSYS_RES_QOS_MODE,
+ QSYS_RES_CFG,
+ QSYS_RES_STAT,
+ QSYS_EGR_DROP_MODE,
+ QSYS_EQ_CTRL,
+ QSYS_EVENTS_CORE,
+ QSYS_QMAXSDU_CFG_0,
+ QSYS_QMAXSDU_CFG_1,
+ QSYS_QMAXSDU_CFG_2,
+ QSYS_QMAXSDU_CFG_3,
+ QSYS_QMAXSDU_CFG_4,
+ QSYS_QMAXSDU_CFG_5,
+ QSYS_QMAXSDU_CFG_6,
+ QSYS_QMAXSDU_CFG_7,
+ QSYS_PREEMPTION_CFG,
+ QSYS_CIR_CFG,
+ QSYS_EIR_CFG,
+ QSYS_SE_CFG,
+ QSYS_SE_DWRR_CFG,
+ QSYS_SE_CONNECT,
+ QSYS_SE_DLB_SENSE,
+ QSYS_CIR_STATE,
+ QSYS_EIR_STATE,
+ QSYS_SE_STATE,
+ QSYS_HSCH_MISC_CFG,
+ QSYS_TAG_CONFIG,
+ QSYS_TAS_PARAM_CFG_CTRL,
+ QSYS_PORT_MAX_SDU,
+ QSYS_PARAM_CFG_REG_1,
+ QSYS_PARAM_CFG_REG_2,
+ QSYS_PARAM_CFG_REG_3,
+ QSYS_PARAM_CFG_REG_4,
+ QSYS_PARAM_CFG_REG_5,
+ QSYS_GCL_CFG_REG_1,
+ QSYS_GCL_CFG_REG_2,
+ QSYS_PARAM_STATUS_REG_1,
+ QSYS_PARAM_STATUS_REG_2,
+ QSYS_PARAM_STATUS_REG_3,
+ QSYS_PARAM_STATUS_REG_4,
+ QSYS_PARAM_STATUS_REG_5,
+ QSYS_PARAM_STATUS_REG_6,
+ QSYS_PARAM_STATUS_REG_7,
+ QSYS_PARAM_STATUS_REG_8,
+ QSYS_PARAM_STATUS_REG_9,
+ QSYS_GCL_STATUS_REG_1,
+ QSYS_GCL_STATUS_REG_2,
+ REW_PORT_VLAN_CFG = REW << TARGET_OFFSET,
+ REW_TAG_CFG,
+ REW_PORT_CFG,
+ REW_DSCP_CFG,
+ REW_PCP_DEI_QOS_MAP_CFG,
+ REW_PTP_CFG,
+ REW_PTP_DLY1_CFG,
+ REW_RED_TAG_CFG,
+ REW_DSCP_REMAP_DP1_CFG,
+ REW_DSCP_REMAP_CFG,
+ REW_STAT_CFG,
+ REW_REW_STICKY,
+ REW_PPT,
+ SYS_COUNT_RX_OCTETS = SYS << TARGET_OFFSET,
+ SYS_COUNT_RX_UNICAST,
+ SYS_COUNT_RX_MULTICAST,
+ SYS_COUNT_RX_BROADCAST,
+ SYS_COUNT_RX_SHORTS,
+ SYS_COUNT_RX_FRAGMENTS,
+ SYS_COUNT_RX_JABBERS,
+ SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ SYS_COUNT_RX_SYM_ERRS,
+ SYS_COUNT_RX_64,
+ SYS_COUNT_RX_65_127,
+ SYS_COUNT_RX_128_255,
+ SYS_COUNT_RX_256_1023,
+ SYS_COUNT_RX_1024_1526,
+ SYS_COUNT_RX_1527_MAX,
+ SYS_COUNT_RX_PAUSE,
+ SYS_COUNT_RX_CONTROL,
+ SYS_COUNT_RX_LONGS,
+ SYS_COUNT_RX_CLASSIFIED_DROPS,
+ SYS_COUNT_TX_OCTETS,
+ SYS_COUNT_TX_UNICAST,
+ SYS_COUNT_TX_MULTICAST,
+ SYS_COUNT_TX_BROADCAST,
+ SYS_COUNT_TX_COLLISION,
+ SYS_COUNT_TX_DROPS,
+ SYS_COUNT_TX_PAUSE,
+ SYS_COUNT_TX_64,
+ SYS_COUNT_TX_65_127,
+ SYS_COUNT_TX_128_511,
+ SYS_COUNT_TX_512_1023,
+ SYS_COUNT_TX_1024_1526,
+ SYS_COUNT_TX_1527_MAX,
+ SYS_COUNT_TX_AGING,
+ SYS_RESET_CFG,
+ SYS_SR_ETYPE_CFG,
+ SYS_VLAN_ETYPE_CFG,
+ SYS_PORT_MODE,
+ SYS_FRONT_PORT_MODE,
+ SYS_FRM_AGING,
+ SYS_STAT_CFG,
+ SYS_SW_STATUS,
+ SYS_MISC_CFG,
+ SYS_REW_MAC_HIGH_CFG,
+ SYS_REW_MAC_LOW_CFG,
+ SYS_TIMESTAMP_OFFSET,
+ SYS_CMID,
+ SYS_PAUSE_CFG,
+ SYS_PAUSE_TOT_CFG,
+ SYS_ATOP,
+ SYS_ATOP_TOT_CFG,
+ SYS_MAC_FC_CFG,
+ SYS_MMGT,
+ SYS_MMGT_FAST,
+ SYS_EVENTS_DIF,
+ SYS_EVENTS_CORE,
+ SYS_CNT,
+ SYS_PTP_STATUS,
+ SYS_PTP_TXSTAMP,
+ SYS_PTP_NXT,
+ SYS_PTP_CFG,
+ SYS_RAM_INIT,
+ SYS_CM_ADDR,
+ SYS_CM_DATA_WR,
+ SYS_CM_DATA_RD,
+ SYS_CM_OP,
+ SYS_CM_DATA,
+ HSIO_PLL5G_CFG0 = HSIO << TARGET_OFFSET,
+ HSIO_PLL5G_CFG1,
+ HSIO_PLL5G_CFG2,
+ HSIO_PLL5G_CFG3,
+ HSIO_PLL5G_CFG4,
+ HSIO_PLL5G_CFG5,
+ HSIO_PLL5G_CFG6,
+ HSIO_PLL5G_STATUS0,
+ HSIO_PLL5G_STATUS1,
+ HSIO_PLL5G_BIST_CFG0,
+ HSIO_PLL5G_BIST_CFG1,
+ HSIO_PLL5G_BIST_CFG2,
+ HSIO_PLL5G_BIST_STAT0,
+ HSIO_PLL5G_BIST_STAT1,
+ HSIO_RCOMP_CFG0,
+ HSIO_RCOMP_STATUS,
+ HSIO_SYNC_ETH_CFG,
+ HSIO_SYNC_ETH_PLL_CFG,
+ HSIO_S1G_DES_CFG,
+ HSIO_S1G_IB_CFG,
+ HSIO_S1G_OB_CFG,
+ HSIO_S1G_SER_CFG,
+ HSIO_S1G_COMMON_CFG,
+ HSIO_S1G_PLL_CFG,
+ HSIO_S1G_PLL_STATUS,
+ HSIO_S1G_DFT_CFG0,
+ HSIO_S1G_DFT_CFG1,
+ HSIO_S1G_DFT_CFG2,
+ HSIO_S1G_TP_CFG,
+ HSIO_S1G_RC_PLL_BIST_CFG,
+ HSIO_S1G_MISC_CFG,
+ HSIO_S1G_DFT_STATUS,
+ HSIO_S1G_MISC_STATUS,
+ HSIO_MCB_S1G_ADDR_CFG,
+ HSIO_S6G_DIG_CFG,
+ HSIO_S6G_DFT_CFG0,
+ HSIO_S6G_DFT_CFG1,
+ HSIO_S6G_DFT_CFG2,
+ HSIO_S6G_TP_CFG0,
+ HSIO_S6G_TP_CFG1,
+ HSIO_S6G_RC_PLL_BIST_CFG,
+ HSIO_S6G_MISC_CFG,
+ HSIO_S6G_OB_ANEG_CFG,
+ HSIO_S6G_DFT_STATUS,
+ HSIO_S6G_ERR_CNT,
+ HSIO_S6G_MISC_STATUS,
+ HSIO_S6G_DES_CFG,
+ HSIO_S6G_IB_CFG,
+ HSIO_S6G_IB_CFG1,
+ HSIO_S6G_IB_CFG2,
+ HSIO_S6G_IB_CFG3,
+ HSIO_S6G_IB_CFG4,
+ HSIO_S6G_IB_CFG5,
+ HSIO_S6G_OB_CFG,
+ HSIO_S6G_OB_CFG1,
+ HSIO_S6G_SER_CFG,
+ HSIO_S6G_COMMON_CFG,
+ HSIO_S6G_PLL_CFG,
+ HSIO_S6G_ACJTAG_CFG,
+ HSIO_S6G_GP_CFG,
+ HSIO_S6G_IB_STATUS0,
+ HSIO_S6G_IB_STATUS1,
+ HSIO_S6G_ACJTAG_STATUS,
+ HSIO_S6G_PLL_STATUS,
+ HSIO_S6G_REVID,
+ HSIO_MCB_S6G_ADDR_CFG,
+ HSIO_HW_CFG,
+ HSIO_HW_QSGMII_CFG,
+ HSIO_HW_QSGMII_STAT,
+ HSIO_CLK_CFG,
+ HSIO_TEMP_SENSOR_CTRL,
+ HSIO_TEMP_SENSOR_CFG,
+ HSIO_TEMP_SENSOR_STAT,
+};
+
+enum ocelot_regfield {
+ ANA_ADVLEARN_VLAN_CHK,
+ ANA_ADVLEARN_LEARN_MIRROR,
+ ANA_ANEVENTS_FLOOD_DISCARD,
+ ANA_ANEVENTS_MSTI_DROP,
+ ANA_ANEVENTS_ACLKILL,
+ ANA_ANEVENTS_ACLUSED,
+ ANA_ANEVENTS_AUTOAGE,
+ ANA_ANEVENTS_VS2TTL1,
+ ANA_ANEVENTS_STORM_DROP,
+ ANA_ANEVENTS_LEARN_DROP,
+ ANA_ANEVENTS_AGED_ENTRY,
+ ANA_ANEVENTS_CPU_LEARN_FAILED,
+ ANA_ANEVENTS_AUTO_LEARN_FAILED,
+ ANA_ANEVENTS_LEARN_REMOVE,
+ ANA_ANEVENTS_AUTO_LEARNED,
+ ANA_ANEVENTS_AUTO_MOVED,
+ ANA_ANEVENTS_DROPPED,
+ ANA_ANEVENTS_CLASSIFIED_DROP,
+ ANA_ANEVENTS_CLASSIFIED_COPY,
+ ANA_ANEVENTS_VLAN_DISCARD,
+ ANA_ANEVENTS_FWD_DISCARD,
+ ANA_ANEVENTS_MULTICAST_FLOOD,
+ ANA_ANEVENTS_UNICAST_FLOOD,
+ ANA_ANEVENTS_DEST_KNOWN,
+ ANA_ANEVENTS_BUCKET3_MATCH,
+ ANA_ANEVENTS_BUCKET2_MATCH,
+ ANA_ANEVENTS_BUCKET1_MATCH,
+ ANA_ANEVENTS_BUCKET0_MATCH,
+ ANA_ANEVENTS_CPU_OPERATION,
+ ANA_ANEVENTS_DMAC_LOOKUP,
+ ANA_ANEVENTS_SMAC_LOOKUP,
+ ANA_ANEVENTS_SEQ_GEN_ERR_0,
+ ANA_ANEVENTS_SEQ_GEN_ERR_1,
+ ANA_TABLES_MACACCESS_B_DOM,
+ ANA_TABLES_MACTINDX_BUCKET,
+ ANA_TABLES_MACTINDX_M_INDEX,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_VLD,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_FP,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T,
+ SYS_RESET_CFG_CORE_ENA,
+ SYS_RESET_CFG_MEM_ENA,
+ SYS_RESET_CFG_MEM_INIT,
+ REGFIELD_MAX
+};
+
+struct ocelot_multicast {
+ struct list_head list;
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+ u16 ports;
+};
+
+struct ocelot_port;
+
+struct ocelot_stat_layout {
+ u32 offset;
+ char name[ETH_GSTRING_LEN];
+};
+
+struct ocelot {
+ struct device *dev;
+
+ struct regmap *targets[TARGET_MAX];
+ struct regmap_field *regfields[REGFIELD_MAX];
+ const u32 *const *map;
+ const struct ocelot_stat_layout *stats_layout;
+ unsigned int num_stats;
+
+ u8 base_mac[ETH_ALEN];
+
+ struct net_device *hw_bridge_dev;
+ u16 bridge_mask;
+ u16 bridge_fwd_mask;
+
+ struct workqueue_struct *ocelot_owq;
+
+ int shared_queue_sz;
+
+ u8 num_phys_ports;
+ u8 num_cpu_ports;
+ struct ocelot_port **ports;
+
+ u16 lags[16];
+
+ /* Keep track of the vlan port masks */
+ u32 vlan_mask[VLAN_N_VID];
+
+ struct list_head multicast;
+
+ /* Workqueue to check statistics for overflow with its lock */
+ struct mutex stats_lock;
+ u64 *stats;
+ struct delayed_work stats_work;
+ struct workqueue_struct *stats_queue;
+};
+
+struct ocelot_port {
+ struct net_device *dev;
+ struct ocelot *ocelot;
+ struct phy_device *phy;
+ void __iomem *regs;
+ u8 chip_port;
+ /* Keep track of the mc addresses added to the mac table, so that they
+ * can be removed when needed.
+ */
+ struct list_head mc;
+
+ /* Ingress default VLAN (pvid) */
+ u16 pvid;
+
+ /* Egress default VLAN (vid) */
+ u16 vid;
+
+ u8 vlan_aware;
+
+ u64 *stats;
+};
+
+u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
+#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
+#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
+#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0)
+
+void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
+#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
+#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
+#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
+
+void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 mask,
+ u32 offset);
+#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
+#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
+#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)
+
+u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
+void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
+
+int ocelot_regfields_init(struct ocelot *ocelot,
+ const struct reg_field *const regfields);
+struct regmap *ocelot_io_platform_init(struct ocelot *ocelot,
+ struct platform_device *pdev,
+ const char *name);
+
+#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val))
+#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val))
+
+int ocelot_init(struct ocelot *ocelot);
+void ocelot_deinit(struct ocelot *ocelot);
+int ocelot_chip_init(struct ocelot *ocelot);
+int ocelot_probe_port(struct ocelot *ocelot, u8 port,
+ void __iomem *regs,
+ struct phy_device *phy);
+
+extern struct notifier_block ocelot_netdevice_nb;
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_ana.h b/drivers/net/ethernet/mscc/ocelot_ana.h
new file mode 100644
index 000000000000..841c6ec22b64
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ana.h
@@ -0,0 +1,625 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_ANA_H_
+#define _MSCC_OCELOT_ANA_H_
+
+#define ANA_ANAGEFIL_B_DOM_EN BIT(22)
+#define ANA_ANAGEFIL_B_DOM_VAL BIT(21)
+#define ANA_ANAGEFIL_AGE_LOCKED BIT(20)
+#define ANA_ANAGEFIL_PID_EN BIT(19)
+#define ANA_ANAGEFIL_PID_VAL(x) (((x) << 14) & GENMASK(18, 14))
+#define ANA_ANAGEFIL_PID_VAL_M GENMASK(18, 14)
+#define ANA_ANAGEFIL_PID_VAL_X(x) (((x) & GENMASK(18, 14)) >> 14)
+#define ANA_ANAGEFIL_VID_EN BIT(13)
+#define ANA_ANAGEFIL_VID_VAL(x) ((x) & GENMASK(12, 0))
+#define ANA_ANAGEFIL_VID_VAL_M GENMASK(12, 0)
+
+#define ANA_STORMLIMIT_CFG_RSZ 0x4
+
+#define ANA_STORMLIMIT_CFG_STORM_RATE(x) (((x) << 3) & GENMASK(6, 3))
+#define ANA_STORMLIMIT_CFG_STORM_RATE_M GENMASK(6, 3)
+#define ANA_STORMLIMIT_CFG_STORM_RATE_X(x) (((x) & GENMASK(6, 3)) >> 3)
+#define ANA_STORMLIMIT_CFG_STORM_UNIT BIT(2)
+#define ANA_STORMLIMIT_CFG_STORM_MODE(x) ((x) & GENMASK(1, 0))
+#define ANA_STORMLIMIT_CFG_STORM_MODE_M GENMASK(1, 0)
+
+#define ANA_AUTOAGE_AGE_FAST BIT(21)
+#define ANA_AUTOAGE_AGE_PERIOD(x) (((x) << 1) & GENMASK(20, 1))
+#define ANA_AUTOAGE_AGE_PERIOD_M GENMASK(20, 1)
+#define ANA_AUTOAGE_AGE_PERIOD_X(x) (((x) & GENMASK(20, 1)) >> 1)
+#define ANA_AUTOAGE_AUTOAGE_LOCKED BIT(0)
+
+#define ANA_MACTOPTIONS_REDUCED_TABLE BIT(1)
+#define ANA_MACTOPTIONS_SHADOW BIT(0)
+
+#define ANA_AGENCTRL_FID_MASK(x) (((x) << 12) & GENMASK(23, 12))
+#define ANA_AGENCTRL_FID_MASK_M GENMASK(23, 12)
+#define ANA_AGENCTRL_FID_MASK_X(x) (((x) & GENMASK(23, 12)) >> 12)
+#define ANA_AGENCTRL_IGNORE_DMAC_FLAGS BIT(11)
+#define ANA_AGENCTRL_IGNORE_SMAC_FLAGS BIT(10)
+#define ANA_AGENCTRL_FLOOD_SPECIAL BIT(9)
+#define ANA_AGENCTRL_FLOOD_IGNORE_VLAN BIT(8)
+#define ANA_AGENCTRL_MIRROR_CPU BIT(7)
+#define ANA_AGENCTRL_LEARN_CPU_COPY BIT(6)
+#define ANA_AGENCTRL_LEARN_FWD_KILL BIT(5)
+#define ANA_AGENCTRL_LEARN_IGNORE_VLAN BIT(4)
+#define ANA_AGENCTRL_CPU_CPU_KILL_ENA BIT(3)
+#define ANA_AGENCTRL_GREEN_COUNT_MODE BIT(2)
+#define ANA_AGENCTRL_YELLOW_COUNT_MODE BIT(1)
+#define ANA_AGENCTRL_RED_COUNT_MODE BIT(0)
+
+#define ANA_FLOODING_RSZ 0x4
+
+#define ANA_FLOODING_FLD_UNICAST(x) (((x) << 12) & GENMASK(17, 12))
+#define ANA_FLOODING_FLD_UNICAST_M GENMASK(17, 12)
+#define ANA_FLOODING_FLD_UNICAST_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define ANA_FLOODING_FLD_BROADCAST(x) (((x) << 6) & GENMASK(11, 6))
+#define ANA_FLOODING_FLD_BROADCAST_M GENMASK(11, 6)
+#define ANA_FLOODING_FLD_BROADCAST_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define ANA_FLOODING_FLD_MULTICAST(x) ((x) & GENMASK(5, 0))
+#define ANA_FLOODING_FLD_MULTICAST_M GENMASK(5, 0)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL(x) (((x) << 18) & GENMASK(23, 18))
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_M GENMASK(23, 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA(x) (((x) << 12) & GENMASK(17, 12))
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_M GENMASK(17, 12)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL(x) (((x) << 6) & GENMASK(11, 6))
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_M GENMASK(11, 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA(x) ((x) & GENMASK(5, 0))
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_M GENMASK(5, 0)
+
+#define ANA_SFLOW_CFG_RSZ 0x4
+
+#define ANA_SFLOW_CFG_SF_RATE(x) (((x) << 2) & GENMASK(13, 2))
+#define ANA_SFLOW_CFG_SF_RATE_M GENMASK(13, 2)
+#define ANA_SFLOW_CFG_SF_RATE_X(x) (((x) & GENMASK(13, 2)) >> 2)
+#define ANA_SFLOW_CFG_SF_SAMPLE_RX BIT(1)
+#define ANA_SFLOW_CFG_SF_SAMPLE_TX BIT(0)
+
+#define ANA_PORT_MODE_RSZ 0x4
+
+#define ANA_PORT_MODE_REDTAG_PARSE_CFG BIT(3)
+#define ANA_PORT_MODE_VLAN_PARSE_CFG(x) (((x) << 1) & GENMASK(2, 1))
+#define ANA_PORT_MODE_VLAN_PARSE_CFG_M GENMASK(2, 1)
+#define ANA_PORT_MODE_VLAN_PARSE_CFG_X(x) (((x) & GENMASK(2, 1)) >> 1)
+#define ANA_PORT_MODE_L3_PARSE_CFG BIT(0)
+
+#define ANA_CUT_THRU_CFG_RSZ 0x4
+
+#define ANA_PGID_PGID_RSZ 0x4
+
+#define ANA_PGID_PGID_PGID(x) ((x) & GENMASK(11, 0))
+#define ANA_PGID_PGID_PGID_M GENMASK(11, 0)
+#define ANA_PGID_PGID_CPUQ_DST_PGID(x) (((x) << 27) & GENMASK(29, 27))
+#define ANA_PGID_PGID_CPUQ_DST_PGID_M GENMASK(29, 27)
+#define ANA_PGID_PGID_CPUQ_DST_PGID_X(x) (((x) & GENMASK(29, 27)) >> 27)
+
+#define ANA_TABLES_MACHDATA_VID(x) (((x) << 16) & GENMASK(28, 16))
+#define ANA_TABLES_MACHDATA_VID_M GENMASK(28, 16)
+#define ANA_TABLES_MACHDATA_VID_X(x) (((x) & GENMASK(28, 16)) >> 16)
+#define ANA_TABLES_MACHDATA_MACHDATA(x) ((x) & GENMASK(15, 0))
+#define ANA_TABLES_MACHDATA_MACHDATA_M GENMASK(15, 0)
+
+#define ANA_TABLES_STREAMDATA_SSID_VALID BIT(16)
+#define ANA_TABLES_STREAMDATA_SSID(x) (((x) << 9) & GENMASK(15, 9))
+#define ANA_TABLES_STREAMDATA_SSID_M GENMASK(15, 9)
+#define ANA_TABLES_STREAMDATA_SSID_X(x) (((x) & GENMASK(15, 9)) >> 9)
+#define ANA_TABLES_STREAMDATA_SFID_VALID BIT(8)
+#define ANA_TABLES_STREAMDATA_SFID(x) ((x) & GENMASK(7, 0))
+#define ANA_TABLES_STREAMDATA_SFID_M GENMASK(7, 0)
+
+#define ANA_TABLES_MACACCESS_MAC_CPU_COPY BIT(15)
+#define ANA_TABLES_MACACCESS_SRC_KILL BIT(14)
+#define ANA_TABLES_MACACCESS_IGNORE_VLAN BIT(13)
+#define ANA_TABLES_MACACCESS_AGED_FLAG BIT(12)
+#define ANA_TABLES_MACACCESS_VALID BIT(11)
+#define ANA_TABLES_MACACCESS_ENTRYTYPE(x) (((x) << 9) & GENMASK(10, 9))
+#define ANA_TABLES_MACACCESS_ENTRYTYPE_M GENMASK(10, 9)
+#define ANA_TABLES_MACACCESS_ENTRYTYPE_X(x) (((x) & GENMASK(10, 9)) >> 9)
+#define ANA_TABLES_MACACCESS_DEST_IDX(x) (((x) << 3) & GENMASK(8, 3))
+#define ANA_TABLES_MACACCESS_DEST_IDX_M GENMASK(8, 3)
+#define ANA_TABLES_MACACCESS_DEST_IDX_X(x) (((x) & GENMASK(8, 3)) >> 3)
+#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD(x) ((x) & GENMASK(2, 0))
+#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M GENMASK(2, 0)
+#define MACACCESS_CMD_IDLE 0
+#define MACACCESS_CMD_LEARN 1
+#define MACACCESS_CMD_FORGET 2
+#define MACACCESS_CMD_AGE 3
+#define MACACCESS_CMD_GET_NEXT 4
+#define MACACCESS_CMD_INIT 5
+#define MACACCESS_CMD_READ 6
+#define MACACCESS_CMD_WRITE 7
+
+#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(x) (((x) << 2) & GENMASK(13, 2))
+#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_M GENMASK(13, 2)
+#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_X(x) (((x) & GENMASK(13, 2)) >> 2)
+#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M GENMASK(1, 0)
+#define ANA_TABLES_VLANACCESS_CMD_IDLE 0x0
+#define ANA_TABLES_VLANACCESS_CMD_WRITE 0x2
+#define ANA_TABLES_VLANACCESS_CMD_INIT 0x3
+
+#define ANA_TABLES_VLANTIDX_VLAN_SEC_FWD_ENA BIT(17)
+#define ANA_TABLES_VLANTIDX_VLAN_FLOOD_DIS BIT(16)
+#define ANA_TABLES_VLANTIDX_VLAN_PRIV_VLAN BIT(15)
+#define ANA_TABLES_VLANTIDX_VLAN_LEARN_DISABLED BIT(14)
+#define ANA_TABLES_VLANTIDX_VLAN_MIRROR BIT(13)
+#define ANA_TABLES_VLANTIDX_VLAN_SRC_CHK BIT(12)
+#define ANA_TABLES_VLANTIDX_V_INDEX(x) ((x) & GENMASK(11, 0))
+#define ANA_TABLES_VLANTIDX_V_INDEX_M GENMASK(11, 0)
+
+#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK(x) (((x) << 2) & GENMASK(8, 2))
+#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_M GENMASK(8, 2)
+#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_X(x) (((x) & GENMASK(8, 2)) >> 2)
+#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD_M GENMASK(1, 0)
+
+#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI(x) (((x) << 21) & GENMASK(28, 21))
+#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_M GENMASK(28, 21)
+#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_X(x) (((x) & GENMASK(28, 21)) >> 21)
+#define ANA_TABLES_ISDXTIDX_ISDX_MSTI(x) (((x) << 15) & GENMASK(20, 15))
+#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_M GENMASK(20, 15)
+#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_X(x) (((x) & GENMASK(20, 15)) >> 15)
+#define ANA_TABLES_ISDXTIDX_ISDX_ES0_KEY_ENA BIT(14)
+#define ANA_TABLES_ISDXTIDX_ISDX_FORCE_ENA BIT(10)
+#define ANA_TABLES_ISDXTIDX_ISDX_INDEX(x) ((x) & GENMASK(7, 0))
+#define ANA_TABLES_ISDXTIDX_ISDX_INDEX_M GENMASK(7, 0)
+
+#define ANA_TABLES_ENTRYLIM_RSZ 0x4
+
+#define ANA_TABLES_ENTRYLIM_ENTRYLIM(x) (((x) << 14) & GENMASK(17, 14))
+#define ANA_TABLES_ENTRYLIM_ENTRYLIM_M GENMASK(17, 14)
+#define ANA_TABLES_ENTRYLIM_ENTRYLIM_X(x) (((x) & GENMASK(17, 14)) >> 14)
+#define ANA_TABLES_ENTRYLIM_ENTRYSTAT(x) ((x) & GENMASK(13, 0))
+#define ANA_TABLES_ENTRYLIM_ENTRYSTAT_M GENMASK(13, 0)
+
+#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM(x) (((x) << 4) & GENMASK(31, 4))
+#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_M GENMASK(31, 4)
+#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_X(x) (((x) & GENMASK(31, 4)) >> 4)
+#define ANA_TABLES_STREAMACCESS_SEQ_GEN_REC_ENA BIT(3)
+#define ANA_TABLES_STREAMACCESS_GEN_REC_TYPE BIT(2)
+#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD_M GENMASK(1, 0)
+
+#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS(x) (((x) << 30) & GENMASK(31, 30))
+#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_M GENMASK(31, 30)
+#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_X(x) (((x) & GENMASK(31, 30)) >> 30)
+#define ANA_TABLES_STREAMTIDX_S_INDEX(x) (((x) << 16) & GENMASK(22, 16))
+#define ANA_TABLES_STREAMTIDX_S_INDEX_M GENMASK(22, 16)
+#define ANA_TABLES_STREAMTIDX_S_INDEX_X(x) (((x) & GENMASK(22, 16)) >> 16)
+#define ANA_TABLES_STREAMTIDX_FORCE_SF_BEHAVIOUR BIT(14)
+#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN(x) (((x) << 8) & GENMASK(13, 8))
+#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_M GENMASK(13, 8)
+#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_X(x) (((x) & GENMASK(13, 8)) >> 8)
+#define ANA_TABLES_STREAMTIDX_RESET_ON_ROGUE BIT(7)
+#define ANA_TABLES_STREAMTIDX_REDTAG_POP BIT(6)
+#define ANA_TABLES_STREAMTIDX_STREAM_SPLIT BIT(5)
+#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2(x) ((x) & GENMASK(4, 0))
+#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2_M GENMASK(4, 0)
+
+#define ANA_TABLES_SEQ_MASK_SPLIT_MASK(x) (((x) << 16) & GENMASK(22, 16))
+#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_M GENMASK(22, 16)
+#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_X(x) (((x) & GENMASK(22, 16)) >> 16)
+#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK(x) ((x) & GENMASK(6, 0))
+#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK_M GENMASK(6, 0)
+
+#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK(x) (((x) << 1) & GENMASK(7, 1))
+#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_M GENMASK(7, 1)
+#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_X(x) (((x) & GENMASK(7, 1)) >> 1)
+#define ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA BIT(0)
+
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA BIT(22)
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO(x) (((x) << 19) & GENMASK(21, 19))
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO_M GENMASK(21, 19)
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define ANA_TABLES_SFIDACCESS_FORCE_BLOCK BIT(18)
+#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(x) (((x) << 2) & GENMASK(17, 2))
+#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_M GENMASK(17, 2)
+#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_X(x) (((x) & GENMASK(17, 2)) >> 2)
+#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M GENMASK(1, 0)
+
+#define ANA_TABLES_SFIDTIDX_SGID_VALID BIT(26)
+#define ANA_TABLES_SFIDTIDX_SGID(x) (((x) << 18) & GENMASK(25, 18))
+#define ANA_TABLES_SFIDTIDX_SGID_M GENMASK(25, 18)
+#define ANA_TABLES_SFIDTIDX_SGID_X(x) (((x) & GENMASK(25, 18)) >> 18)
+#define ANA_TABLES_SFIDTIDX_POL_ENA BIT(17)
+#define ANA_TABLES_SFIDTIDX_POL_IDX(x) (((x) << 8) & GENMASK(16, 8))
+#define ANA_TABLES_SFIDTIDX_POL_IDX_M GENMASK(16, 8)
+#define ANA_TABLES_SFIDTIDX_POL_IDX_X(x) (((x) & GENMASK(16, 8)) >> 8)
+#define ANA_TABLES_SFIDTIDX_SFID_INDEX(x) ((x) & GENMASK(7, 0))
+#define ANA_TABLES_SFIDTIDX_SFID_INDEX_M GENMASK(7, 0)
+
+#define ANA_MSTI_STATE_RSZ 0x4
+
+#define ANA_OAM_UPM_LM_CNT_RSZ 0x4
+
+#define ANA_SG_ACCESS_CTRL_SGID(x) ((x) & GENMASK(7, 0))
+#define ANA_SG_ACCESS_CTRL_SGID_M GENMASK(7, 0)
+#define ANA_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
+
+#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
+#define ANA_SG_CONFIG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(18, 16))
+#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_M GENMASK(18, 16)
+#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define ANA_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 24) & GENMASK(27, 24))
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(27, 24)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(27, 24)) >> 24)
+#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(28)
+
+#define ANA_SG_GCL_GS_CONFIG_RSZ 0x4
+
+#define ANA_SG_GCL_GS_CONFIG_IPS(x) ((x) & GENMASK(3, 0))
+#define ANA_SG_GCL_GS_CONFIG_IPS_M GENMASK(3, 0)
+#define ANA_SG_GCL_GS_CONFIG_GATE_STATE BIT(4)
+
+#define ANA_SG_GCL_TI_CONFIG_RSZ 0x4
+
+#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
+#define ANA_SG_STATUS_REG_3_GATE_STATE BIT(16)
+#define ANA_SG_STATUS_REG_3_IPS(x) (((x) << 20) & GENMASK(23, 20))
+#define ANA_SG_STATUS_REG_3_IPS_M GENMASK(23, 20)
+#define ANA_SG_STATUS_REG_3_IPS_X(x) (((x) & GENMASK(23, 20)) >> 20)
+#define ANA_SG_STATUS_REG_3_CONFIG_PENDING BIT(24)
+
+#define ANA_PORT_VLAN_CFG_GSZ 0x100
+
+#define ANA_PORT_VLAN_CFG_VLAN_VID_AS_ISDX BIT(21)
+#define ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
+#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT(x) (((x) << 18) & GENMASK(19, 18))
+#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M GENMASK(19, 18)
+#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_X(x) (((x) & GENMASK(19, 18)) >> 18)
+#define ANA_PORT_VLAN_CFG_VLAN_INNER_TAG_ENA BIT(17)
+#define ANA_PORT_VLAN_CFG_VLAN_TAG_TYPE BIT(16)
+#define ANA_PORT_VLAN_CFG_VLAN_DEI BIT(15)
+#define ANA_PORT_VLAN_CFG_VLAN_PCP(x) (((x) << 12) & GENMASK(14, 12))
+#define ANA_PORT_VLAN_CFG_VLAN_PCP_M GENMASK(14, 12)
+#define ANA_PORT_VLAN_CFG_VLAN_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12)
+#define ANA_PORT_VLAN_CFG_VLAN_VID(x) ((x) & GENMASK(11, 0))
+#define ANA_PORT_VLAN_CFG_VLAN_VID_M GENMASK(11, 0)
+
+#define ANA_PORT_DROP_CFG_GSZ 0x100
+
+#define ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
+#define ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA BIT(5)
+#define ANA_PORT_DROP_CFG_DROP_C_TAGGED_ENA BIT(4)
+#define ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
+#define ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
+#define ANA_PORT_DROP_CFG_DROP_NULL_MAC_ENA BIT(1)
+#define ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
+
+#define ANA_PORT_QOS_CFG_GSZ 0x100
+
+#define ANA_PORT_QOS_CFG_DP_DEFAULT_VAL BIT(8)
+#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(x) (((x) << 5) & GENMASK(7, 5))
+#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M GENMASK(7, 5)
+#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define ANA_PORT_QOS_CFG_QOS_DSCP_ENA BIT(4)
+#define ANA_PORT_QOS_CFG_QOS_PCP_ENA BIT(3)
+#define ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA BIT(2)
+#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG(x) ((x) & GENMASK(1, 0))
+#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG_M GENMASK(1, 0)
+
+#define ANA_PORT_VCAP_CFG_GSZ 0x100
+
+#define ANA_PORT_VCAP_CFG_S1_ENA BIT(14)
+#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA(x) (((x) << 11) & GENMASK(13, 11))
+#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_M GENMASK(13, 11)
+#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_X(x) (((x) & GENMASK(13, 11)) >> 11)
+#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA(x) (((x) << 8) & GENMASK(10, 8))
+#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_M GENMASK(10, 8)
+#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define ANA_PORT_VCAP_CFG_PAG_VAL(x) ((x) & GENMASK(7, 0))
+#define ANA_PORT_VCAP_CFG_PAG_VAL_M GENMASK(7, 0)
+
+#define ANA_PORT_VCAP_S1_KEY_CFG_GSZ 0x100
+#define ANA_PORT_VCAP_S1_KEY_CFG_RSZ 0x4
+
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG(x) (((x) << 4) & GENMASK(6, 4))
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_M GENMASK(6, 4)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_X(x) (((x) & GENMASK(6, 4)) >> 4)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG(x) (((x) << 2) & GENMASK(3, 2))
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_M GENMASK(3, 2)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG(x) ((x) & GENMASK(1, 0))
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG_M GENMASK(1, 0)
+
+#define ANA_PORT_VCAP_S2_CFG_GSZ 0x100
+
+#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA(x) (((x) << 17) & GENMASK(18, 17))
+#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_M GENMASK(18, 17)
+#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_X(x) (((x) & GENMASK(18, 17)) >> 17)
+#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA(x) (((x) << 15) & GENMASK(16, 15))
+#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_M GENMASK(16, 15)
+#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_X(x) (((x) & GENMASK(16, 15)) >> 15)
+#define ANA_PORT_VCAP_S2_CFG_S2_ENA BIT(14)
+#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(x) (((x) << 12) & GENMASK(13, 12))
+#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M GENMASK(13, 12)
+#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_X(x) (((x) & GENMASK(13, 12)) >> 12)
+#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(x) (((x) << 10) & GENMASK(11, 10))
+#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M GENMASK(11, 10)
+#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_X(x) (((x) & GENMASK(11, 10)) >> 10)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(x) (((x) << 8) & GENMASK(9, 8))
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M GENMASK(9, 8)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(x) (((x) << 6) & GENMASK(7, 6))
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M GENMASK(7, 6)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_X(x) (((x) & GENMASK(7, 6)) >> 6)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(x) (((x) << 2) & GENMASK(5, 2))
+#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_M GENMASK(5, 2)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_X(x) (((x) & GENMASK(5, 2)) >> 2)
+#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(x) ((x) & GENMASK(1, 0))
+#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M GENMASK(1, 0)
+
+#define ANA_PORT_PCP_DEI_MAP_GSZ 0x100
+#define ANA_PORT_PCP_DEI_MAP_RSZ 0x4
+
+#define ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL BIT(3)
+#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(x) ((x) & GENMASK(2, 0))
+#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M GENMASK(2, 0)
+
+#define ANA_PORT_CPU_FWD_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_CFG_CPU_VRAP_REDIR_ENA BIT(7)
+#define ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA BIT(6)
+#define ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA BIT(5)
+#define ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA BIT(4)
+#define ANA_PORT_CPU_FWD_CFG_CPU_SRC_COPY_ENA BIT(3)
+#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_DROP_ENA BIT(2)
+#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_REDIR_ENA BIT(1)
+#define ANA_PORT_CPU_FWD_CFG_CPU_OAM_ENA BIT(0)
+
+#define ANA_PORT_CPU_FWD_BPDU_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_M GENMASK(31, 16)
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(x) ((x) & GENMASK(15, 0))
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA_M GENMASK(15, 0)
+
+#define ANA_PORT_CPU_FWD_GARP_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_M GENMASK(31, 16)
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA(x) ((x) & GENMASK(15, 0))
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA_M GENMASK(15, 0)
+
+#define ANA_PORT_CPU_FWD_CCM_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_M GENMASK(31, 16)
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA(x) ((x) & GENMASK(15, 0))
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA_M GENMASK(15, 0)
+
+#define ANA_PORT_PORT_CFG_GSZ 0x100
+
+#define ANA_PORT_PORT_CFG_SRC_MIRROR_ENA BIT(15)
+#define ANA_PORT_PORT_CFG_LIMIT_DROP BIT(14)
+#define ANA_PORT_PORT_CFG_LIMIT_CPU BIT(13)
+#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_DROP BIT(12)
+#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_CPU BIT(11)
+#define ANA_PORT_PORT_CFG_LEARNDROP BIT(10)
+#define ANA_PORT_PORT_CFG_LEARNCPU BIT(9)
+#define ANA_PORT_PORT_CFG_LEARNAUTO BIT(8)
+#define ANA_PORT_PORT_CFG_LEARN_ENA BIT(7)
+#define ANA_PORT_PORT_CFG_RECV_ENA BIT(6)
+#define ANA_PORT_PORT_CFG_PORTID_VAL(x) (((x) << 2) & GENMASK(5, 2))
+#define ANA_PORT_PORT_CFG_PORTID_VAL_M GENMASK(5, 2)
+#define ANA_PORT_PORT_CFG_PORTID_VAL_X(x) (((x) & GENMASK(5, 2)) >> 2)
+#define ANA_PORT_PORT_CFG_USE_B_DOM_TBL BIT(1)
+#define ANA_PORT_PORT_CFG_LSR_MODE BIT(0)
+
+#define ANA_PORT_POL_CFG_GSZ 0x100
+
+#define ANA_PORT_POL_CFG_POL_CPU_REDIR_8021 BIT(19)
+#define ANA_PORT_POL_CFG_POL_CPU_REDIR_IP BIT(18)
+#define ANA_PORT_POL_CFG_PORT_POL_ENA BIT(17)
+#define ANA_PORT_POL_CFG_QUEUE_POL_ENA(x) (((x) << 9) & GENMASK(16, 9))
+#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_M GENMASK(16, 9)
+#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_X(x) (((x) & GENMASK(16, 9)) >> 9)
+#define ANA_PORT_POL_CFG_POL_ORDER(x) ((x) & GENMASK(8, 0))
+#define ANA_PORT_POL_CFG_POL_ORDER_M GENMASK(8, 0)
+
+#define ANA_PORT_PTP_CFG_GSZ 0x100
+
+#define ANA_PORT_PTP_CFG_PTP_BACKPLANE_MODE BIT(0)
+
+#define ANA_PORT_PTP_DLY1_CFG_GSZ 0x100
+
+#define ANA_PORT_PTP_DLY2_CFG_GSZ 0x100
+
+#define ANA_PORT_SFID_CFG_GSZ 0x100
+#define ANA_PORT_SFID_CFG_RSZ 0x4
+
+#define ANA_PORT_SFID_CFG_SFID_VALID BIT(8)
+#define ANA_PORT_SFID_CFG_SFID(x) ((x) & GENMASK(7, 0))
+#define ANA_PORT_SFID_CFG_SFID_M GENMASK(7, 0)
+
+#define ANA_PFC_PFC_CFG_GSZ 0x40
+
+#define ANA_PFC_PFC_CFG_RX_PFC_ENA(x) (((x) << 2) & GENMASK(9, 2))
+#define ANA_PFC_PFC_CFG_RX_PFC_ENA_M GENMASK(9, 2)
+#define ANA_PFC_PFC_CFG_RX_PFC_ENA_X(x) (((x) & GENMASK(9, 2)) >> 2)
+#define ANA_PFC_PFC_CFG_FC_LINK_SPEED(x) ((x) & GENMASK(1, 0))
+#define ANA_PFC_PFC_CFG_FC_LINK_SPEED_M GENMASK(1, 0)
+
+#define ANA_PFC_PFC_TIMER_GSZ 0x40
+#define ANA_PFC_PFC_TIMER_RSZ 0x4
+
+#define ANA_IPT_OAM_MEP_CFG_GSZ 0x8
+
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P(x) (((x) << 6) & GENMASK(10, 6))
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_M GENMASK(10, 6)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_X(x) (((x) & GENMASK(10, 6)) >> 6)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX(x) (((x) << 1) & GENMASK(5, 1))
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_M GENMASK(5, 1)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_X(x) (((x) & GENMASK(5, 1)) >> 1)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_ENA BIT(0)
+
+#define ANA_IPT_IPT_GSZ 0x8
+
+#define ANA_IPT_IPT_IPT_CFG(x) (((x) << 15) & GENMASK(16, 15))
+#define ANA_IPT_IPT_IPT_CFG_M GENMASK(16, 15)
+#define ANA_IPT_IPT_IPT_CFG_X(x) (((x) & GENMASK(16, 15)) >> 15)
+#define ANA_IPT_IPT_ISDX_P(x) (((x) << 7) & GENMASK(14, 7))
+#define ANA_IPT_IPT_ISDX_P_M GENMASK(14, 7)
+#define ANA_IPT_IPT_ISDX_P_X(x) (((x) & GENMASK(14, 7)) >> 7)
+#define ANA_IPT_IPT_PPT_IDX(x) ((x) & GENMASK(6, 0))
+#define ANA_IPT_IPT_PPT_IDX_M GENMASK(6, 0)
+
+#define ANA_PPT_PPT_RSZ 0x4
+
+#define ANA_FID_MAP_FID_MAP_RSZ 0x4
+
+#define ANA_FID_MAP_FID_MAP_FID_C_VAL(x) (((x) << 6) & GENMASK(11, 6))
+#define ANA_FID_MAP_FID_MAP_FID_C_VAL_M GENMASK(11, 6)
+#define ANA_FID_MAP_FID_MAP_FID_C_VAL_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define ANA_FID_MAP_FID_MAP_FID_B_VAL(x) ((x) & GENMASK(5, 0))
+#define ANA_FID_MAP_FID_MAP_FID_B_VAL_M GENMASK(5, 0)
+
+#define ANA_AGGR_CFG_AC_RND_ENA BIT(7)
+#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(6)
+#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(5)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(4)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(3)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(2)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(1)
+#define ANA_AGGR_CFG_AC_ISDX_ENA BIT(0)
+
+#define ANA_CPUQ_CFG_CPUQ_MLD(x) (((x) << 27) & GENMASK(29, 27))
+#define ANA_CPUQ_CFG_CPUQ_MLD_M GENMASK(29, 27)
+#define ANA_CPUQ_CFG_CPUQ_MLD_X(x) (((x) & GENMASK(29, 27)) >> 27)
+#define ANA_CPUQ_CFG_CPUQ_IGMP(x) (((x) << 24) & GENMASK(26, 24))
+#define ANA_CPUQ_CFG_CPUQ_IGMP_M GENMASK(26, 24)
+#define ANA_CPUQ_CFG_CPUQ_IGMP_X(x) (((x) & GENMASK(26, 24)) >> 24)
+#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(x) (((x) << 21) & GENMASK(23, 21))
+#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_M GENMASK(23, 21)
+#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_X(x) (((x) & GENMASK(23, 21)) >> 21)
+#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(x) (((x) << 18) & GENMASK(20, 18))
+#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_M GENMASK(20, 18)
+#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_X(x) (((x) & GENMASK(20, 18)) >> 18)
+#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(x) (((x) << 15) & GENMASK(17, 15))
+#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_M GENMASK(17, 15)
+#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_X(x) (((x) & GENMASK(17, 15)) >> 15)
+#define ANA_CPUQ_CFG_CPUQ_SRC_COPY(x) (((x) << 12) & GENMASK(14, 12))
+#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_M GENMASK(14, 12)
+#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_X(x) (((x) & GENMASK(14, 12)) >> 12)
+#define ANA_CPUQ_CFG_CPUQ_MAC_COPY(x) (((x) << 9) & GENMASK(11, 9))
+#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_M GENMASK(11, 9)
+#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_X(x) (((x) & GENMASK(11, 9)) >> 9)
+#define ANA_CPUQ_CFG_CPUQ_LRN(x) (((x) << 6) & GENMASK(8, 6))
+#define ANA_CPUQ_CFG_CPUQ_LRN_M GENMASK(8, 6)
+#define ANA_CPUQ_CFG_CPUQ_LRN_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define ANA_CPUQ_CFG_CPUQ_MIRROR(x) (((x) << 3) & GENMASK(5, 3))
+#define ANA_CPUQ_CFG_CPUQ_MIRROR_M GENMASK(5, 3)
+#define ANA_CPUQ_CFG_CPUQ_MIRROR_X(x) (((x) & GENMASK(5, 3)) >> 3)
+#define ANA_CPUQ_CFG_CPUQ_SFLOW(x) ((x) & GENMASK(2, 0))
+#define ANA_CPUQ_CFG_CPUQ_SFLOW_M GENMASK(2, 0)
+
+#define ANA_CPUQ_8021_CFG_RSZ 0x4
+
+#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(x) (((x) << 6) & GENMASK(8, 6))
+#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_M GENMASK(8, 6)
+#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(x) (((x) << 3) & GENMASK(5, 3))
+#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_M GENMASK(5, 3)
+#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_X(x) (((x) & GENMASK(5, 3)) >> 3)
+#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL(x) ((x) & GENMASK(2, 0))
+#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL_M GENMASK(2, 0)
+
+#define ANA_DSCP_CFG_RSZ 0x4
+
+#define ANA_DSCP_CFG_DP_DSCP_VAL BIT(11)
+#define ANA_DSCP_CFG_QOS_DSCP_VAL(x) (((x) << 8) & GENMASK(10, 8))
+#define ANA_DSCP_CFG_QOS_DSCP_VAL_M GENMASK(10, 8)
+#define ANA_DSCP_CFG_QOS_DSCP_VAL_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL(x) (((x) << 2) & GENMASK(7, 2))
+#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_M GENMASK(7, 2)
+#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(x) (((x) & GENMASK(7, 2)) >> 2)
+#define ANA_DSCP_CFG_DSCP_TRUST_ENA BIT(1)
+#define ANA_DSCP_CFG_DSCP_REWR_ENA BIT(0)
+
+#define ANA_DSCP_REWR_CFG_RSZ 0x4
+
+#define ANA_VCAP_RNG_TYPE_CFG_RSZ 0x4
+
+#define ANA_VCAP_RNG_VAL_CFG_RSZ 0x4
+
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_M GENMASK(31, 16)
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL(x) ((x) & GENMASK(15, 0))
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL_M GENMASK(15, 0)
+
+#define ANA_VRAP_CFG_VRAP_VLAN_AWARE_ENA BIT(12)
+#define ANA_VRAP_CFG_VRAP_VID(x) ((x) & GENMASK(11, 0))
+#define ANA_VRAP_CFG_VRAP_VID_M GENMASK(11, 0)
+
+#define ANA_DISCARD_CFG_DROP_TAGGING_ISDX0 BIT(3)
+#define ANA_DISCARD_CFG_DROP_CTRLPROT_ISDX0 BIT(2)
+#define ANA_DISCARD_CFG_DROP_TAGGING_S2_ENA BIT(1)
+#define ANA_DISCARD_CFG_DROP_CTRLPROT_S2_ENA BIT(0)
+
+#define ANA_FID_CFG_VID_MC_ENA BIT(0)
+
+#define ANA_POL_PIR_CFG_GSZ 0x20
+
+#define ANA_POL_PIR_CFG_PIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
+#define ANA_POL_PIR_CFG_PIR_RATE_M GENMASK(20, 6)
+#define ANA_POL_PIR_CFG_PIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
+#define ANA_POL_PIR_CFG_PIR_BURST(x) ((x) & GENMASK(5, 0))
+#define ANA_POL_PIR_CFG_PIR_BURST_M GENMASK(5, 0)
+
+#define ANA_POL_CIR_CFG_GSZ 0x20
+
+#define ANA_POL_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
+#define ANA_POL_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
+#define ANA_POL_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
+#define ANA_POL_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
+#define ANA_POL_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
+
+#define ANA_POL_MODE_CFG_GSZ 0x20
+
+#define ANA_POL_MODE_CFG_IPG_SIZE(x) (((x) << 5) & GENMASK(9, 5))
+#define ANA_POL_MODE_CFG_IPG_SIZE_M GENMASK(9, 5)
+#define ANA_POL_MODE_CFG_IPG_SIZE_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define ANA_POL_MODE_CFG_FRM_MODE(x) (((x) << 3) & GENMASK(4, 3))
+#define ANA_POL_MODE_CFG_FRM_MODE_M GENMASK(4, 3)
+#define ANA_POL_MODE_CFG_FRM_MODE_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define ANA_POL_MODE_CFG_DLB_COUPLED BIT(2)
+#define ANA_POL_MODE_CFG_CIR_ENA BIT(1)
+#define ANA_POL_MODE_CFG_OVERSHOOT_ENA BIT(0)
+
+#define ANA_POL_PIR_STATE_GSZ 0x20
+
+#define ANA_POL_CIR_STATE_GSZ 0x20
+
+#define ANA_POL_STATE_GSZ 0x20
+
+#define ANA_POL_FLOWC_RSZ 0x4
+
+#define ANA_POL_FLOWC_POL_FLOWC BIT(0)
+
+#define ANA_POL_HYST_POL_FC_HYST(x) (((x) << 4) & GENMASK(9, 4))
+#define ANA_POL_HYST_POL_FC_HYST_M GENMASK(9, 4)
+#define ANA_POL_HYST_POL_FC_HYST_X(x) (((x) & GENMASK(9, 4)) >> 4)
+#define ANA_POL_HYST_POL_STOP_HYST(x) ((x) & GENMASK(3, 0))
+#define ANA_POL_HYST_POL_STOP_HYST_M GENMASK(3, 0)
+
+#define ANA_POL_MISC_CFG_POL_CLOSE_ALL BIT(1)
+#define ANA_POL_MISC_CFG_POL_LEAK_DIS BIT(0)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
new file mode 100644
index 000000000000..18df7d934e81
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/skbuff.h>
+
+#include "ocelot.h"
+
+static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info)
+{
+ int i;
+ u8 llen, wlen;
+
+ /* The IFH is in network order, switch to CPU order */
+ for (i = 0; i < IFH_LEN; i++)
+ ifh[i] = ntohl((__force __be32)ifh[i]);
+
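+ /* Decode the extraction IFH fields used by the driver: the frame
+ * length is a number of buffer cells (wlen) plus the valid bytes in
+ * the last cell (llen); the remaining words carry the source port,
+ * the CPU extraction queue, the tag type and the VID.
+ */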
+ wlen = (ifh[1] >> 7) & 0xff;
+ llen = (ifh[1] >> 15) & 0x3f;
+ info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80;
+
+ info->port = (ifh[2] & GENMASK(14, 11)) >> 11;
+
+ info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20;
+ info->tag_type = (ifh[3] & GENMASK(16, 16)) >> 16;
+ info->vid = ifh[3] & GENMASK(11, 0);
+
+ return 0;
+}
+
+static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
+ u32 *rval)
+{
+ u32 val;
+ u32 bytes_valid;
+
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ if (val == XTR_NOT_READY) {
+ if (ifh)
+ return -EIO;
+
+ do {
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ } while (val == XTR_NOT_READY);
+ }
+
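+ /* A status word either terminates the frame (the EOF/PRUNED codes
+ * encode how many bytes of the last data word are valid) or escapes a
+ * data word that collides with a status code (XTR_ESCAPE); anything
+ * else is plain frame data.
+ */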
+ switch (val) {
+ case XTR_ABORT:
+ return -EIO;
+ case XTR_EOF_0:
+ case XTR_EOF_1:
+ case XTR_EOF_2:
+ case XTR_EOF_3:
+ case XTR_PRUNED:
+ bytes_valid = XTR_VALID_BYTES(val);
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ if (val == XTR_ESCAPE)
+ *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ else
+ *rval = val;
+
+ return bytes_valid;
+ case XTR_ESCAPE:
+ *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+
+ return 4;
+ default:
+ *rval = val;
+
+ return 4;
+ }
+}
+
+static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
+{
+ struct ocelot *ocelot = arg;
+ int i = 0, grp = 0;
+ int err = 0;
+
+ if (!(ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)))
+ return IRQ_NONE;
+
+ do {
+ struct sk_buff *skb;
+ struct net_device *dev;
+ u32 *buf;
+ int sz, len;
+ u32 ifh[4];
+ u32 val;
+ struct frame_info info;
+
+ for (i = 0; i < IFH_LEN; i++) {
+ err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
+ if (err != 4)
+ break;
+ }
+
+ if (err != 4)
+ break;
+
+ ocelot_parse_ifh(ifh, &info);
+
+ dev = ocelot->ports[info.port]->dev;
+
+ skb = netdev_alloc_skb(dev, info.len);
+
+ if (unlikely(!skb)) {
+ netdev_err(dev, "Unable to allocate sk_buff\n");
+ err = -ENOMEM;
+ break;
+ }
+ buf = (u32 *)skb_put(skb, info.len);
+
+ len = 0;
+ do {
+ sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+ *buf++ = val;
+ len += sz;
+ } while ((sz == 4) && (len < info.len));
+
+ if (sz < 0) {
+ /* Drop the partially filled skb that will never be passed up */
+ kfree_skb(skb);
+ err = sz;
+ break;
+ }
+
+ /* Everything we see on an interface that is in the HW bridge
+ * has already been forwarded.
+ */
+ if (ocelot->bridge_mask & BIT(info.port))
+ skb->offload_fwd_mark = 1;
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ } while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp));
+
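+ /* On error, drain whatever is left in the extraction group. */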
+ if (err)
+ while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
+ ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id mscc_ocelot_match[] = {
+ { .compatible = "mscc,vsc7514-switch" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
+
+static int mscc_ocelot_probe(struct platform_device *pdev)
+{
+ int err, irq;
+ unsigned int i;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *ports, *portnp;
+ struct ocelot *ocelot;
+ u32 val;
+
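+ /* Named MMIO resources, each mapped to its own regmap target below. */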
+ struct {
+ enum ocelot_target id;
+ char *name;
+ } res[] = {
+ { SYS, "sys" },
+ { REW, "rew" },
+ { QSYS, "qsys" },
+ { ANA, "ana" },
+ { QS, "qs" },
+ { HSIO, "hsio" },
+ };
+
+ if (!np && !pdev->dev.platform_data)
+ return -ENODEV;
+
+ ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL);
+ if (!ocelot)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ocelot);
+ ocelot->dev = &pdev->dev;
+
+ for (i = 0; i < ARRAY_SIZE(res); i++) {
+ struct regmap *target;
+
+ target = ocelot_io_platform_init(ocelot, pdev, res[i].name);
+ if (IS_ERR(target))
+ return PTR_ERR(target);
+
+ ocelot->targets[res[i].id] = target;
+ }
+
+ err = ocelot_chip_init(ocelot);
+ if (err)
+ return err;
+
+ irq = platform_get_irq_byname(pdev, "xtr");
+ if (irq < 0)
+ return -ENODEV;
+
+ err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ ocelot_xtr_irq_handler, IRQF_ONESHOT,
+ "frame extraction", ocelot);
+ if (err)
+ return err;
+
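+ /* Initialize the switch core memories and wait for the init to
+ * complete before enabling the core.
+ */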
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+
+ do {
+ msleep(1);
+ regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+ &val);
+ } while (val);
+
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+
+ ocelot->num_cpu_ports = 1; /* 1 CPU port on the switch, with two extraction/injection groups */
+
+ ports = of_get_child_by_name(np, "ethernet-ports");
+ if (!ports) {
+ dev_err(&pdev->dev, "no ethernet-ports child node found\n");
+ return -ENODEV;
+ }
+
+ ocelot->num_phys_ports = of_get_child_count(ports);
+
+ ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports,
+ sizeof(struct ocelot_port *), GFP_KERNEL);
+
+ INIT_LIST_HEAD(&ocelot->multicast);
+ ocelot_init(ocelot);
+
+ ocelot_rmw(ocelot, HSIO_HW_CFG_DEV1G_4_MODE |
+ HSIO_HW_CFG_DEV1G_6_MODE |
+ HSIO_HW_CFG_DEV1G_9_MODE,
+ HSIO_HW_CFG_DEV1G_4_MODE |
+ HSIO_HW_CFG_DEV1G_6_MODE |
+ HSIO_HW_CFG_DEV1G_9_MODE,
+ HSIO_HW_CFG);
+
+ for_each_available_child_of_node(ports, portnp) {
+ struct device_node *phy_node;
+ struct phy_device *phy;
+ struct resource *res;
+ void __iomem *regs;
+ char res_name[8];
+ u32 port;
+
+ if (of_property_read_u32(portnp, "reg", &port))
+ continue;
+
+ snprintf(res_name, sizeof(res_name), "port%d", port);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ res_name);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ continue;
+
+ phy_node = of_parse_phandle(portnp, "phy-handle", 0);
+ if (!phy_node)
+ continue;
+
+ phy = of_phy_find_device(phy_node);
+ if (!phy)
+ continue;
+
+ err = ocelot_probe_port(ocelot, port, regs, phy);
+ if (err) {
+ dev_err(&pdev->dev, "failed to probe ports\n");
+ goto err_probe_ports;
+ }
+ }
+
+ register_netdevice_notifier(&ocelot_netdevice_nb);
+
+ dev_info(&pdev->dev, "Ocelot switch probed\n");
+
+ return 0;
+
+err_probe_ports:
+ return err;
+}
+
+static int mscc_ocelot_remove(struct platform_device *pdev)
+{
+ struct ocelot *ocelot = platform_get_drvdata(pdev);
+
+ ocelot_deinit(ocelot);
+ unregister_netdevice_notifier(&ocelot_netdevice_nb);
+
+ return 0;
+}
+
+static struct platform_driver mscc_ocelot_driver = {
+ .probe = mscc_ocelot_probe,
+ .remove = mscc_ocelot_remove,
+ .driver = {
+ .name = "ocelot-switch",
+ .of_match_table = mscc_ocelot_match,
+ },
+};
+
+module_platform_driver(mscc_ocelot_driver);
+
+MODULE_DESCRIPTION("Microsemi Ocelot switch driver");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@bootlin.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/mscc/ocelot_dev.h b/drivers/net/ethernet/mscc/ocelot_dev.h
new file mode 100644
index 000000000000..0a50d53bbd3f
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_dev.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_DEV_H_
+#define _MSCC_OCELOT_DEV_H_
+
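+/* Field helper convention used throughout these headers: for a multi-bit
+ * field FOO, FOO(x) shifts and masks a value into position for a write,
+ * FOO_M is the field mask and FOO_X(x) extracts the field from a read
+ * value.  Minimal illustrative sketch (variable names are hypothetical):
+ *
+ * val = (val & ~DEV_CLOCK_CFG_LINK_SPEED_M) | DEV_CLOCK_CFG_LINK_SPEED(speed);
+ * speed = DEV_CLOCK_CFG_LINK_SPEED_X(val);
+ */
+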
+#define DEV_CLOCK_CFG 0x0
+
+#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
+#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
+#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
+#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
+#define DEV_CLOCK_CFG_PORT_RST BIT(3)
+#define DEV_CLOCK_CFG_PHY_RST BIT(2)
+#define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0))
+#define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0)
+
+#define DEV_PORT_MISC 0x4
+
+#define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4)
+#define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3)
+#define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2)
+#define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1)
+#define DEV_PORT_MISC_HDX_FAST_DIS BIT(0)
+
+#define DEV_EVENTS 0x8
+
+#define DEV_EEE_CFG 0xc
+
+#define DEV_EEE_CFG_EEE_ENA BIT(22)
+#define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15))
+#define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15)
+#define DEV_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15)
+#define DEV_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8))
+#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8)
+#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8)
+#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1))
+#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1)
+#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1)
+#define DEV_EEE_CFG_PORT_LPI BIT(0)
+
+#define DEV_RX_PATH_DELAY 0x10
+
+#define DEV_TX_PATH_DELAY 0x14
+
+#define DEV_PTP_PREDICT_CFG 0x18
+
+#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4))
+#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4)
+#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4)
+#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0))
+#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0)
+
+#define DEV_MAC_ENA_CFG 0x1c
+
+#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
+
+#define DEV_MAC_MODE_CFG 0x20
+
+#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV_MAC_MODE_CFG_FDX_ENA BIT(0)
+
+#define DEV_MAC_MAXLEN_CFG 0x24
+
+#define DEV_MAC_TAGS_CFG 0x28
+
+#define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16))
+#define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
+#define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2)
+#define DEV_MAC_TAGS_CFG_PB_ENA BIT(1)
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+
+#define DEV_MAC_ADV_CHK_CFG 0x2c
+
+#define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
+
+#define DEV_MAC_IFG_CFG 0x30
+
+#define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
+#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16)
+#define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8))
+#define DEV_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8)
+#define DEV_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8)
+#define DEV_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4))
+#define DEV_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4)
+#define DEV_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0))
+#define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0)
+
+#define DEV_MAC_HDX_CFG 0x34
+
+#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
+#define DEV_MAC_HDX_CFG_OB_ENA BIT(25)
+#define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24)
+#define DEV_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16))
+#define DEV_MAC_HDX_CFG_SEED_M GENMASK(23, 16)
+#define DEV_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
+#define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0))
+#define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0)
+
+#define DEV_MAC_DBG_CFG 0x38
+
+#define DEV_MAC_DBG_CFG_TBI_MODE BIT(4)
+#define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0)
+
+#define DEV_MAC_FC_MAC_LOW_CFG 0x3c
+
+#define DEV_MAC_FC_MAC_HIGH_CFG 0x40
+
+#define DEV_MAC_STICKY 0x44
+
+#define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9)
+#define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8)
+#define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7)
+#define DEV_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6)
+#define DEV_MAC_STICKY_RX_JUNK_STICKY BIT(5)
+#define DEV_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4)
+#define DEV_MAC_STICKY_TX_JAM_STICKY BIT(3)
+#define DEV_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2)
+#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
+#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0)
+
+#define PCS1G_CFG 0x48
+
+#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
+#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
+#define PCS1G_CFG_PCS_ENA BIT(0)
+
+#define PCS1G_MODE_CFG 0x4c
+
+#define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
+#define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
+
+#define PCS1G_SD_CFG 0x50
+
+#define PCS1G_SD_CFG_SD_SEL BIT(8)
+#define PCS1G_SD_CFG_SD_POL BIT(4)
+#define PCS1G_SD_CFG_SD_ENA BIT(0)
+
+#define PCS1G_ANEG_CFG 0x54
+
+#define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
+#define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16)
+#define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
+#define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
+#define PCS1G_ANEG_CFG_ANEG_ENA BIT(0)
+
+#define PCS1G_ANEG_NP_CFG 0x58
+
+#define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16))
+#define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16)
+#define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0)
+
+#define PCS1G_LB_CFG 0x5c
+
+#define PCS1G_LB_CFG_RA_ENA BIT(4)
+#define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1)
+#define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0)
+
+#define PCS1G_DBG_CFG 0x60
+
+#define PCS1G_DBG_CFG_UDLT BIT(0)
+
+#define PCS1G_CDET_CFG 0x64
+
+#define PCS1G_CDET_CFG_CDET_ENA BIT(0)
+
+#define PCS1G_ANEG_STATUS 0x68
+
+#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
+#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16)
+#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define PCS1G_ANEG_STATUS_PR BIT(4)
+#define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3)
+#define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
+
+#define PCS1G_ANEG_NP_STATUS 0x6c
+
+#define PCS1G_LINK_STATUS 0x70
+
+#define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12))
+#define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12)
+#define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12)
+#define PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8)
+#define PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
+#define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
+
+#define PCS1G_LINK_DOWN_CNT 0x74
+
+#define PCS1G_STICKY 0x78
+
+#define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
+#define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0)
+
+#define PCS1G_DEBUG_STATUS 0x7c
+
+#define PCS1G_LPI_CFG 0x80
+
+#define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20)
+#define PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17)
+#define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16)
+#define PCS1G_LPI_CFG_LPI_RX_WTIM(x) (((x) << 4) & GENMASK(5, 4))
+#define PCS1G_LPI_CFG_LPI_RX_WTIM_M GENMASK(5, 4)
+#define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0)
+
+#define PCS1G_LPI_WAKE_ERROR_CNT 0x84
+
+#define PCS1G_LPI_STATUS 0x88
+
+#define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16)
+#define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12)
+#define PCS1G_LPI_STATUS_RX_QUIET BIT(9)
+#define PCS1G_LPI_STATUS_RX_LPI_MODE BIT(8)
+#define PCS1G_LPI_STATUS_TX_LPI_EVENT_STICKY BIT(4)
+#define PCS1G_LPI_STATUS_TX_QUIET BIT(1)
+#define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0)
+
+#define PCS1G_TSTPAT_MODE_CFG 0x8c
+
+#define PCS1G_TSTPAT_STATUS 0x90
+
+#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8))
+#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8)
+#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4)
+#define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0)
+
+#define DEV_PCS_FX100_CFG 0x94
+
+#define DEV_PCS_FX100_CFG_SD_SEL BIT(26)
+#define DEV_PCS_FX100_CFG_SD_POL BIT(25)
+#define DEV_PCS_FX100_CFG_SD_ENA BIT(24)
+#define DEV_PCS_FX100_CFG_LOOPBACK_ENA BIT(20)
+#define DEV_PCS_FX100_CFG_SWAP_MII_ENA BIT(16)
+#define DEV_PCS_FX100_CFG_RXBITSEL(x) (((x) << 12) & GENMASK(15, 12))
+#define DEV_PCS_FX100_CFG_RXBITSEL_M GENMASK(15, 12)
+#define DEV_PCS_FX100_CFG_RXBITSEL_X(x) (((x) & GENMASK(15, 12)) >> 12)
+#define DEV_PCS_FX100_CFG_SIGDET_CFG(x) (((x) << 9) & GENMASK(10, 9))
+#define DEV_PCS_FX100_CFG_SIGDET_CFG_M GENMASK(10, 9)
+#define DEV_PCS_FX100_CFG_SIGDET_CFG_X(x) (((x) & GENMASK(10, 9)) >> 9)
+#define DEV_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8)
+#define DEV_PCS_FX100_CFG_LINKHYSTTIMER(x) (((x) << 4) & GENMASK(7, 4))
+#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_M GENMASK(7, 4)
+#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define DEV_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3)
+#define DEV_PCS_FX100_CFG_FEFCHK_ENA BIT(2)
+#define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1)
+#define DEV_PCS_FX100_CFG_PCS_ENA BIT(0)
+
+#define DEV_PCS_FX100_STATUS 0x98
+
+#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8))
+#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8)
+#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8)
+#define DEV_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7)
+#define DEV_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6)
+#define DEV_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5)
+#define DEV_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4)
+#define DEV_PCS_FX100_STATUS_FEF_STATUS BIT(2)
+#define DEV_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1)
+#define DEV_PCS_FX100_STATUS_SYNC_STATUS BIT(0)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_dev_gmii.h b/drivers/net/ethernet/mscc/ocelot_dev_gmii.h
new file mode 100644
index 000000000000..6aa40ea223a2
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_dev_gmii.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_DEV_GMII_H_
+#define _MSCC_OCELOT_DEV_GMII_H_
+
+#define DEV_GMII_PORT_MODE_CLOCK_CFG 0x0
+
+#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_TX_RST BIT(5)
+#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_RX_RST BIT(4)
+#define DEV_GMII_PORT_MODE_CLOCK_CFG_PORT_RST BIT(3)
+#define DEV_GMII_PORT_MODE_CLOCK_CFG_PHY_RST BIT(2)
+#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0))
+#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0)
+
+#define DEV_GMII_PORT_MODE_PORT_MISC 0x4
+
+#define DEV_GMII_PORT_MODE_PORT_MISC_MPLS_RX_ENA BIT(5)
+#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_ERROR_ENA BIT(4)
+#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_PAUSE_ENA BIT(3)
+#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_CTRL_ENA BIT(2)
+#define DEV_GMII_PORT_MODE_PORT_MISC_GMII_LOOP_ENA BIT(1)
+#define DEV_GMII_PORT_MODE_PORT_MISC_DEV_LOOP_ENA BIT(0)
+
+#define DEV_GMII_PORT_MODE_EVENTS 0x8
+
+#define DEV_GMII_PORT_MODE_EEE_CFG 0xc
+
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_ENA BIT(22)
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15))
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15)
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15)
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8))
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8)
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8)
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1))
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1)
+#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1)
+#define DEV_GMII_PORT_MODE_EEE_CFG_PORT_LPI BIT(0)
+
+#define DEV_GMII_PORT_MODE_RX_PATH_DELAY 0x10
+
+#define DEV_GMII_PORT_MODE_TX_PATH_DELAY 0x14
+
+#define DEV_GMII_PORT_MODE_PTP_PREDICT_CFG 0x18
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG 0x1c
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_TX_ENA BIT(0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG 0x20
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FDX_ENA BIT(0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_MAXLEN_CFG 0x24
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG 0x28
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16))
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_PB_ENA BIT(1)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG 0x2c
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG 0x30
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8))
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4))
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0))
+#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG 0x34
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_OB_ENA BIT(25)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_WEXC_DIS BIT(24)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16))
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_M GENMASK(23, 16)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0))
+#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG 0x38
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_TBI_MODE BIT(4)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0)
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_LOW_CFG 0x3c
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_HIGH_CFG 0x40
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY 0x44
+
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_JUNK_STICKY BIT(5)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_JAM_STICKY BIT(3)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
+#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_ABORT_STICKY BIT(0)
+
+#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG 0x48
+
+#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA BIT(0)
+#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA BIT(4)
+#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_KEEP_S_AFTER_D BIT(8)
+
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG 0x4c
+
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS BIT(0)
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(x) (((x) << 4) & GENMASK(11, 4))
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M GENMASK(11, 4)
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(x) (((x) & GENMASK(11, 4)) >> 4)
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS(x) (((x) << 12) & GENMASK(13, 12))
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_M GENMASK(13, 12)
+#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_X(x) (((x) & GENMASK(13, 12)) >> 12)
+
+#define DEV_GMII_MM_STATISTICS_MM_STATUS 0x50
+
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STATUS BIT(0)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STICKY BIT(4)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE(x) (((x) << 8) & GENMASK(10, 8))
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_M GENMASK(10, 8)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_RX_PFRM_STICKY BIT(12)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_TX_PFRM_STICKY BIT(16)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_RX_FRAME_STATUS BIT(20)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_FRAME_STATUS BIT(24)
+#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_PRMPT_STATUS BIT(28)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_hsio.h b/drivers/net/ethernet/mscc/ocelot_hsio.h
new file mode 100644
index 000000000000..d93ddec3931b
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_hsio.h
@@ -0,0 +1,785 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_HSIO_H_
+#define _MSCC_OCELOT_HSIO_H_
+
+#define HSIO_PLL5G_CFG0_ENA_ROT BIT(31)
+#define HSIO_PLL5G_CFG0_ENA_LANE BIT(30)
+#define HSIO_PLL5G_CFG0_ENA_CLKTREE BIT(29)
+#define HSIO_PLL5G_CFG0_DIV4 BIT(28)
+#define HSIO_PLL5G_CFG0_ENA_LOCK_FINE BIT(27)
+#define HSIO_PLL5G_CFG0_SELBGV820(x) (((x) << 23) & GENMASK(26, 23))
+#define HSIO_PLL5G_CFG0_SELBGV820_M GENMASK(26, 23)
+#define HSIO_PLL5G_CFG0_SELBGV820_X(x) (((x) & GENMASK(26, 23)) >> 23)
+#define HSIO_PLL5G_CFG0_LOOP_BW_RES(x) (((x) << 18) & GENMASK(22, 18))
+#define HSIO_PLL5G_CFG0_LOOP_BW_RES_M GENMASK(22, 18)
+#define HSIO_PLL5G_CFG0_LOOP_BW_RES_X(x) (((x) & GENMASK(22, 18)) >> 18)
+#define HSIO_PLL5G_CFG0_SELCPI(x) (((x) << 16) & GENMASK(17, 16))
+#define HSIO_PLL5G_CFG0_SELCPI_M GENMASK(17, 16)
+#define HSIO_PLL5G_CFG0_SELCPI_X(x) (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_PLL5G_CFG0_ENA_VCO_CONTRH BIT(15)
+#define HSIO_PLL5G_CFG0_ENA_CP1 BIT(14)
+#define HSIO_PLL5G_CFG0_ENA_VCO_BUF BIT(13)
+#define HSIO_PLL5G_CFG0_ENA_BIAS BIT(12)
+#define HSIO_PLL5G_CFG0_CPU_CLK_DIV(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_M GENMASK(11, 6)
+#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_PLL5G_CFG0_CORE_CLK_DIV(x) ((x) & GENMASK(5, 0))
+#define HSIO_PLL5G_CFG0_CORE_CLK_DIV_M GENMASK(5, 0)
+
+#define HSIO_PLL5G_CFG1_ENA_DIRECT BIT(18)
+#define HSIO_PLL5G_CFG1_ROT_SPEED BIT(17)
+#define HSIO_PLL5G_CFG1_ROT_DIR BIT(16)
+#define HSIO_PLL5G_CFG1_READBACK_DATA_SEL BIT(15)
+#define HSIO_PLL5G_CFG1_RC_ENABLE BIT(14)
+#define HSIO_PLL5G_CFG1_RC_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6))
+#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_M GENMASK(13, 6)
+#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6)
+#define HSIO_PLL5G_CFG1_QUARTER_RATE BIT(5)
+#define HSIO_PLL5G_CFG1_PWD_TX BIT(4)
+#define HSIO_PLL5G_CFG1_PWD_RX BIT(3)
+#define HSIO_PLL5G_CFG1_OUT_OF_RANGE_RECAL_ENA BIT(2)
+#define HSIO_PLL5G_CFG1_HALF_RATE BIT(1)
+#define HSIO_PLL5G_CFG1_FORCE_SET_ENA BIT(0)
+
+#define HSIO_PLL5G_CFG2_ENA_TEST_MODE BIT(30)
+#define HSIO_PLL5G_CFG2_ENA_PFD_IN_FLIP BIT(29)
+#define HSIO_PLL5G_CFG2_ENA_VCO_NREF_TESTOUT BIT(28)
+#define HSIO_PLL5G_CFG2_ENA_FBTESTOUT BIT(27)
+#define HSIO_PLL5G_CFG2_ENA_RCPLL BIT(26)
+#define HSIO_PLL5G_CFG2_ENA_CP2 BIT(25)
+#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS1 BIT(24)
+#define HSIO_PLL5G_CFG2_AMPC_SEL(x) (((x) << 16) & GENMASK(23, 16))
+#define HSIO_PLL5G_CFG2_AMPC_SEL_M GENMASK(23, 16)
+#define HSIO_PLL5G_CFG2_AMPC_SEL_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS BIT(15)
+#define HSIO_PLL5G_CFG2_PWD_AMPCTRL_N BIT(14)
+#define HSIO_PLL5G_CFG2_ENA_AMPCTRL BIT(13)
+#define HSIO_PLL5G_CFG2_ENA_AMP_CTRL_FORCE BIT(12)
+#define HSIO_PLL5G_CFG2_FRC_FSM_POR BIT(11)
+#define HSIO_PLL5G_CFG2_DISABLE_FSM_POR BIT(10)
+#define HSIO_PLL5G_CFG2_GAIN_TEST(x) (((x) << 5) & GENMASK(9, 5))
+#define HSIO_PLL5G_CFG2_GAIN_TEST_M GENMASK(9, 5)
+#define HSIO_PLL5G_CFG2_GAIN_TEST_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define HSIO_PLL5G_CFG2_EN_RESET_OVERRUN BIT(4)
+#define HSIO_PLL5G_CFG2_EN_RESET_LIM_DET BIT(3)
+#define HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET BIT(2)
+#define HSIO_PLL5G_CFG2_DISABLE_FSM BIT(1)
+#define HSIO_PLL5G_CFG2_ENA_GAIN_TEST BIT(0)
+
+#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL(x) (((x) << 22) & GENMASK(23, 22))
+#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_M GENMASK(23, 22)
+#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_X(x) (((x) & GENMASK(23, 22)) >> 22)
+#define HSIO_PLL5G_CFG3_TESTOUT_SEL(x) (((x) << 19) & GENMASK(21, 19))
+#define HSIO_PLL5G_CFG3_TESTOUT_SEL_M GENMASK(21, 19)
+#define HSIO_PLL5G_CFG3_TESTOUT_SEL_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define HSIO_PLL5G_CFG3_ENA_ANA_TEST_OUT BIT(18)
+#define HSIO_PLL5G_CFG3_ENA_TEST_OUT BIT(17)
+#define HSIO_PLL5G_CFG3_SEL_FBDCLK BIT(16)
+#define HSIO_PLL5G_CFG3_SEL_CML_CMOS_PFD BIT(15)
+#define HSIO_PLL5G_CFG3_RST_FB_N BIT(14)
+#define HSIO_PLL5G_CFG3_FORCE_VCO_CONTRH BIT(13)
+#define HSIO_PLL5G_CFG3_FORCE_LO BIT(12)
+#define HSIO_PLL5G_CFG3_FORCE_HI BIT(11)
+#define HSIO_PLL5G_CFG3_FORCE_ENA BIT(10)
+#define HSIO_PLL5G_CFG3_FORCE_CP BIT(9)
+#define HSIO_PLL5G_CFG3_FBDIVSEL_TST_ENA BIT(8)
+#define HSIO_PLL5G_CFG3_FBDIVSEL(x) ((x) & GENMASK(7, 0))
+#define HSIO_PLL5G_CFG3_FBDIVSEL_M GENMASK(7, 0)
+
+#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16))
+#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_M GENMASK(23, 16)
+#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define HSIO_PLL5G_CFG4_IB_CTRL(x) ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_CFG4_IB_CTRL_M GENMASK(15, 0)
+
+#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16))
+#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_M GENMASK(23, 16)
+#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define HSIO_PLL5G_CFG5_OB_CTRL(x) ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_CFG5_OB_CTRL_M GENMASK(15, 0)
+
+#define HSIO_PLL5G_CFG6_REFCLK_SEL_SRC BIT(23)
+#define HSIO_PLL5G_CFG6_REFCLK_SEL(x) (((x) << 20) & GENMASK(22, 20))
+#define HSIO_PLL5G_CFG6_REFCLK_SEL_M GENMASK(22, 20)
+#define HSIO_PLL5G_CFG6_REFCLK_SEL_X(x) (((x) & GENMASK(22, 20)) >> 20)
+#define HSIO_PLL5G_CFG6_REFCLK_SRC BIT(19)
+#define HSIO_PLL5G_CFG6_POR_DEL_SEL(x) (((x) << 16) & GENMASK(17, 16))
+#define HSIO_PLL5G_CFG6_POR_DEL_SEL_M GENMASK(17, 16)
+#define HSIO_PLL5G_CFG6_POR_DEL_SEL_X(x) (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_PLL5G_CFG6_DIV125REF_SEL(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_PLL5G_CFG6_DIV125REF_SEL_M GENMASK(15, 8)
+#define HSIO_PLL5G_CFG6_DIV125REF_SEL_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_PLL5G_CFG6_ENA_REFCLKC2 BIT(7)
+#define HSIO_PLL5G_CFG6_ENA_FBCLKC2 BIT(6)
+#define HSIO_PLL5G_CFG6_DDR_CLK_DIV(x) ((x) & GENMASK(5, 0))
+#define HSIO_PLL5G_CFG6_DDR_CLK_DIV_M GENMASK(5, 0)
+
+#define HSIO_PLL5G_STATUS0_RANGE_LIM BIT(12)
+#define HSIO_PLL5G_STATUS0_OUT_OF_RANGE_ERR BIT(11)
+#define HSIO_PLL5G_STATUS0_CALIBRATION_ERR BIT(10)
+#define HSIO_PLL5G_STATUS0_CALIBRATION_DONE BIT(9)
+#define HSIO_PLL5G_STATUS0_READBACK_DATA(x) (((x) << 1) & GENMASK(8, 1))
+#define HSIO_PLL5G_STATUS0_READBACK_DATA_M GENMASK(8, 1)
+#define HSIO_PLL5G_STATUS0_READBACK_DATA_X(x) (((x) & GENMASK(8, 1)) >> 1)
+#define HSIO_PLL5G_STATUS0_LOCK_STATUS BIT(0)
+
+#define HSIO_PLL5G_STATUS1_SIG_DEL(x) (((x) << 21) & GENMASK(28, 21))
+#define HSIO_PLL5G_STATUS1_SIG_DEL_M GENMASK(28, 21)
+#define HSIO_PLL5G_STATUS1_SIG_DEL_X(x) (((x) & GENMASK(28, 21)) >> 21)
+#define HSIO_PLL5G_STATUS1_GAIN_STAT(x) (((x) << 16) & GENMASK(20, 16))
+#define HSIO_PLL5G_STATUS1_GAIN_STAT_M GENMASK(20, 16)
+#define HSIO_PLL5G_STATUS1_GAIN_STAT_X(x) (((x) & GENMASK(20, 16)) >> 16)
+#define HSIO_PLL5G_STATUS1_FBCNT_DIF(x) (((x) << 4) & GENMASK(13, 4))
+#define HSIO_PLL5G_STATUS1_FBCNT_DIF_M GENMASK(13, 4)
+#define HSIO_PLL5G_STATUS1_FBCNT_DIF_X(x) (((x) & GENMASK(13, 4)) >> 4)
+#define HSIO_PLL5G_STATUS1_FSM_STAT(x) (((x) << 1) & GENMASK(3, 1))
+#define HSIO_PLL5G_STATUS1_FSM_STAT_M GENMASK(3, 1)
+#define HSIO_PLL5G_STATUS1_FSM_STAT_X(x) (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_PLL5G_STATUS1_FSM_LOCK BIT(0)
+
+#define HSIO_PLL5G_BIST_CFG0_PLLB_START_BIST BIT(31)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_MEAS_MODE BIT(30)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT(x) (((x) << 20) & GENMASK(23, 20))
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_M GENMASK(23, 20)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_X(x) (((x) & GENMASK(23, 20)) >> 20)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT(x) (((x) << 16) & GENMASK(19, 16))
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_M GENMASK(19, 16)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_X(x) (((x) & GENMASK(19, 16)) >> 16)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE(x) ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE_M GENMASK(15, 0)
+
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_M GENMASK(7, 4)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_BUSY BIT(2)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_DONE_N BIT(1)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FAIL BIT(0)
+
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT(x) (((x) << 16) & GENMASK(31, 16))
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_M GENMASK(31, 16)
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF(x) ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF_M GENMASK(15, 0)
+
+#define HSIO_RCOMP_CFG0_PWD_ENA BIT(13)
+#define HSIO_RCOMP_CFG0_RUN_CAL BIT(12)
+#define HSIO_RCOMP_CFG0_SPEED_SEL(x) (((x) << 10) & GENMASK(11, 10))
+#define HSIO_RCOMP_CFG0_SPEED_SEL_M GENMASK(11, 10)
+#define HSIO_RCOMP_CFG0_SPEED_SEL_X(x) (((x) & GENMASK(11, 10)) >> 10)
+#define HSIO_RCOMP_CFG0_MODE_SEL(x) (((x) << 8) & GENMASK(9, 8))
+#define HSIO_RCOMP_CFG0_MODE_SEL_M GENMASK(9, 8)
+#define HSIO_RCOMP_CFG0_MODE_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define HSIO_RCOMP_CFG0_FORCE_ENA BIT(4)
+#define HSIO_RCOMP_CFG0_RCOMP_VAL(x) ((x) & GENMASK(3, 0))
+#define HSIO_RCOMP_CFG0_RCOMP_VAL_M GENMASK(3, 0)
+
+#define HSIO_RCOMP_STATUS_BUSY BIT(12)
+#define HSIO_RCOMP_STATUS_DELTA_ALERT BIT(7)
+#define HSIO_RCOMP_STATUS_RCOMP(x) ((x) & GENMASK(3, 0))
+#define HSIO_RCOMP_STATUS_RCOMP_M GENMASK(3, 0)
+
+#define HSIO_SYNC_ETH_CFG_RSZ 0x4
+
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_M GENMASK(7, 4)
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV(x) (((x) << 1) & GENMASK(3, 1))
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_M GENMASK(3, 1)
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_X(x) (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_SYNC_ETH_CFG_RECO_CLK_ENA BIT(0)
+
+#define HSIO_SYNC_ETH_PLL_CFG_PLL_AUTO_SQUELCH_ENA BIT(0)
+
+#define HSIO_S1G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13))
+#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13)
+#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
+#define HSIO_S1G_DES_CFG_DES_CPMD_SEL(x) (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_M GENMASK(12, 11)
+#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 8) & GENMASK(10, 8))
+#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_M GENMASK(10, 8)
+#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define HSIO_S1G_DES_CFG_DES_BW_ANA(x) (((x) << 5) & GENMASK(7, 5))
+#define HSIO_S1G_DES_CFG_DES_BW_ANA_M GENMASK(7, 5)
+#define HSIO_S1G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define HSIO_S1G_DES_CFG_DES_SWAP_ANA BIT(4)
+#define HSIO_S1G_DES_CFG_DES_BW_HYST(x) (((x) << 1) & GENMASK(3, 1))
+#define HSIO_S1G_DES_CFG_DES_BW_HYST_M GENMASK(3, 1)
+#define HSIO_S1G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_S1G_DES_CFG_DES_SWAP_HYST BIT(0)
+
+#define HSIO_S1G_IB_CFG_IB_FX100_ENA BIT(27)
+#define HSIO_S1G_IB_CFG_ACJTAG_HYST(x) (((x) << 24) & GENMASK(26, 24))
+#define HSIO_S1G_IB_CFG_ACJTAG_HYST_M GENMASK(26, 24)
+#define HSIO_S1G_IB_CFG_ACJTAG_HYST_X(x) (((x) & GENMASK(26, 24)) >> 24)
+#define HSIO_S1G_IB_CFG_IB_DET_LEV(x) (((x) << 19) & GENMASK(21, 19))
+#define HSIO_S1G_IB_CFG_IB_DET_LEV_M GENMASK(21, 19)
+#define HSIO_S1G_IB_CFG_IB_DET_LEV_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define HSIO_S1G_IB_CFG_IB_HYST_LEV BIT(14)
+#define HSIO_S1G_IB_CFG_IB_ENA_CMV_TERM BIT(13)
+#define HSIO_S1G_IB_CFG_IB_ENA_DC_COUPLING BIT(12)
+#define HSIO_S1G_IB_CFG_IB_ENA_DETLEV BIT(11)
+#define HSIO_S1G_IB_CFG_IB_ENA_HYST BIT(10)
+#define HSIO_S1G_IB_CFG_IB_ENA_OFFSET_COMP BIT(9)
+#define HSIO_S1G_IB_CFG_IB_EQ_GAIN(x) (((x) << 6) & GENMASK(8, 6))
+#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_M GENMASK(8, 6)
+#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ(x) (((x) << 4) & GENMASK(5, 4))
+#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_M GENMASK(5, 4)
+#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
+#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL_M GENMASK(3, 0)
+
+#define HSIO_S1G_OB_CFG_OB_SLP(x) (((x) << 17) & GENMASK(18, 17))
+#define HSIO_S1G_OB_CFG_OB_SLP_M GENMASK(18, 17)
+#define HSIO_S1G_OB_CFG_OB_SLP_X(x) (((x) & GENMASK(18, 17)) >> 17)
+#define HSIO_S1G_OB_CFG_OB_AMP_CTRL(x) (((x) << 13) & GENMASK(16, 13))
+#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_M GENMASK(16, 13)
+#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
+#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL(x) (((x) << 10) & GENMASK(12, 10))
+#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_M GENMASK(12, 10)
+#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10)
+#define HSIO_S1G_OB_CFG_OB_DIS_VCM_CTRL BIT(9)
+#define HSIO_S1G_OB_CFG_OB_EN_MEAS_VREG BIT(8)
+#define HSIO_S1G_OB_CFG_OB_VCM_CTRL(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_M GENMASK(7, 4)
+#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
+#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0)
+
+#define HSIO_S1G_SER_CFG_SER_IDLE BIT(9)
+#define HSIO_S1G_SER_CFG_SER_DEEMPH BIT(8)
+#define HSIO_S1G_SER_CFG_SER_CPMD_SEL BIT(7)
+#define HSIO_S1G_SER_CFG_SER_SWAP_CPMD BIT(6)
+#define HSIO_S1G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4))
+#define HSIO_S1G_SER_CFG_SER_ALISEL_M GENMASK(5, 4)
+#define HSIO_S1G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define HSIO_S1G_SER_CFG_SER_ENHYS BIT(3)
+#define HSIO_S1G_SER_CFG_SER_BIG_WIN BIT(2)
+#define HSIO_S1G_SER_CFG_SER_EN_WIN BIT(1)
+#define HSIO_S1G_SER_CFG_SER_ENALI BIT(0)
+
+#define HSIO_S1G_COMMON_CFG_SYS_RST BIT(31)
+#define HSIO_S1G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(21)
+#define HSIO_S1G_COMMON_CFG_ENA_LANE BIT(18)
+#define HSIO_S1G_COMMON_CFG_PWD_RX BIT(17)
+#define HSIO_S1G_COMMON_CFG_PWD_TX BIT(16)
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL(x) (((x) << 13) & GENMASK(15, 13))
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL_M GENMASK(15, 13)
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(15, 13)) >> 13)
+#define HSIO_S1G_COMMON_CFG_ENA_DIRECT BIT(12)
+#define HSIO_S1G_COMMON_CFG_ENA_ELOOP BIT(11)
+#define HSIO_S1G_COMMON_CFG_ENA_FLOOP BIT(10)
+#define HSIO_S1G_COMMON_CFG_ENA_ILOOP BIT(9)
+#define HSIO_S1G_COMMON_CFG_ENA_PLOOP BIT(8)
+#define HSIO_S1G_COMMON_CFG_HRATE BIT(7)
+#define HSIO_S1G_COMMON_CFG_IF_MODE BIT(0)
+
+#define HSIO_S1G_PLL_CFG_PLL_ENA_FB_DIV2 BIT(22)
+#define HSIO_S1G_PLL_CFG_PLL_ENA_RC_DIV2 BIT(21)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(15, 8)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_ENA BIT(7)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(6)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(5)
+#define HSIO_S1G_PLL_CFG_PLL_RB_DATA_SEL BIT(3)
+
+#define HSIO_S1G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(12)
+#define HSIO_S1G_PLL_STATUS_PLL_CAL_ERR BIT(11)
+#define HSIO_S1G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(10)
+#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0))
+#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0)
+
+#define HSIO_S1G_DFT_CFG0_LAZYBIT BIT(31)
+#define HSIO_S1G_DFT_CFG0_INV_DIS BIT(23)
+#define HSIO_S1G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20))
+#define HSIO_S1G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20)
+#define HSIO_S1G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20)
+#define HSIO_S1G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S1G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16)
+#define HSIO_S1G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S1G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4)
+#define HSIO_S1G_DFT_CFG0_RX_PDSENS_ENA BIT(3)
+#define HSIO_S1G_DFT_CFG0_RX_DFT_ENA BIT(2)
+#define HSIO_S1G_DFT_CFG0_TX_DFT_ENA BIT(0)
+
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8)
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4)
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S1G_DFT_CFG1_TX_JI_ENA BIT(3)
+#define HSIO_S1G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2)
+#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_DIR BIT(1)
+#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_ENA BIT(0)
+
+#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8)
+#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4)
+#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S1G_DFT_CFG2_RX_JI_ENA BIT(3)
+#define HSIO_S1G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2)
+#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_DIR BIT(1)
+#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_ENA BIT(0)
+
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(17, 16))
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(17, 16)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0))
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0)
+
+#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11)
+#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10)
+#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9)
+#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8)
+#define HSIO_S1G_MISC_CFG_RX_LPI_MODE_ENA BIT(5)
+#define HSIO_S1G_MISC_CFG_TX_LPI_MODE_ENA BIT(4)
+#define HSIO_S1G_MISC_CFG_RX_DATA_INV_ENA BIT(3)
+#define HSIO_S1G_MISC_CFG_TX_DATA_INV_ENA BIT(2)
+#define HSIO_S1G_MISC_CFG_LANE_RST BIT(0)
+
+#define HSIO_S1G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7)
+#define HSIO_S1G_DFT_STATUS_PLL_BIST_FAILED BIT(6)
+#define HSIO_S1G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5)
+#define HSIO_S1G_DFT_STATUS_BIST_ACTIVE BIT(3)
+#define HSIO_S1G_DFT_STATUS_BIST_NOSYNC BIT(2)
+#define HSIO_S1G_DFT_STATUS_BIST_COMPLETE_N BIT(1)
+#define HSIO_S1G_DFT_STATUS_BIST_ERROR BIT(0)
+
+#define HSIO_S1G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0)
+
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT BIT(31)
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT BIT(30)
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(x) ((x) & GENMASK(8, 0))
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR_M GENMASK(8, 0)
+
+#define HSIO_S6G_DIG_CFG_GP(x) (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S6G_DIG_CFG_GP_M GENMASK(18, 16)
+#define HSIO_S6G_DIG_CFG_GP_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S6G_DIG_CFG_TX_BIT_DOUBLING_MODE_ENA BIT(7)
+#define HSIO_S6G_DIG_CFG_SIGDET_TESTMODE BIT(6)
+#define HSIO_S6G_DIG_CFG_SIGDET_AST(x) (((x) << 3) & GENMASK(5, 3))
+#define HSIO_S6G_DIG_CFG_SIGDET_AST_M GENMASK(5, 3)
+#define HSIO_S6G_DIG_CFG_SIGDET_AST_X(x) (((x) & GENMASK(5, 3)) >> 3)
+#define HSIO_S6G_DIG_CFG_SIGDET_DST(x) ((x) & GENMASK(2, 0))
+#define HSIO_S6G_DIG_CFG_SIGDET_DST_M GENMASK(2, 0)
+
+#define HSIO_S6G_DFT_CFG0_LAZYBIT BIT(31)
+#define HSIO_S6G_DFT_CFG0_INV_DIS BIT(23)
+#define HSIO_S6G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20))
+#define HSIO_S6G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20)
+#define HSIO_S6G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20)
+#define HSIO_S6G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S6G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16)
+#define HSIO_S6G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S6G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4)
+#define HSIO_S6G_DFT_CFG0_RX_PDSENS_ENA BIT(3)
+#define HSIO_S6G_DFT_CFG0_RX_DFT_ENA BIT(2)
+#define HSIO_S6G_DFT_CFG0_TX_DFT_ENA BIT(0)
+
+#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8)
+#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4)
+#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S6G_DFT_CFG1_TX_JI_ENA BIT(3)
+#define HSIO_S6G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2)
+#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_DIR BIT(1)
+#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_ENA BIT(0)
+
+#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8)
+#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4)
+#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S6G_DFT_CFG2_RX_JI_ENA BIT(3)
+#define HSIO_S6G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2)
+#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_DIR BIT(1)
+#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_ENA BIT(0)
+
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(19, 16))
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(19, 16)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(19, 16)) >> 16)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0))
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0)
+
+#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK(x) (((x) << 13) & GENMASK(14, 13))
+#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_M GENMASK(14, 13)
+#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_X(x) (((x) & GENMASK(14, 13)) >> 13)
+#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11)
+#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10)
+#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9)
+#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8)
+#define HSIO_S6G_MISC_CFG_RX_BUS_FLIP_ENA BIT(7)
+#define HSIO_S6G_MISC_CFG_TX_BUS_FLIP_ENA BIT(6)
+#define HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA BIT(5)
+#define HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA BIT(4)
+#define HSIO_S6G_MISC_CFG_RX_DATA_INV_ENA BIT(3)
+#define HSIO_S6G_MISC_CFG_TX_DATA_INV_ENA BIT(2)
+#define HSIO_S6G_MISC_CFG_LANE_RST BIT(0)
+
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0(x) (((x) << 23) & GENMASK(28, 23))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_M GENMASK(28, 23)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1(x) (((x) << 18) & GENMASK(22, 18))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_M GENMASK(22, 18)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_X(x) (((x) & GENMASK(22, 18)) >> 18)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC(x) (((x) << 13) & GENMASK(17, 13))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_M GENMASK(17, 13)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_X(x) (((x) & GENMASK(17, 13)) >> 13)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_M GENMASK(8, 6)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV_M GENMASK(5, 0)
+
+#define HSIO_S6G_DFT_STATUS_PRBS_SYNC_STAT BIT(8)
+#define HSIO_S6G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7)
+#define HSIO_S6G_DFT_STATUS_PLL_BIST_FAILED BIT(6)
+#define HSIO_S6G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5)
+#define HSIO_S6G_DFT_STATUS_BIST_ACTIVE BIT(3)
+#define HSIO_S6G_DFT_STATUS_BIST_NOSYNC BIT(2)
+#define HSIO_S6G_DFT_STATUS_BIST_COMPLETE_N BIT(1)
+#define HSIO_S6G_DFT_STATUS_BIST_ERROR BIT(0)
+
+#define HSIO_S6G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0)
+
+#define HSIO_S6G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13))
+#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13)
+#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
+#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 10) & GENMASK(12, 10))
+#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_M GENMASK(12, 10)
+#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10)
+#define HSIO_S6G_DES_CFG_DES_CPMD_SEL(x) (((x) << 8) & GENMASK(9, 8))
+#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_M GENMASK(9, 8)
+#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define HSIO_S6G_DES_CFG_DES_BW_HYST(x) (((x) << 5) & GENMASK(7, 5))
+#define HSIO_S6G_DES_CFG_DES_BW_HYST_M GENMASK(7, 5)
+#define HSIO_S6G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define HSIO_S6G_DES_CFG_DES_SWAP_HYST BIT(4)
+#define HSIO_S6G_DES_CFG_DES_BW_ANA(x) (((x) << 1) & GENMASK(3, 1))
+#define HSIO_S6G_DES_CFG_DES_BW_ANA_M GENMASK(3, 1)
+#define HSIO_S6G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_S6G_DES_CFG_DES_SWAP_ANA BIT(0)
+
+#define HSIO_S6G_IB_CFG_IB_SOFSI(x) (((x) << 29) & GENMASK(30, 29))
+#define HSIO_S6G_IB_CFG_IB_SOFSI_M GENMASK(30, 29)
+#define HSIO_S6G_IB_CFG_IB_SOFSI_X(x) (((x) & GENMASK(30, 29)) >> 29)
+#define HSIO_S6G_IB_CFG_IB_VBULK_SEL BIT(28)
+#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ(x) (((x) << 24) & GENMASK(27, 24))
+#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_M GENMASK(27, 24)
+#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_X(x) (((x) & GENMASK(27, 24)) >> 24)
+#define HSIO_S6G_IB_CFG_IB_ICML_ADJ(x) (((x) << 20) & GENMASK(23, 20))
+#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_M GENMASK(23, 20)
+#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_X(x) (((x) & GENMASK(23, 20)) >> 20)
+#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL(x) (((x) << 18) & GENMASK(19, 18))
+#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_M GENMASK(19, 18)
+#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_X(x) (((x) & GENMASK(19, 18)) >> 18)
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(x) (((x) << 15) & GENMASK(17, 15))
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M GENMASK(17, 15)
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_X(x) (((x) & GENMASK(17, 15)) >> 15)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP(x) (((x) << 13) & GENMASK(14, 13))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_M GENMASK(14, 13)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_X(x) (((x) & GENMASK(14, 13)) >> 13)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID(x) (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_M GENMASK(12, 11)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_X(x) (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP(x) (((x) << 9) & GENMASK(10, 9))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_M GENMASK(10, 9)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_X(x) (((x) & GENMASK(10, 9)) >> 9)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(x) (((x) << 7) & GENMASK(8, 7))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M GENMASK(8, 7)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_X(x) (((x) & GENMASK(8, 7)) >> 7)
+#define HSIO_S6G_IB_CFG_IB_ANA_TEST_ENA BIT(6)
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_ENA BIT(5)
+#define HSIO_S6G_IB_CFG_IB_CONCUR BIT(4)
+#define HSIO_S6G_IB_CFG_IB_CAL_ENA BIT(3)
+#define HSIO_S6G_IB_CFG_IB_SAM_ENA BIT(2)
+#define HSIO_S6G_IB_CFG_IB_EQZ_ENA BIT(1)
+#define HSIO_S6G_IB_CFG_IB_REG_ENA BIT(0)
+
+#define HSIO_S6G_IB_CFG1_IB_TJTAG(x) (((x) << 17) & GENMASK(21, 17))
+#define HSIO_S6G_IB_CFG1_IB_TJTAG_M GENMASK(21, 17)
+#define HSIO_S6G_IB_CFG1_IB_TJTAG_X(x) (((x) & GENMASK(21, 17)) >> 17)
+#define HSIO_S6G_IB_CFG1_IB_TSDET(x) (((x) << 12) & GENMASK(16, 12))
+#define HSIO_S6G_IB_CFG1_IB_TSDET_M GENMASK(16, 12)
+#define HSIO_S6G_IB_CFG1_IB_TSDET_X(x) (((x) & GENMASK(16, 12)) >> 12)
+#define HSIO_S6G_IB_CFG1_IB_SCALY(x) (((x) << 8) & GENMASK(11, 8))
+#define HSIO_S6G_IB_CFG1_IB_SCALY_M GENMASK(11, 8)
+#define HSIO_S6G_IB_CFG1_IB_SCALY_X(x) (((x) & GENMASK(11, 8)) >> 8)
+#define HSIO_S6G_IB_CFG1_IB_FILT_HP BIT(7)
+#define HSIO_S6G_IB_CFG1_IB_FILT_MID BIT(6)
+#define HSIO_S6G_IB_CFG1_IB_FILT_LP BIT(5)
+#define HSIO_S6G_IB_CFG1_IB_FILT_OFFSET BIT(4)
+#define HSIO_S6G_IB_CFG1_IB_FRC_HP BIT(3)
+#define HSIO_S6G_IB_CFG1_IB_FRC_MID BIT(2)
+#define HSIO_S6G_IB_CFG1_IB_FRC_LP BIT(1)
+#define HSIO_S6G_IB_CFG1_IB_FRC_OFFSET BIT(0)
+
+#define HSIO_S6G_IB_CFG2_IB_TINFV(x) (((x) << 27) & GENMASK(29, 27))
+#define HSIO_S6G_IB_CFG2_IB_TINFV_M GENMASK(29, 27)
+#define HSIO_S6G_IB_CFG2_IB_TINFV_X(x) (((x) & GENMASK(29, 27)) >> 27)
+#define HSIO_S6G_IB_CFG2_IB_OINFI(x) (((x) << 22) & GENMASK(26, 22))
+#define HSIO_S6G_IB_CFG2_IB_OINFI_M GENMASK(26, 22)
+#define HSIO_S6G_IB_CFG2_IB_OINFI_X(x) (((x) & GENMASK(26, 22)) >> 22)
+#define HSIO_S6G_IB_CFG2_IB_TAUX(x) (((x) << 19) & GENMASK(21, 19))
+#define HSIO_S6G_IB_CFG2_IB_TAUX_M GENMASK(21, 19)
+#define HSIO_S6G_IB_CFG2_IB_TAUX_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define HSIO_S6G_IB_CFG2_IB_OINFS(x) (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S6G_IB_CFG2_IB_OINFS_M GENMASK(18, 16)
+#define HSIO_S6G_IB_CFG2_IB_OINFS_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S6G_IB_CFG2_IB_OCALS(x) (((x) << 10) & GENMASK(15, 10))
+#define HSIO_S6G_IB_CFG2_IB_OCALS_M GENMASK(15, 10)
+#define HSIO_S6G_IB_CFG2_IB_OCALS_X(x) (((x) & GENMASK(15, 10)) >> 10)
+#define HSIO_S6G_IB_CFG2_IB_TCALV(x) (((x) << 5) & GENMASK(9, 5))
+#define HSIO_S6G_IB_CFG2_IB_TCALV_M GENMASK(9, 5)
+#define HSIO_S6G_IB_CFG2_IB_TCALV_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define HSIO_S6G_IB_CFG2_IB_UMAX(x) (((x) << 3) & GENMASK(4, 3))
+#define HSIO_S6G_IB_CFG2_IB_UMAX_M GENMASK(4, 3)
+#define HSIO_S6G_IB_CFG2_IB_UMAX_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define HSIO_S6G_IB_CFG2_IB_UREG(x) ((x) & GENMASK(2, 0))
+#define HSIO_S6G_IB_CFG2_IB_UREG_M GENMASK(2, 0)
+
+#define HSIO_S6G_IB_CFG3_IB_INI_HP(x) (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_CFG3_IB_INI_HP_M GENMASK(23, 18)
+#define HSIO_S6G_IB_CFG3_IB_INI_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_CFG3_IB_INI_MID(x) (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_CFG3_IB_INI_MID_M GENMASK(17, 12)
+#define HSIO_S6G_IB_CFG3_IB_INI_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_CFG3_IB_INI_LP(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_CFG3_IB_INI_LP_M GENMASK(11, 6)
+#define HSIO_S6G_IB_CFG3_IB_INI_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M GENMASK(5, 0)
+
+#define HSIO_S6G_IB_CFG4_IB_MAX_HP(x) (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_CFG4_IB_MAX_HP_M GENMASK(23, 18)
+#define HSIO_S6G_IB_CFG4_IB_MAX_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_CFG4_IB_MAX_MID(x) (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_CFG4_IB_MAX_MID_M GENMASK(17, 12)
+#define HSIO_S6G_IB_CFG4_IB_MAX_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_CFG4_IB_MAX_LP(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_CFG4_IB_MAX_LP_M GENMASK(11, 6)
+#define HSIO_S6G_IB_CFG4_IB_MAX_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET_M GENMASK(5, 0)
+
+#define HSIO_S6G_IB_CFG5_IB_MIN_HP(x) (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_CFG5_IB_MIN_HP_M GENMASK(23, 18)
+#define HSIO_S6G_IB_CFG5_IB_MIN_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_CFG5_IB_MIN_MID(x) (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_CFG5_IB_MIN_MID_M GENMASK(17, 12)
+#define HSIO_S6G_IB_CFG5_IB_MIN_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_CFG5_IB_MIN_LP(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_CFG5_IB_MIN_LP_M GENMASK(11, 6)
+#define HSIO_S6G_IB_CFG5_IB_MIN_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET_M GENMASK(5, 0)
+
+#define HSIO_S6G_OB_CFG_OB_IDLE BIT(31)
+#define HSIO_S6G_OB_CFG_OB_ENA1V_MODE BIT(30)
+#define HSIO_S6G_OB_CFG_OB_POL BIT(29)
+#define HSIO_S6G_OB_CFG_OB_POST0(x) (((x) << 23) & GENMASK(28, 23))
+#define HSIO_S6G_OB_CFG_OB_POST0_M GENMASK(28, 23)
+#define HSIO_S6G_OB_CFG_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23)
+#define HSIO_S6G_OB_CFG_OB_PREC(x) (((x) << 18) & GENMASK(22, 18))
+#define HSIO_S6G_OB_CFG_OB_PREC_M GENMASK(22, 18)
+#define HSIO_S6G_OB_CFG_OB_PREC_X(x) (((x) & GENMASK(22, 18)) >> 18)
+#define HSIO_S6G_OB_CFG_OB_R_ADJ_MUX BIT(17)
+#define HSIO_S6G_OB_CFG_OB_R_ADJ_PDR BIT(16)
+#define HSIO_S6G_OB_CFG_OB_POST1(x) (((x) << 11) & GENMASK(15, 11))
+#define HSIO_S6G_OB_CFG_OB_POST1_M GENMASK(15, 11)
+#define HSIO_S6G_OB_CFG_OB_POST1_X(x) (((x) & GENMASK(15, 11)) >> 11)
+#define HSIO_S6G_OB_CFG_OB_R_COR BIT(10)
+#define HSIO_S6G_OB_CFG_OB_SEL_RCTRL BIT(9)
+#define HSIO_S6G_OB_CFG_OB_SR_H BIT(8)
+#define HSIO_S6G_OB_CFG_OB_SR(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S6G_OB_CFG_OB_SR_M GENMASK(7, 4)
+#define HSIO_S6G_OB_CFG_OB_SR_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
+#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0)
+
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6))
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_M GENMASK(8, 6)
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define HSIO_S6G_OB_CFG1_OB_LEV(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_OB_CFG1_OB_LEV_M GENMASK(5, 0)
+
+#define HSIO_S6G_SER_CFG_SER_4TAP_ENA BIT(8)
+#define HSIO_S6G_SER_CFG_SER_CPMD_SEL BIT(7)
+#define HSIO_S6G_SER_CFG_SER_SWAP_CPMD BIT(6)
+#define HSIO_S6G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4))
+#define HSIO_S6G_SER_CFG_SER_ALISEL_M GENMASK(5, 4)
+#define HSIO_S6G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define HSIO_S6G_SER_CFG_SER_ENHYS BIT(3)
+#define HSIO_S6G_SER_CFG_SER_BIG_WIN BIT(2)
+#define HSIO_S6G_SER_CFG_SER_EN_WIN BIT(1)
+#define HSIO_S6G_SER_CFG_SER_ENALI BIT(0)
+
+#define HSIO_S6G_COMMON_CFG_SYS_RST BIT(17)
+#define HSIO_S6G_COMMON_CFG_SE_DIV2_ENA BIT(16)
+#define HSIO_S6G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(15)
+#define HSIO_S6G_COMMON_CFG_ENA_LANE BIT(14)
+#define HSIO_S6G_COMMON_CFG_PWD_RX BIT(13)
+#define HSIO_S6G_COMMON_CFG_PWD_TX BIT(12)
+#define HSIO_S6G_COMMON_CFG_LANE_CTRL(x) (((x) << 9) & GENMASK(11, 9))
+#define HSIO_S6G_COMMON_CFG_LANE_CTRL_M GENMASK(11, 9)
+#define HSIO_S6G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(11, 9)) >> 9)
+#define HSIO_S6G_COMMON_CFG_ENA_DIRECT BIT(8)
+#define HSIO_S6G_COMMON_CFG_ENA_ELOOP BIT(7)
+#define HSIO_S6G_COMMON_CFG_ENA_FLOOP BIT(6)
+#define HSIO_S6G_COMMON_CFG_ENA_ILOOP BIT(5)
+#define HSIO_S6G_COMMON_CFG_ENA_PLOOP BIT(4)
+#define HSIO_S6G_COMMON_CFG_HRATE BIT(3)
+#define HSIO_S6G_COMMON_CFG_QRATE BIT(2)
+#define HSIO_S6G_COMMON_CFG_IF_MODE(x) ((x) & GENMASK(1, 0))
+#define HSIO_S6G_COMMON_CFG_IF_MODE_M GENMASK(1, 0)
+
+#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS(x) (((x) << 16) & GENMASK(17, 16))
+#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_M GENMASK(17, 16)
+#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_X(x) (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_S6G_PLL_CFG_PLL_DIV4 BIT(15)
+#define HSIO_S6G_PLL_CFG_PLL_ENA_ROT BIT(14)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6))
+#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(13, 6)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_ENA BIT(5)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(4)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(3)
+#define HSIO_S6G_PLL_CFG_PLL_RB_DATA_SEL BIT(2)
+#define HSIO_S6G_PLL_CFG_PLL_ROT_DIR BIT(1)
+#define HSIO_S6G_PLL_CFG_PLL_ROT_FRQ BIT(0)
+
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_N BIT(5)
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_P BIT(4)
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_CLK BIT(3)
+#define HSIO_S6G_ACJTAG_CFG_OB_DIRECT BIT(2)
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_ENA BIT(1)
+#define HSIO_S6G_ACJTAG_CFG_JTAG_CTRL_ENA BIT(0)
+
+#define HSIO_S6G_GP_CFG_GP_MSB(x) (((x) << 16) & GENMASK(31, 16))
+#define HSIO_S6G_GP_CFG_GP_MSB_M GENMASK(31, 16)
+#define HSIO_S6G_GP_CFG_GP_MSB_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define HSIO_S6G_GP_CFG_GP_LSB(x) ((x) & GENMASK(15, 0))
+#define HSIO_S6G_GP_CFG_GP_LSB_M GENMASK(15, 0)
+
+#define HSIO_S6G_IB_STATUS0_IB_CAL_DONE BIT(8)
+#define HSIO_S6G_IB_STATUS0_IB_HP_GAIN_ACT BIT(7)
+#define HSIO_S6G_IB_STATUS0_IB_MID_GAIN_ACT BIT(6)
+#define HSIO_S6G_IB_STATUS0_IB_LP_GAIN_ACT BIT(5)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ACT BIT(4)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSET_VLD BIT(3)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ERR BIT(2)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSDIR BIT(1)
+#define HSIO_S6G_IB_STATUS0_IB_SIG_DET BIT(0)
+
+#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT(x) (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_M GENMASK(23, 18)
+#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT(x) (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_M GENMASK(17, 12)
+#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_M GENMASK(11, 6)
+#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT_M GENMASK(5, 0)
+
+#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_N BIT(2)
+#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_P BIT(1)
+#define HSIO_S6G_ACJTAG_STATUS_IB_DIRECT BIT(0)
+
+#define HSIO_S6G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(10)
+#define HSIO_S6G_PLL_STATUS_PLL_CAL_ERR BIT(9)
+#define HSIO_S6G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(8)
+#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0))
+#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0)
+
+#define HSIO_S6G_REVID_SERDES_REV(x) (((x) << 26) & GENMASK(31, 26))
+#define HSIO_S6G_REVID_SERDES_REV_M GENMASK(31, 26)
+#define HSIO_S6G_REVID_SERDES_REV_X(x) (((x) & GENMASK(31, 26)) >> 26)
+#define HSIO_S6G_REVID_RCPLL_REV(x) (((x) << 21) & GENMASK(25, 21))
+#define HSIO_S6G_REVID_RCPLL_REV_M GENMASK(25, 21)
+#define HSIO_S6G_REVID_RCPLL_REV_X(x) (((x) & GENMASK(25, 21)) >> 21)
+#define HSIO_S6G_REVID_SER_REV(x) (((x) << 16) & GENMASK(20, 16))
+#define HSIO_S6G_REVID_SER_REV_M GENMASK(20, 16)
+#define HSIO_S6G_REVID_SER_REV_X(x) (((x) & GENMASK(20, 16)) >> 16)
+#define HSIO_S6G_REVID_DES_REV(x) (((x) << 10) & GENMASK(15, 10))
+#define HSIO_S6G_REVID_DES_REV_M GENMASK(15, 10)
+#define HSIO_S6G_REVID_DES_REV_X(x) (((x) & GENMASK(15, 10)) >> 10)
+#define HSIO_S6G_REVID_OB_REV(x) (((x) << 5) & GENMASK(9, 5))
+#define HSIO_S6G_REVID_OB_REV_M GENMASK(9, 5)
+#define HSIO_S6G_REVID_OB_REV_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define HSIO_S6G_REVID_IB_REV(x) ((x) & GENMASK(4, 0))
+#define HSIO_S6G_REVID_IB_REV_M GENMASK(4, 0)
+
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT BIT(31)
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT BIT(30)
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(x) ((x) & GENMASK(24, 0))
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR_M GENMASK(24, 0)
+
+#define HSIO_HW_CFG_DEV2G5_10_MODE BIT(6)
+#define HSIO_HW_CFG_DEV1G_9_MODE BIT(5)
+#define HSIO_HW_CFG_DEV1G_6_MODE BIT(4)
+#define HSIO_HW_CFG_DEV1G_5_MODE BIT(3)
+#define HSIO_HW_CFG_DEV1G_4_MODE BIT(2)
+#define HSIO_HW_CFG_PCIE_ENA BIT(1)
+#define HSIO_HW_CFG_QSGMII_ENA BIT(0)
+
+#define HSIO_HW_QSGMII_CFG_SHYST_DIS BIT(3)
+#define HSIO_HW_QSGMII_CFG_E_DET_ENA BIT(2)
+#define HSIO_HW_QSGMII_CFG_USE_I1_ENA BIT(1)
+#define HSIO_HW_QSGMII_CFG_FLIP_LANES BIT(0)
+
+#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS(x) (((x) << 1) & GENMASK(6, 1))
+#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_M GENMASK(6, 1)
+#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_X(x) (((x) & GENMASK(6, 1)) >> 1)
+#define HSIO_HW_QSGMII_STAT_SYNC BIT(0)
+
+#define HSIO_CLK_CFG_CLKDIV_PHY(x) (((x) << 1) & GENMASK(8, 1))
+#define HSIO_CLK_CFG_CLKDIV_PHY_M GENMASK(8, 1)
+#define HSIO_CLK_CFG_CLKDIV_PHY_X(x) (((x) & GENMASK(8, 1)) >> 1)
+#define HSIO_CLK_CFG_CLKDIV_PHY_DIS BIT(0)
+
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_TEMP_RD BIT(5)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_RUN BIT(4)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_NO_RST BIT(3)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_POWER_UP BIT(2)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_CLK BIT(1)
+#define HSIO_TEMP_SENSOR_CTRL_SAMPLE_ENA BIT(0)
+
+#define HSIO_TEMP_SENSOR_CFG_RUN_WID(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_TEMP_SENSOR_CFG_RUN_WID_M GENMASK(15, 8)
+#define HSIO_TEMP_SENSOR_CFG_RUN_WID_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER(x) ((x) & GENMASK(7, 0))
+#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER_M GENMASK(7, 0)
+
+#define HSIO_TEMP_SENSOR_STAT_TEMP_VALID BIT(8)
+#define HSIO_TEMP_SENSOR_STAT_TEMP(x) ((x) & GENMASK(7, 0))
+#define HSIO_TEMP_SENSOR_STAT_TEMP_M GENMASK(7, 0)
+
+#endif
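
Editor's note: throughout this header each multi-bit field comes as a triplet of macros: FIELD(x) shifts a value into position (masking off any overflow), FIELD_M is the in-place mask, and FIELD_X(x) extracts the field back out of a full register word. A minimal sketch of how the triplet composes for a read-modify-write on a cached register value, using the S6G PLL field defined above (the helper name is purely illustrative):

	/* Illustrative only: update PLL_FSM_CTRL_DATA inside a cached
	 * HSIO_S6G_PLL_CFG value without disturbing the other bits.
	 */
	static u32 s6g_pll_cfg_set_fsm_ctrl_data(u32 pll_cfg, u32 data)
	{
		pll_cfg &= ~HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M;
		pll_cfg |= HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(data);
		return pll_cfg;
	}

	/* Decoding goes the other way:				      */
	/*   data = HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(pll_cfg);   */
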
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
new file mode 100644
index 000000000000..c6db8ad31fdf
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "ocelot.h"
+
+u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
+{
+ u16 target = reg >> TARGET_OFFSET;
+ u32 val;
+
+ WARN_ON(!target);
+
+ regmap_read(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset, &val);
+ return val;
+}
+EXPORT_SYMBOL(__ocelot_read_ix);
+
+void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
+{
+ u16 target = reg >> TARGET_OFFSET;
+
+ WARN_ON(!target);
+
+ regmap_write(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset, val);
+}
+EXPORT_SYMBOL(__ocelot_write_ix);
+
+void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
+ u32 offset)
+{
+ u16 target = reg >> TARGET_OFFSET;
+
+ WARN_ON(!target);
+
+ regmap_update_bits(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset,
+ mask, val);
+}
+EXPORT_SYMBOL(__ocelot_rmw_ix);
+
+u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
+{
+ return readl(port->regs + reg);
+}
+EXPORT_SYMBOL(ocelot_port_readl);
+
+void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
+{
+ writel(val, port->regs + reg);
+}
+EXPORT_SYMBOL(ocelot_port_writel);
+
+int ocelot_regfields_init(struct ocelot *ocelot,
+ const struct reg_field *const regfields)
+{
+ unsigned int i;
+ u16 target;
+
+ for (i = 0; i < REGFIELD_MAX; i++) {
+ struct reg_field regfield = {};
+ u32 reg = regfields[i].reg;
+
+ if (!reg)
+ continue;
+
+ target = regfields[i].reg >> TARGET_OFFSET;
+
+ regfield.reg = ocelot->map[target][reg & REG_MASK];
+ regfield.lsb = regfields[i].lsb;
+ regfield.msb = regfields[i].msb;
+
+ ocelot->regfields[i] =
+ devm_regmap_field_alloc(ocelot->dev,
+ ocelot->targets[target],
+ regfield);
+
+ if (IS_ERR(ocelot->regfields[i]))
+ return PTR_ERR(ocelot->regfields[i]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_regfields_init);
+
+static struct regmap_config ocelot_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+struct regmap *ocelot_io_platform_init(struct ocelot *ocelot,
+ struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+ void __iomem *regs;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ regs = devm_ioremap_resource(ocelot->dev, res);
+ if (IS_ERR(regs))
+ return ERR_CAST(regs);
+
+ ocelot_regmap_config.name = name;
+ return devm_regmap_init_mmio(ocelot->dev, regs,
+ &ocelot_regmap_config);
+}
+EXPORT_SYMBOL(ocelot_io_platform_init);
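
Editor's note: the accessors above split every register constant in two: the high bits (reg >> TARGET_OFFSET) select which regmap target the register lives in, while the low bits (reg & REG_MASK) index that target's offset table, with the extra offset argument covering per-port or per-group replication. A minimal sketch of a field update built on __ocelot_rmw_ix, assuming an already-probed struct ocelot and that replicated registers are addressed as instance * _RSZ bytes:

	/* Illustrative only: set PORT_ENA in QSYS_SWITCH_PORT_MODE for
	 * one front port, leaving the rest of the register untouched.
	 */
	static void example_port_enable(struct ocelot *ocelot, int port)
	{
		__ocelot_rmw_ix(ocelot,
				QSYS_SWITCH_PORT_MODE_PORT_ENA,	/* val  */
				QSYS_SWITCH_PORT_MODE_PORT_ENA,	/* mask */
				QSYS_SWITCH_PORT_MODE,
				port * QSYS_SWITCH_PORT_MODE_RSZ);
	}
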
diff --git a/drivers/net/ethernet/mscc/ocelot_qs.h b/drivers/net/ethernet/mscc/ocelot_qs.h
new file mode 100644
index 000000000000..d18ae726c01d
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_qs.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_QS_H_
+#define _MSCC_OCELOT_QS_H_
+
+/* TODO handle BE */
+#define XTR_EOF_0 0x00000080U
+#define XTR_EOF_1 0x01000080U
+#define XTR_EOF_2 0x02000080U
+#define XTR_EOF_3 0x03000080U
+#define XTR_PRUNED 0x04000080U
+#define XTR_ABORT 0x05000080U
+#define XTR_ESCAPE 0x06000080U
+#define XTR_NOT_READY 0x07000080U
+#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
+
+#define QS_XTR_GRP_CFG_RSZ 0x4
+
+#define QS_XTR_GRP_CFG_MODE(x) (((x) << 2) & GENMASK(3, 2))
+#define QS_XTR_GRP_CFG_MODE_M GENMASK(3, 2)
+#define QS_XTR_GRP_CFG_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS BIT(1)
+#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0)
+
+#define QS_XTR_RD_RSZ 0x4
+
+#define QS_XTR_FRM_PRUNING_RSZ 0x4
+
+#define QS_XTR_CFG_DP_WM(x) (((x) << 5) & GENMASK(7, 5))
+#define QS_XTR_CFG_DP_WM_M GENMASK(7, 5)
+#define QS_XTR_CFG_DP_WM_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define QS_XTR_CFG_SCH_WM(x) (((x) << 2) & GENMASK(4, 2))
+#define QS_XTR_CFG_SCH_WM_M GENMASK(4, 2)
+#define QS_XTR_CFG_SCH_WM_X(x) (((x) & GENMASK(4, 2)) >> 2)
+#define QS_XTR_CFG_OFLW_ERR_STICKY(x) ((x) & GENMASK(1, 0))
+#define QS_XTR_CFG_OFLW_ERR_STICKY_M GENMASK(1, 0)
+
+#define QS_INJ_GRP_CFG_RSZ 0x4
+
+#define QS_INJ_GRP_CFG_MODE(x) (((x) << 2) & GENMASK(3, 2))
+#define QS_INJ_GRP_CFG_MODE_M GENMASK(3, 2)
+#define QS_INJ_GRP_CFG_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0)
+
+#define QS_INJ_WR_RSZ 0x4
+
+#define QS_INJ_CTRL_RSZ 0x4
+
+#define QS_INJ_CTRL_GAP_SIZE(x) (((x) << 21) & GENMASK(24, 21))
+#define QS_INJ_CTRL_GAP_SIZE_M GENMASK(24, 21)
+#define QS_INJ_CTRL_GAP_SIZE_X(x) (((x) & GENMASK(24, 21)) >> 21)
+#define QS_INJ_CTRL_ABORT BIT(20)
+#define QS_INJ_CTRL_EOF BIT(19)
+#define QS_INJ_CTRL_SOF BIT(18)
+#define QS_INJ_CTRL_VLD_BYTES(x) (((x) << 16) & GENMASK(17, 16))
+#define QS_INJ_CTRL_VLD_BYTES_M GENMASK(17, 16)
+#define QS_INJ_CTRL_VLD_BYTES_X(x) (((x) & GENMASK(17, 16)) >> 16)
+
+#define QS_INJ_STATUS_WMARK_REACHED(x) (((x) << 4) & GENMASK(5, 4))
+#define QS_INJ_STATUS_WMARK_REACHED_M GENMASK(5, 4)
+#define QS_INJ_STATUS_WMARK_REACHED_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define QS_INJ_STATUS_FIFO_RDY(x) (((x) << 2) & GENMASK(3, 2))
+#define QS_INJ_STATUS_FIFO_RDY_M GENMASK(3, 2)
+#define QS_INJ_STATUS_FIFO_RDY_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QS_INJ_STATUS_INJ_IN_PROGRESS(x) ((x) & GENMASK(1, 0))
+#define QS_INJ_STATUS_INJ_IN_PROGRESS_M GENMASK(1, 0)
+
+#define QS_INJ_ERR_RSZ 0x4
+
+#define QS_INJ_ERR_ABORT_ERR_STICKY BIT(1)
+#define QS_INJ_ERR_WR_ERR_STICKY BIT(0)
+
+#endif
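
Editor's note: the XTR_* constants at the top of this header are the in-band status words that can appear in the extraction FIFO data stream; XTR_EOF_0..XTR_EOF_3 mark end-of-frame and encode, via XTR_VALID_BYTES(), how many bytes of the frame's final data word are valid. A hedged sketch of classifying one word pulled from the FIFO with the raw accessor from ocelot_io.c (extraction-group selection and byte order are left out):

	/* Illustrative only: read and classify one 32-bit extraction word. */
	static void example_classify_xtr_word(struct ocelot *ocelot, int grp)
	{
		u32 word = __ocelot_read_ix(ocelot, QS_XTR_RD,
					    grp * QS_XTR_RD_RSZ);

		switch (word) {
		case XTR_NOT_READY:
			break;			/* FIFO not ready, retry   */
		case XTR_ABORT:
		case XTR_PRUNED:
			break;			/* frame aborted/truncated */
		case XTR_EOF_0:
		case XTR_EOF_1:
		case XTR_EOF_2:
		case XTR_EOF_3:
			/* end of frame: XTR_VALID_BYTES(word) bytes of the
			 * last data word belong to the frame
			 */
			break;
		default:
			break;			/* ordinary frame data     */
		}
	}
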
diff --git a/drivers/net/ethernet/mscc/ocelot_qsys.h b/drivers/net/ethernet/mscc/ocelot_qsys.h
new file mode 100644
index 000000000000..d8c63aa761be
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_qsys.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_QSYS_H_
+#define _MSCC_OCELOT_QSYS_H_
+
+#define QSYS_PORT_MODE_RSZ 0x4
+
+#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
+#define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0)
+
+#define QSYS_SWITCH_PORT_MODE_RSZ 0x4
+
+#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14)
+#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11))
+#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11)
+#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11)
+#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10)
+#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9)
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1))
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1)
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1)
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0)
+
+#define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5)
+#define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4)
+#define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3)
+#define QSYS_STAT_CNT_CFG_DROP_YELLOW_CNT_MODE BIT(2)
+#define QSYS_STAT_CNT_CFG_DROP_COUNT_ONCE BIT(1)
+#define QSYS_STAT_CNT_CFG_DROP_COUNT_EGRESS BIT(0)
+
+#define QSYS_EEE_CFG_RSZ 0x4
+
+#define QSYS_EEE_THRES_EEE_HIGH_BYTES(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_EEE_THRES_EEE_HIGH_BYTES_M GENMASK(15, 8)
+#define QSYS_EEE_THRES_EEE_HIGH_BYTES_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QSYS_EEE_THRES_EEE_HIGH_FRAMES(x) ((x) & GENMASK(7, 0))
+#define QSYS_EEE_THRES_EEE_HIGH_FRAMES_M GENMASK(7, 0)
+
+#define QSYS_SW_STATUS_RSZ 0x4
+
+#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT(x) (((x) << 8) & GENMASK(12, 8))
+#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_M GENMASK(12, 8)
+#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_X(x) (((x) & GENMASK(12, 8)) >> 8)
+#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK(x) ((x) & GENMASK(7, 0))
+#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M GENMASK(7, 0)
+
+#define QSYS_QMAP_GSZ 0x4
+
+#define QSYS_QMAP_SE_BASE(x) (((x) << 5) & GENMASK(12, 5))
+#define QSYS_QMAP_SE_BASE_M GENMASK(12, 5)
+#define QSYS_QMAP_SE_BASE_X(x) (((x) & GENMASK(12, 5)) >> 5)
+#define QSYS_QMAP_SE_IDX_SEL(x) (((x) << 2) & GENMASK(4, 2))
+#define QSYS_QMAP_SE_IDX_SEL_M GENMASK(4, 2)
+#define QSYS_QMAP_SE_IDX_SEL_X(x) (((x) & GENMASK(4, 2)) >> 2)
+#define QSYS_QMAP_SE_INP_SEL(x) ((x) & GENMASK(1, 0))
+#define QSYS_QMAP_SE_INP_SEL_M GENMASK(1, 0)
+
+#define QSYS_ISDX_SGRP_GSZ 0x4
+
+#define QSYS_TIMED_FRAME_ENTRY_GSZ 0x4
+
+#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT(x) (((x) << 9) & GENMASK(18, 9))
+#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_M GENMASK(18, 9)
+#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_X(x) (((x) & GENMASK(18, 9)) >> 9)
+#define QSYS_TFRM_MISC_TIMED_CANCEL_1SHOT BIT(8)
+#define QSYS_TFRM_MISC_TIMED_SLOT_MODE_MC BIT(7)
+#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT(x) ((x) & GENMASK(6, 0))
+#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT_M GENMASK(6, 0)
+
+#define QSYS_RED_PROFILE_RSZ 0x4
+
+#define QSYS_RED_PROFILE_WM_RED_LOW(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_RED_PROFILE_WM_RED_LOW_M GENMASK(15, 8)
+#define QSYS_RED_PROFILE_WM_RED_LOW_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QSYS_RED_PROFILE_WM_RED_HIGH(x) ((x) & GENMASK(7, 0))
+#define QSYS_RED_PROFILE_WM_RED_HIGH_M GENMASK(7, 0)
+
+#define QSYS_RES_CFG_GSZ 0x8
+
+#define QSYS_RES_STAT_GSZ 0x8
+
+#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12))
+#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12)
+#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12)
+#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0))
+#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0)
+
+#define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 2))
+#define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2)
+#define QSYS_EVENTS_CORE_EV_FDC_X(x) (((x) & GENMASK(4, 2)) >> 2)
+#define QSYS_EVENTS_CORE_EV_FRD(x) ((x) & GENMASK(1, 0))
+#define QSYS_EVENTS_CORE_EV_FRD_M GENMASK(1, 0)
+
+#define QSYS_QMAXSDU_CFG_0_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_1_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_2_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_3_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_4_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_5_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_6_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_7_RSZ 0x4
+
+#define QSYS_PREEMPTION_CFG_RSZ 0x4
+
+#define QSYS_PREEMPTION_CFG_P_QUEUES(x) ((x) & GENMASK(7, 0))
+#define QSYS_PREEMPTION_CFG_P_QUEUES_M GENMASK(7, 0)
+#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(x) (((x) << 8) & GENMASK(9, 8))
+#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M GENMASK(9, 8)
+#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define QSYS_PREEMPTION_CFG_STRICT_IPG(x) (((x) << 12) & GENMASK(13, 12))
+#define QSYS_PREEMPTION_CFG_STRICT_IPG_M GENMASK(13, 12)
+#define QSYS_PREEMPTION_CFG_STRICT_IPG_X(x) (((x) & GENMASK(13, 12)) >> 12)
+#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE(x) (((x) << 16) & GENMASK(31, 16))
+#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_M GENMASK(31, 16)
+#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_X(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define QSYS_CIR_CFG_GSZ 0x80
+
+#define QSYS_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
+#define QSYS_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
+#define QSYS_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
+#define QSYS_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
+#define QSYS_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
+
+#define QSYS_EIR_CFG_GSZ 0x80
+
+#define QSYS_EIR_CFG_EIR_RATE(x) (((x) << 7) & GENMASK(21, 7))
+#define QSYS_EIR_CFG_EIR_RATE_M GENMASK(21, 7)
+#define QSYS_EIR_CFG_EIR_RATE_X(x) (((x) & GENMASK(21, 7)) >> 7)
+#define QSYS_EIR_CFG_EIR_BURST(x) (((x) << 1) & GENMASK(6, 1))
+#define QSYS_EIR_CFG_EIR_BURST_M GENMASK(6, 1)
+#define QSYS_EIR_CFG_EIR_BURST_X(x) (((x) & GENMASK(6, 1)) >> 1)
+#define QSYS_EIR_CFG_EIR_MARK_ENA BIT(0)
+
+#define QSYS_SE_CFG_GSZ 0x80
+
+#define QSYS_SE_CFG_SE_DWRR_CNT(x) (((x) << 6) & GENMASK(9, 6))
+#define QSYS_SE_CFG_SE_DWRR_CNT_M GENMASK(9, 6)
+#define QSYS_SE_CFG_SE_DWRR_CNT_X(x) (((x) & GENMASK(9, 6)) >> 6)
+#define QSYS_SE_CFG_SE_RR_ENA BIT(5)
+#define QSYS_SE_CFG_SE_AVB_ENA BIT(4)
+#define QSYS_SE_CFG_SE_FRM_MODE(x) (((x) << 2) & GENMASK(3, 2))
+#define QSYS_SE_CFG_SE_FRM_MODE_M GENMASK(3, 2)
+#define QSYS_SE_CFG_SE_FRM_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QSYS_SE_CFG_SE_EXC_ENA BIT(1)
+#define QSYS_SE_CFG_SE_EXC_FWD BIT(0)
+
+#define QSYS_SE_DWRR_CFG_GSZ 0x80
+#define QSYS_SE_DWRR_CFG_RSZ 0x4
+
+#define QSYS_SE_CONNECT_GSZ 0x80
+
+#define QSYS_SE_CONNECT_SE_OUTP_IDX(x) (((x) << 17) & GENMASK(24, 17))
+#define QSYS_SE_CONNECT_SE_OUTP_IDX_M GENMASK(24, 17)
+#define QSYS_SE_CONNECT_SE_OUTP_IDX_X(x) (((x) & GENMASK(24, 17)) >> 17)
+#define QSYS_SE_CONNECT_SE_INP_IDX(x) (((x) << 9) & GENMASK(16, 9))
+#define QSYS_SE_CONNECT_SE_INP_IDX_M GENMASK(16, 9)
+#define QSYS_SE_CONNECT_SE_INP_IDX_X(x) (((x) & GENMASK(16, 9)) >> 9)
+#define QSYS_SE_CONNECT_SE_OUTP_CON(x) (((x) << 5) & GENMASK(8, 5))
+#define QSYS_SE_CONNECT_SE_OUTP_CON_M GENMASK(8, 5)
+#define QSYS_SE_CONNECT_SE_OUTP_CON_X(x) (((x) & GENMASK(8, 5)) >> 5)
+#define QSYS_SE_CONNECT_SE_INP_CNT(x) (((x) << 1) & GENMASK(4, 1))
+#define QSYS_SE_CONNECT_SE_INP_CNT_M GENMASK(4, 1)
+#define QSYS_SE_CONNECT_SE_INP_CNT_X(x) (((x) & GENMASK(4, 1)) >> 1)
+#define QSYS_SE_CONNECT_SE_TERMINAL BIT(0)
+
+#define QSYS_SE_DLB_SENSE_GSZ 0x80
+
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO(x) (((x) << 11) & GENMASK(13, 11))
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_M GENMASK(13, 11)
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_X(x) (((x) & GENMASK(13, 11)) >> 11)
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT(x) (((x) << 7) & GENMASK(10, 7))
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_M GENMASK(10, 7)
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_X(x) (((x) & GENMASK(10, 7)) >> 7)
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT(x) (((x) << 3) & GENMASK(6, 3))
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_M GENMASK(6, 3)
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_X(x) (((x) & GENMASK(6, 3)) >> 3)
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(2)
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_ENA BIT(1)
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
+
+#define QSYS_CIR_STATE_GSZ 0x80
+
+#define QSYS_CIR_STATE_CIR_LVL(x) (((x) << 4) & GENMASK(25, 4))
+#define QSYS_CIR_STATE_CIR_LVL_M GENMASK(25, 4)
+#define QSYS_CIR_STATE_CIR_LVL_X(x) (((x) & GENMASK(25, 4)) >> 4)
+#define QSYS_CIR_STATE_SHP_TIME(x) ((x) & GENMASK(3, 0))
+#define QSYS_CIR_STATE_SHP_TIME_M GENMASK(3, 0)
+
+#define QSYS_EIR_STATE_GSZ 0x80
+
+#define QSYS_SE_STATE_GSZ 0x80
+
+#define QSYS_SE_STATE_SE_OUTP_LVL(x) (((x) << 1) & GENMASK(2, 1))
+#define QSYS_SE_STATE_SE_OUTP_LVL_M GENMASK(2, 1)
+#define QSYS_SE_STATE_SE_OUTP_LVL_X(x) (((x) & GENMASK(2, 1)) >> 1)
+#define QSYS_SE_STATE_SE_WAS_YEL BIT(0)
+
+#define QSYS_HSCH_MISC_CFG_SE_CONNECT_VLD BIT(8)
+#define QSYS_HSCH_MISC_CFG_FRM_ADJ(x) (((x) << 3) & GENMASK(7, 3))
+#define QSYS_HSCH_MISC_CFG_FRM_ADJ_M GENMASK(7, 3)
+#define QSYS_HSCH_MISC_CFG_FRM_ADJ_X(x) (((x) & GENMASK(7, 3)) >> 3)
+#define QSYS_HSCH_MISC_CFG_LEAK_DIS BIT(2)
+#define QSYS_HSCH_MISC_CFG_QSHP_EXC_ENA BIT(1)
+#define QSYS_HSCH_MISC_CFG_PFC_BYP_UPD BIT(0)
+
+#define QSYS_TAG_CONFIG_RSZ 0x4
+
+#define QSYS_TAG_CONFIG_ENABLE BIT(0)
+#define QSYS_TAG_CONFIG_LINK_SPEED(x) (((x) << 4) & GENMASK(5, 4))
+#define QSYS_TAG_CONFIG_LINK_SPEED_M GENMASK(5, 4)
+#define QSYS_TAG_CONFIG_LINK_SPEED_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define QSYS_TAG_CONFIG_INIT_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_TAG_CONFIG_INIT_GATE_STATE_M GENMASK(15, 8)
+#define QSYS_TAG_CONFIG_INIT_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(x) (((x) << 16) & GENMASK(23, 16))
+#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M GENMASK(23, 16)
+#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_X(x) (((x) & GENMASK(23, 16)) >> 16)
+
+#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(x) ((x) & GENMASK(7, 0))
+#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M GENMASK(7, 0)
+#define QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q BIT(8)
+#define QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE BIT(16)
+
+#define QSYS_PORT_MAX_SDU_RSZ 0x4
+
+#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
+#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
+#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_M GENMASK(31, 16)
+#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
+#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
+#define QSYS_GCL_CFG_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_GCL_CFG_REG_1_GATE_STATE_M GENMASK(15, 8)
+#define QSYS_GCL_CFG_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
+
+#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
+#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
+#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_M GENMASK(31, 16)
+#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
+#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE(x) (((x) << 16) & GENMASK(23, 16))
+#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_M GENMASK(23, 16)
+#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING BIT(24)
+
+#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
+#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
+#define QSYS_GCL_STATUS_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_GCL_STATUS_REG_1_GATE_STATE_M GENMASK(15, 8)
+#define QSYS_GCL_STATUS_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
+
+#endif
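
Editor's note: many of the QSYS registers above are replicated per scheduler element rather than per port, which is why they carry a _GSZ (group size) constant instead of _RSZ. A minimal sketch of programming one leaky-bucket shaper through its committed-rate fields, assuming rate and burst are already expressed in the hardware's units:

	/* Illustrative only: write CIR rate/burst for scheduler element se. */
	static void example_set_cir(struct ocelot *ocelot, u32 se,
				    u32 rate, u32 burst)
	{
		__ocelot_write_ix(ocelot,
				  QSYS_CIR_CFG_CIR_RATE(rate) |
				  QSYS_CIR_CFG_CIR_BURST(burst),
				  QSYS_CIR_CFG,
				  se * QSYS_CIR_CFG_GSZ);
	}
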
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
new file mode 100644
index 000000000000..e334b406c40c
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include "ocelot.h"
+
+static const u32 ocelot_ana_regmap[] = {
+ REG(ANA_ADVLEARN, 0x009000),
+ REG(ANA_VLANMASK, 0x009004),
+ REG(ANA_PORT_B_DOMAIN, 0x009008),
+ REG(ANA_ANAGEFIL, 0x00900c),
+ REG(ANA_ANEVENTS, 0x009010),
+ REG(ANA_STORMLIMIT_BURST, 0x009014),
+ REG(ANA_STORMLIMIT_CFG, 0x009018),
+ REG(ANA_ISOLATED_PORTS, 0x009028),
+ REG(ANA_COMMUNITY_PORTS, 0x00902c),
+ REG(ANA_AUTOAGE, 0x009030),
+ REG(ANA_MACTOPTIONS, 0x009034),
+ REG(ANA_LEARNDISC, 0x009038),
+ REG(ANA_AGENCTRL, 0x00903c),
+ REG(ANA_MIRRORPORTS, 0x009040),
+ REG(ANA_EMIRRORPORTS, 0x009044),
+ REG(ANA_FLOODING, 0x009048),
+ REG(ANA_FLOODING_IPMC, 0x00904c),
+ REG(ANA_SFLOW_CFG, 0x009050),
+ REG(ANA_PORT_MODE, 0x009080),
+ REG(ANA_PGID_PGID, 0x008c00),
+ REG(ANA_TABLES_ANMOVED, 0x008b30),
+ REG(ANA_TABLES_MACHDATA, 0x008b34),
+ REG(ANA_TABLES_MACLDATA, 0x008b38),
+ REG(ANA_TABLES_MACACCESS, 0x008b3c),
+ REG(ANA_TABLES_MACTINDX, 0x008b40),
+ REG(ANA_TABLES_VLANACCESS, 0x008b44),
+ REG(ANA_TABLES_VLANTIDX, 0x008b48),
+ REG(ANA_TABLES_ISDXACCESS, 0x008b4c),
+ REG(ANA_TABLES_ISDXTIDX, 0x008b50),
+ REG(ANA_TABLES_ENTRYLIM, 0x008b00),
+ REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54),
+ REG(ANA_TABLES_PTP_ID_LOW, 0x008b58),
+ REG(ANA_MSTI_STATE, 0x008e00),
+ REG(ANA_PORT_VLAN_CFG, 0x007000),
+ REG(ANA_PORT_DROP_CFG, 0x007004),
+ REG(ANA_PORT_QOS_CFG, 0x007008),
+ REG(ANA_PORT_VCAP_CFG, 0x00700c),
+ REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010),
+ REG(ANA_PORT_VCAP_S2_CFG, 0x00701c),
+ REG(ANA_PORT_PCP_DEI_MAP, 0x007020),
+ REG(ANA_PORT_CPU_FWD_CFG, 0x007060),
+ REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064),
+ REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068),
+ REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c),
+ REG(ANA_PORT_PORT_CFG, 0x007070),
+ REG(ANA_PORT_POL_CFG, 0x007074),
+ REG(ANA_PORT_PTP_CFG, 0x007078),
+ REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c),
+ REG(ANA_OAM_UPM_LM_CNT, 0x007c00),
+ REG(ANA_PORT_PTP_DLY2_CFG, 0x007080),
+ REG(ANA_PFC_PFC_CFG, 0x008800),
+ REG(ANA_PFC_PFC_TIMER, 0x008804),
+ REG(ANA_IPT_OAM_MEP_CFG, 0x008000),
+ REG(ANA_IPT_IPT, 0x008004),
+ REG(ANA_PPT_PPT, 0x008ac0),
+ REG(ANA_FID_MAP_FID_MAP, 0x000000),
+ REG(ANA_AGGR_CFG, 0x0090b4),
+ REG(ANA_CPUQ_CFG, 0x0090b8),
+ REG(ANA_CPUQ_CFG2, 0x0090bc),
+ REG(ANA_CPUQ_8021_CFG, 0x0090c0),
+ REG(ANA_DSCP_CFG, 0x009100),
+ REG(ANA_DSCP_REWR_CFG, 0x009200),
+ REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240),
+ REG(ANA_VCAP_RNG_VAL_CFG, 0x009260),
+ REG(ANA_VRAP_CFG, 0x009280),
+ REG(ANA_VRAP_HDR_DATA, 0x009284),
+ REG(ANA_VRAP_HDR_MASK, 0x009288),
+ REG(ANA_DISCARD_CFG, 0x00928c),
+ REG(ANA_FID_CFG, 0x009290),
+ REG(ANA_POL_PIR_CFG, 0x004000),
+ REG(ANA_POL_CIR_CFG, 0x004004),
+ REG(ANA_POL_MODE_CFG, 0x004008),
+ REG(ANA_POL_PIR_STATE, 0x00400c),
+ REG(ANA_POL_CIR_STATE, 0x004010),
+ REG(ANA_POL_STATE, 0x004014),
+ REG(ANA_POL_FLOWC, 0x008b80),
+ REG(ANA_POL_HYST, 0x008bec),
+ REG(ANA_POL_MISC_CFG, 0x008bf0),
+};
+
+static const u32 ocelot_qs_regmap[] = {
+ REG(QS_XTR_GRP_CFG, 0x000000),
+ REG(QS_XTR_RD, 0x000008),
+ REG(QS_XTR_FRM_PRUNING, 0x000010),
+ REG(QS_XTR_FLUSH, 0x000018),
+ REG(QS_XTR_DATA_PRESENT, 0x00001c),
+ REG(QS_XTR_CFG, 0x000020),
+ REG(QS_INJ_GRP_CFG, 0x000024),
+ REG(QS_INJ_WR, 0x00002c),
+ REG(QS_INJ_CTRL, 0x000034),
+ REG(QS_INJ_STATUS, 0x00003c),
+ REG(QS_INJ_ERR, 0x000040),
+ REG(QS_INH_DBG, 0x000048),
+};
+
+static const u32 ocelot_hsio_regmap[] = {
+ REG(HSIO_PLL5G_CFG0, 0x000000),
+ REG(HSIO_PLL5G_CFG1, 0x000004),
+ REG(HSIO_PLL5G_CFG2, 0x000008),
+ REG(HSIO_PLL5G_CFG3, 0x00000c),
+ REG(HSIO_PLL5G_CFG4, 0x000010),
+ REG(HSIO_PLL5G_CFG5, 0x000014),
+ REG(HSIO_PLL5G_CFG6, 0x000018),
+ REG(HSIO_PLL5G_STATUS0, 0x00001c),
+ REG(HSIO_PLL5G_STATUS1, 0x000020),
+ REG(HSIO_PLL5G_BIST_CFG0, 0x000024),
+ REG(HSIO_PLL5G_BIST_CFG1, 0x000028),
+ REG(HSIO_PLL5G_BIST_CFG2, 0x00002c),
+ REG(HSIO_PLL5G_BIST_STAT0, 0x000030),
+ REG(HSIO_PLL5G_BIST_STAT1, 0x000034),
+ REG(HSIO_RCOMP_CFG0, 0x000038),
+ REG(HSIO_RCOMP_STATUS, 0x00003c),
+ REG(HSIO_SYNC_ETH_CFG, 0x000040),
+ REG(HSIO_SYNC_ETH_PLL_CFG, 0x000048),
+ REG(HSIO_S1G_DES_CFG, 0x00004c),
+ REG(HSIO_S1G_IB_CFG, 0x000050),
+ REG(HSIO_S1G_OB_CFG, 0x000054),
+ REG(HSIO_S1G_SER_CFG, 0x000058),
+ REG(HSIO_S1G_COMMON_CFG, 0x00005c),
+ REG(HSIO_S1G_PLL_CFG, 0x000060),
+ REG(HSIO_S1G_PLL_STATUS, 0x000064),
+ REG(HSIO_S1G_DFT_CFG0, 0x000068),
+ REG(HSIO_S1G_DFT_CFG1, 0x00006c),
+ REG(HSIO_S1G_DFT_CFG2, 0x000070),
+ REG(HSIO_S1G_TP_CFG, 0x000074),
+ REG(HSIO_S1G_RC_PLL_BIST_CFG, 0x000078),
+ REG(HSIO_S1G_MISC_CFG, 0x00007c),
+ REG(HSIO_S1G_DFT_STATUS, 0x000080),
+ REG(HSIO_S1G_MISC_STATUS, 0x000084),
+ REG(HSIO_MCB_S1G_ADDR_CFG, 0x000088),
+ REG(HSIO_S6G_DIG_CFG, 0x00008c),
+ REG(HSIO_S6G_DFT_CFG0, 0x000090),
+ REG(HSIO_S6G_DFT_CFG1, 0x000094),
+ REG(HSIO_S6G_DFT_CFG2, 0x000098),
+ REG(HSIO_S6G_TP_CFG0, 0x00009c),
+ REG(HSIO_S6G_TP_CFG1, 0x0000a0),
+ REG(HSIO_S6G_RC_PLL_BIST_CFG, 0x0000a4),
+ REG(HSIO_S6G_MISC_CFG, 0x0000a8),
+ REG(HSIO_S6G_OB_ANEG_CFG, 0x0000ac),
+ REG(HSIO_S6G_DFT_STATUS, 0x0000b0),
+ REG(HSIO_S6G_ERR_CNT, 0x0000b4),
+ REG(HSIO_S6G_MISC_STATUS, 0x0000b8),
+ REG(HSIO_S6G_DES_CFG, 0x0000bc),
+ REG(HSIO_S6G_IB_CFG, 0x0000c0),
+ REG(HSIO_S6G_IB_CFG1, 0x0000c4),
+ REG(HSIO_S6G_IB_CFG2, 0x0000c8),
+ REG(HSIO_S6G_IB_CFG3, 0x0000cc),
+ REG(HSIO_S6G_IB_CFG4, 0x0000d0),
+ REG(HSIO_S6G_IB_CFG5, 0x0000d4),
+ REG(HSIO_S6G_OB_CFG, 0x0000d8),
+ REG(HSIO_S6G_OB_CFG1, 0x0000dc),
+ REG(HSIO_S6G_SER_CFG, 0x0000e0),
+ REG(HSIO_S6G_COMMON_CFG, 0x0000e4),
+ REG(HSIO_S6G_PLL_CFG, 0x0000e8),
+ REG(HSIO_S6G_ACJTAG_CFG, 0x0000ec),
+ REG(HSIO_S6G_GP_CFG, 0x0000f0),
+ REG(HSIO_S6G_IB_STATUS0, 0x0000f4),
+ REG(HSIO_S6G_IB_STATUS1, 0x0000f8),
+ REG(HSIO_S6G_ACJTAG_STATUS, 0x0000fc),
+ REG(HSIO_S6G_PLL_STATUS, 0x000100),
+ REG(HSIO_S6G_REVID, 0x000104),
+ REG(HSIO_MCB_S6G_ADDR_CFG, 0x000108),
+ REG(HSIO_HW_CFG, 0x00010c),
+ REG(HSIO_HW_QSGMII_CFG, 0x000110),
+ REG(HSIO_HW_QSGMII_STAT, 0x000114),
+ REG(HSIO_CLK_CFG, 0x000118),
+ REG(HSIO_TEMP_SENSOR_CTRL, 0x00011c),
+ REG(HSIO_TEMP_SENSOR_CFG, 0x000120),
+ REG(HSIO_TEMP_SENSOR_STAT, 0x000124),
+};
+
+static const u32 ocelot_qsys_regmap[] = {
+ REG(QSYS_PORT_MODE, 0x011200),
+ REG(QSYS_SWITCH_PORT_MODE, 0x011234),
+ REG(QSYS_STAT_CNT_CFG, 0x011264),
+ REG(QSYS_EEE_CFG, 0x011268),
+ REG(QSYS_EEE_THRES, 0x011294),
+ REG(QSYS_IGR_NO_SHARING, 0x011298),
+ REG(QSYS_EGR_NO_SHARING, 0x01129c),
+ REG(QSYS_SW_STATUS, 0x0112a0),
+ REG(QSYS_EXT_CPU_CFG, 0x0112d0),
+ REG(QSYS_PAD_CFG, 0x0112d4),
+ REG(QSYS_CPU_GROUP_MAP, 0x0112d8),
+ REG(QSYS_QMAP, 0x0112dc),
+ REG(QSYS_ISDX_SGRP, 0x011400),
+ REG(QSYS_TIMED_FRAME_ENTRY, 0x014000),
+ REG(QSYS_TFRM_MISC, 0x011310),
+ REG(QSYS_TFRM_PORT_DLY, 0x011314),
+ REG(QSYS_TFRM_TIMER_CFG_1, 0x011318),
+ REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c),
+ REG(QSYS_TFRM_TIMER_CFG_3, 0x011320),
+ REG(QSYS_TFRM_TIMER_CFG_4, 0x011324),
+ REG(QSYS_TFRM_TIMER_CFG_5, 0x011328),
+ REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c),
+ REG(QSYS_TFRM_TIMER_CFG_7, 0x011330),
+ REG(QSYS_TFRM_TIMER_CFG_8, 0x011334),
+ REG(QSYS_RED_PROFILE, 0x011338),
+ REG(QSYS_RES_QOS_MODE, 0x011378),
+ REG(QSYS_RES_CFG, 0x012000),
+ REG(QSYS_RES_STAT, 0x012004),
+ REG(QSYS_EGR_DROP_MODE, 0x01137c),
+ REG(QSYS_EQ_CTRL, 0x011380),
+ REG(QSYS_EVENTS_CORE, 0x011384),
+ REG(QSYS_CIR_CFG, 0x000000),
+ REG(QSYS_EIR_CFG, 0x000004),
+ REG(QSYS_SE_CFG, 0x000008),
+ REG(QSYS_SE_DWRR_CFG, 0x00000c),
+ REG(QSYS_SE_CONNECT, 0x00003c),
+ REG(QSYS_SE_DLB_SENSE, 0x000040),
+ REG(QSYS_CIR_STATE, 0x000044),
+ REG(QSYS_EIR_STATE, 0x000048),
+ REG(QSYS_SE_STATE, 0x00004c),
+ REG(QSYS_HSCH_MISC_CFG, 0x011388),
+};
+
+static const u32 ocelot_rew_regmap[] = {
+ REG(REW_PORT_VLAN_CFG, 0x000000),
+ REG(REW_TAG_CFG, 0x000004),
+ REG(REW_PORT_CFG, 0x000008),
+ REG(REW_DSCP_CFG, 0x00000c),
+ REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
+ REG(REW_PTP_CFG, 0x000050),
+ REG(REW_PTP_DLY1_CFG, 0x000054),
+ REG(REW_DSCP_REMAP_DP1_CFG, 0x000690),
+ REG(REW_DSCP_REMAP_CFG, 0x000790),
+ REG(REW_STAT_CFG, 0x000890),
+ REG(REW_PPT, 0x000680),
+};
+
+static const u32 ocelot_sys_regmap[] = {
+ REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
+ REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
+ REG(SYS_COUNT_RX_SHORTS, 0x000010),
+ REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
+ REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
+ REG(SYS_COUNT_RX_64, 0x000024),
+ REG(SYS_COUNT_RX_65_127, 0x000028),
+ REG(SYS_COUNT_RX_128_255, 0x00002c),
+ REG(SYS_COUNT_RX_256_1023, 0x000030),
+ REG(SYS_COUNT_RX_1024_1526, 0x000034),
+ REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_PAUSE, 0x00003c),
+ REG(SYS_COUNT_RX_CONTROL, 0x000040),
+ REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
+ REG(SYS_COUNT_TX_COLLISION, 0x000110),
+ REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
+ REG(SYS_COUNT_TX_64, 0x00011c),
+ REG(SYS_COUNT_TX_65_127, 0x000120),
+ REG(SYS_COUNT_TX_128_511, 0x000124),
+ REG(SYS_COUNT_TX_512_1023, 0x000128),
+ REG(SYS_COUNT_TX_1024_1526, 0x00012c),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000130),
+ REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_RESET_CFG, 0x000508),
+ REG(SYS_CMID, 0x00050c),
+ REG(SYS_VLAN_ETYPE_CFG, 0x000510),
+ REG(SYS_PORT_MODE, 0x000514),
+ REG(SYS_FRONT_PORT_MODE, 0x000548),
+ REG(SYS_FRM_AGING, 0x000574),
+ REG(SYS_STAT_CFG, 0x000578),
+ REG(SYS_SW_STATUS, 0x00057c),
+ REG(SYS_MISC_CFG, 0x0005ac),
+ REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0),
+ REG(SYS_REW_MAC_LOW_CFG, 0x0005dc),
+ REG(SYS_CM_ADDR, 0x000500),
+ REG(SYS_CM_DATA, 0x000504),
+ REG(SYS_PAUSE_CFG, 0x000608),
+ REG(SYS_PAUSE_TOT_CFG, 0x000638),
+ REG(SYS_ATOP, 0x00063c),
+ REG(SYS_ATOP_TOT_CFG, 0x00066c),
+ REG(SYS_MAC_FC_CFG, 0x000670),
+ REG(SYS_MMGT, 0x00069c),
+ REG(SYS_MMGT_FAST, 0x0006a0),
+ REG(SYS_EVENTS_DIF, 0x0006a4),
+ REG(SYS_EVENTS_CORE, 0x0006b4),
+ REG(SYS_CNT, 0x000000),
+ REG(SYS_PTP_STATUS, 0x0006b8),
+ REG(SYS_PTP_TXSTAMP, 0x0006bc),
+ REG(SYS_PTP_NXT, 0x0006c0),
+ REG(SYS_PTP_CFG, 0x0006c4),
+};
+
+static const u32 *ocelot_regmap[] = {
+ [ANA] = ocelot_ana_regmap,
+ [QS] = ocelot_qs_regmap,
+ [HSIO] = ocelot_hsio_regmap,
+ [QSYS] = ocelot_qsys_regmap,
+ [REW] = ocelot_rew_regmap,
+ [SYS] = ocelot_sys_regmap,
+};
+
+static const struct reg_field ocelot_regfields[] = {
+ [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11),
+ [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10),
+ [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27),
+ [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26),
+ [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25),
+ [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+ [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23),
+ [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+ [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+ [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+ [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+ [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+ [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+ [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
+ [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+ [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14),
+ [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+ [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+ [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+ [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+ [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+ [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+ [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+ [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+ [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+ [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+ [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+ [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+ [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+ [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+ [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18),
+ [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11),
+ [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0),
+ [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2),
+ [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1),
+ [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0),
+};
+
+static const struct ocelot_stat_layout ocelot_stats_layout[] = {
+ { .name = "rx_octets", .offset = 0x00, },
+ { .name = "rx_unicast", .offset = 0x01, },
+ { .name = "rx_multicast", .offset = 0x02, },
+ { .name = "rx_broadcast", .offset = 0x03, },
+ { .name = "rx_shorts", .offset = 0x04, },
+ { .name = "rx_fragments", .offset = 0x05, },
+ { .name = "rx_jabbers", .offset = 0x06, },
+ { .name = "rx_crc_align_errs", .offset = 0x07, },
+ { .name = "rx_sym_errs", .offset = 0x08, },
+ { .name = "rx_frames_below_65_octets", .offset = 0x09, },
+ { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
+ { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
+ { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
+ { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
+ { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
+ { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
+ { .name = "rx_pause", .offset = 0x10, },
+ { .name = "rx_control", .offset = 0x11, },
+ { .name = "rx_longs", .offset = 0x12, },
+ { .name = "rx_classified_drops", .offset = 0x13, },
+ { .name = "rx_red_prio_0", .offset = 0x14, },
+ { .name = "rx_red_prio_1", .offset = 0x15, },
+ { .name = "rx_red_prio_2", .offset = 0x16, },
+ { .name = "rx_red_prio_3", .offset = 0x17, },
+ { .name = "rx_red_prio_4", .offset = 0x18, },
+ { .name = "rx_red_prio_5", .offset = 0x19, },
+ { .name = "rx_red_prio_6", .offset = 0x1A, },
+ { .name = "rx_red_prio_7", .offset = 0x1B, },
+ { .name = "rx_yellow_prio_0", .offset = 0x1C, },
+ { .name = "rx_yellow_prio_1", .offset = 0x1D, },
+ { .name = "rx_yellow_prio_2", .offset = 0x1E, },
+ { .name = "rx_yellow_prio_3", .offset = 0x1F, },
+ { .name = "rx_yellow_prio_4", .offset = 0x20, },
+ { .name = "rx_yellow_prio_5", .offset = 0x21, },
+ { .name = "rx_yellow_prio_6", .offset = 0x22, },
+ { .name = "rx_yellow_prio_7", .offset = 0x23, },
+ { .name = "rx_green_prio_0", .offset = 0x24, },
+ { .name = "rx_green_prio_1", .offset = 0x25, },
+ { .name = "rx_green_prio_2", .offset = 0x26, },
+ { .name = "rx_green_prio_3", .offset = 0x27, },
+ { .name = "rx_green_prio_4", .offset = 0x28, },
+ { .name = "rx_green_prio_5", .offset = 0x29, },
+ { .name = "rx_green_prio_6", .offset = 0x2A, },
+ { .name = "rx_green_prio_7", .offset = 0x2B, },
+ { .name = "tx_octets", .offset = 0x40, },
+ { .name = "tx_unicast", .offset = 0x41, },
+ { .name = "tx_multicast", .offset = 0x42, },
+ { .name = "tx_broadcast", .offset = 0x43, },
+ { .name = "tx_collision", .offset = 0x44, },
+ { .name = "tx_drops", .offset = 0x45, },
+ { .name = "tx_pause", .offset = 0x46, },
+ { .name = "tx_frames_below_65_octets", .offset = 0x47, },
+ { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
+ { .name = "tx_frames_128_255_octets", .offset = 0x49, },
+ { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
+ { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
+ { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
+ { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
+ { .name = "tx_yellow_prio_0", .offset = 0x4E, },
+ { .name = "tx_yellow_prio_1", .offset = 0x4F, },
+ { .name = "tx_yellow_prio_2", .offset = 0x50, },
+ { .name = "tx_yellow_prio_3", .offset = 0x51, },
+ { .name = "tx_yellow_prio_4", .offset = 0x52, },
+ { .name = "tx_yellow_prio_5", .offset = 0x53, },
+ { .name = "tx_yellow_prio_6", .offset = 0x54, },
+ { .name = "tx_yellow_prio_7", .offset = 0x55, },
+ { .name = "tx_green_prio_0", .offset = 0x56, },
+ { .name = "tx_green_prio_1", .offset = 0x57, },
+ { .name = "tx_green_prio_2", .offset = 0x58, },
+ { .name = "tx_green_prio_3", .offset = 0x59, },
+ { .name = "tx_green_prio_4", .offset = 0x5A, },
+ { .name = "tx_green_prio_5", .offset = 0x5B, },
+ { .name = "tx_green_prio_6", .offset = 0x5C, },
+ { .name = "tx_green_prio_7", .offset = 0x5D, },
+ { .name = "tx_aged", .offset = 0x5E, },
+ { .name = "drop_local", .offset = 0x80, },
+ { .name = "drop_tail", .offset = 0x81, },
+ { .name = "drop_yellow_prio_0", .offset = 0x82, },
+ { .name = "drop_yellow_prio_1", .offset = 0x83, },
+ { .name = "drop_yellow_prio_2", .offset = 0x84, },
+ { .name = "drop_yellow_prio_3", .offset = 0x85, },
+ { .name = "drop_yellow_prio_4", .offset = 0x86, },
+ { .name = "drop_yellow_prio_5", .offset = 0x87, },
+ { .name = "drop_yellow_prio_6", .offset = 0x88, },
+ { .name = "drop_yellow_prio_7", .offset = 0x89, },
+ { .name = "drop_green_prio_0", .offset = 0x8A, },
+ { .name = "drop_green_prio_1", .offset = 0x8B, },
+ { .name = "drop_green_prio_2", .offset = 0x8C, },
+ { .name = "drop_green_prio_3", .offset = 0x8D, },
+ { .name = "drop_green_prio_4", .offset = 0x8E, },
+ { .name = "drop_green_prio_5", .offset = 0x8F, },
+ { .name = "drop_green_prio_6", .offset = 0x90, },
+ { .name = "drop_green_prio_7", .offset = 0x91, },
+};
+
+static void ocelot_pll5_init(struct ocelot *ocelot)
+{
+ /* Configure PLL5. This will need a proper CCF driver
+ * The values are coming from the VTSS API for Ocelot
+ */
+ ocelot_write(ocelot, HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
+ HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8), HSIO_PLL5G_CFG4);
+ ocelot_write(ocelot, HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
+ HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
+ HSIO_PLL5G_CFG0_ENA_BIAS |
+ HSIO_PLL5G_CFG0_ENA_VCO_BUF |
+ HSIO_PLL5G_CFG0_ENA_CP1 |
+ HSIO_PLL5G_CFG0_SELCPI(2) |
+ HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) |
+ HSIO_PLL5G_CFG0_SELBGV820(4) |
+ HSIO_PLL5G_CFG0_DIV4 |
+ HSIO_PLL5G_CFG0_ENA_CLKTREE |
+ HSIO_PLL5G_CFG0_ENA_LANE, HSIO_PLL5G_CFG0);
+ ocelot_write(ocelot, HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
+ HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
+ HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
+ HSIO_PLL5G_CFG2_ENA_AMPCTRL |
+ HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
+ HSIO_PLL5G_CFG2_AMPC_SEL(0x10), HSIO_PLL5G_CFG2);
+}
+
+int ocelot_chip_init(struct ocelot *ocelot)
+{
+ int ret;
+
+ ocelot->map = ocelot_regmap;
+ ocelot->stats_layout = ocelot_stats_layout;
+ ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
+ ocelot->shared_queue_sz = 224 * 1024;
+
+ ret = ocelot_regfields_init(ocelot, ocelot_regfields);
+ if (ret)
+ return ret;
+
+ ocelot_pll5_init(ocelot);
+
+ eth_random_addr(ocelot->base_mac);
+ ocelot->base_mac[5] &= 0xf0;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_chip_init);
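An illustrative aside (editorial, not part of the patch): masking the low nibble of the randomly generated base MAC reserves 16 consecutive addresses. The per-port derivation below is an assumption about code outside this hunk, shown only to explain the likely intent.

    /* base_mac ends in, say, 0xa7; clearing the low nibble gives 0xa0,
     * leaving room for 16 per-port addresses.  Presumed usage elsewhere
     * in the driver (assumption, not shown in this diff):
     */
    u8 port_mac[ETH_ALEN];

    ether_addr_copy(port_mac, ocelot->base_mac);
    port_mac[ETH_ALEN - 1] += port;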
diff --git a/drivers/net/ethernet/mscc/ocelot_rew.h b/drivers/net/ethernet/mscc/ocelot_rew.h
new file mode 100644
index 000000000000..210914b7e20f
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_rew.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_REW_H_
+#define _MSCC_OCELOT_REW_H_
+
+#define REW_PORT_VLAN_CFG_GSZ 0x80
+
+#define REW_PORT_VLAN_CFG_PORT_TPID(x) (((x) << 16) & GENMASK(31, 16))
+#define REW_PORT_VLAN_CFG_PORT_TPID_M GENMASK(31, 16)
+#define REW_PORT_VLAN_CFG_PORT_TPID_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define REW_PORT_VLAN_CFG_PORT_DEI BIT(15)
+#define REW_PORT_VLAN_CFG_PORT_PCP(x) (((x) << 12) & GENMASK(14, 12))
+#define REW_PORT_VLAN_CFG_PORT_PCP_M GENMASK(14, 12)
+#define REW_PORT_VLAN_CFG_PORT_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12)
+#define REW_PORT_VLAN_CFG_PORT_VID(x) ((x) & GENMASK(11, 0))
+#define REW_PORT_VLAN_CFG_PORT_VID_M GENMASK(11, 0)
+
+#define REW_TAG_CFG_GSZ 0x80
+
+#define REW_TAG_CFG_TAG_CFG(x) (((x) << 7) & GENMASK(8, 7))
+#define REW_TAG_CFG_TAG_CFG_M GENMASK(8, 7)
+#define REW_TAG_CFG_TAG_CFG_X(x) (((x) & GENMASK(8, 7)) >> 7)
+#define REW_TAG_CFG_TAG_TPID_CFG(x) (((x) << 5) & GENMASK(6, 5))
+#define REW_TAG_CFG_TAG_TPID_CFG_M GENMASK(6, 5)
+#define REW_TAG_CFG_TAG_TPID_CFG_X(x) (((x) & GENMASK(6, 5)) >> 5)
+#define REW_TAG_CFG_TAG_VID_CFG BIT(4)
+#define REW_TAG_CFG_TAG_PCP_CFG(x) (((x) << 2) & GENMASK(3, 2))
+#define REW_TAG_CFG_TAG_PCP_CFG_M GENMASK(3, 2)
+#define REW_TAG_CFG_TAG_PCP_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define REW_TAG_CFG_TAG_DEI_CFG(x) ((x) & GENMASK(1, 0))
+#define REW_TAG_CFG_TAG_DEI_CFG_M GENMASK(1, 0)
+
+#define REW_PORT_CFG_GSZ 0x80
+
+#define REW_PORT_CFG_ES0_EN BIT(5)
+#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG(x) (((x) << 3) & GENMASK(4, 3))
+#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_M GENMASK(4, 3)
+#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define REW_PORT_CFG_FCS_UPDATE_CPU_ENA BIT(2)
+#define REW_PORT_CFG_FLUSH_ENA BIT(1)
+#define REW_PORT_CFG_AGE_DIS BIT(0)
+
+#define REW_DSCP_CFG_GSZ 0x80
+
+#define REW_PCP_DEI_QOS_MAP_CFG_GSZ 0x80
+#define REW_PCP_DEI_QOS_MAP_CFG_RSZ 0x4
+
+#define REW_PCP_DEI_QOS_MAP_CFG_DEI_QOS_VAL BIT(3)
+#define REW_PCP_DEI_QOS_MAP_CFG_PCP_QOS_VAL(x) ((x) & GENMASK(2, 0))
+#define REW_PCP_DEI_QOS_MAP_CFG_PCP_QOS_VAL_M GENMASK(2, 0)
+
+#define REW_PTP_CFG_GSZ 0x80
+
+#define REW_PTP_CFG_PTP_BACKPLANE_MODE BIT(7)
+#define REW_PTP_CFG_GP_CFG_UNUSED(x) (((x) << 3) & GENMASK(6, 3))
+#define REW_PTP_CFG_GP_CFG_UNUSED_M GENMASK(6, 3)
+#define REW_PTP_CFG_GP_CFG_UNUSED_X(x) (((x) & GENMASK(6, 3)) >> 3)
+#define REW_PTP_CFG_PTP_1STEP_DIS BIT(2)
+#define REW_PTP_CFG_PTP_2STEP_DIS BIT(1)
+#define REW_PTP_CFG_PTP_UDP_KEEP BIT(0)
+
+#define REW_PTP_DLY1_CFG_GSZ 0x80
+
+#define REW_RED_TAG_CFG_GSZ 0x80
+
+#define REW_RED_TAG_CFG_RED_TAG_CFG BIT(0)
+
+#define REW_DSCP_REMAP_DP1_CFG_RSZ 0x4
+
+#define REW_DSCP_REMAP_CFG_RSZ 0x4
+
+#define REW_REW_STICKY_ES0_TAGB_PUSH_FAILED BIT(0)
+
+#define REW_PPT_RSZ 0x4
+
+#endif
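A short usage sketch (editorial, not part of the patch) of the field-macro convention used throughout this header and ocelot_sys.h below: FIELD(x) encodes a value into its bit position, FIELD_M is the mask, and FIELD_X(x) extracts the value again. The helper name is made up for illustration; only the macros come from the header above.

    #include <linux/types.h>
    #include <linux/bitops.h>	/* GENMASK() */

    /* Update the PCP field of a cached REW_PORT_VLAN_CFG register value. */
    static u32 example_set_port_pcp(u32 val, u8 pcp)
    {
    	val &= ~REW_PORT_VLAN_CFG_PORT_PCP_M;	/* clear bits 14:12 */
    	val |= REW_PORT_VLAN_CFG_PORT_PCP(pcp);	/* insert the new value */
    	return val;
    }

    /* Reading back: REW_PORT_VLAN_CFG_PORT_PCP_X(val) == pcp */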
diff --git a/drivers/net/ethernet/mscc/ocelot_sys.h b/drivers/net/ethernet/mscc/ocelot_sys.h
new file mode 100644
index 000000000000..16f91e172bcb
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_sys.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_SYS_H_
+#define _MSCC_OCELOT_SYS_H_
+
+#define SYS_COUNT_RX_OCTETS_RSZ 0x4
+
+#define SYS_COUNT_TX_OCTETS_RSZ 0x4
+
+#define SYS_PORT_MODE_RSZ 0x4
+
+#define SYS_PORT_MODE_DATA_WO_TS(x) (((x) << 5) & GENMASK(6, 5))
+#define SYS_PORT_MODE_DATA_WO_TS_M GENMASK(6, 5)
+#define SYS_PORT_MODE_DATA_WO_TS_X(x) (((x) & GENMASK(6, 5)) >> 5)
+#define SYS_PORT_MODE_INCL_INJ_HDR(x) (((x) << 3) & GENMASK(4, 3))
+#define SYS_PORT_MODE_INCL_INJ_HDR_M GENMASK(4, 3)
+#define SYS_PORT_MODE_INCL_INJ_HDR_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define SYS_PORT_MODE_INCL_XTR_HDR(x) (((x) << 1) & GENMASK(2, 1))
+#define SYS_PORT_MODE_INCL_XTR_HDR_M GENMASK(2, 1)
+#define SYS_PORT_MODE_INCL_XTR_HDR_X(x) (((x) & GENMASK(2, 1)) >> 1)
+#define SYS_PORT_MODE_INJ_HDR_ERR BIT(0)
+
+#define SYS_FRONT_PORT_MODE_RSZ 0x4
+
+#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(0)
+
+#define SYS_FRM_AGING_AGE_TX_ENA BIT(20)
+#define SYS_FRM_AGING_MAX_AGE(x) ((x) & GENMASK(19, 0))
+#define SYS_FRM_AGING_MAX_AGE_M GENMASK(19, 0)
+
+#define SYS_STAT_CFG_STAT_CLEAR_SHOT(x) (((x) << 10) & GENMASK(16, 10))
+#define SYS_STAT_CFG_STAT_CLEAR_SHOT_M GENMASK(16, 10)
+#define SYS_STAT_CFG_STAT_CLEAR_SHOT_X(x) (((x) & GENMASK(16, 10)) >> 10)
+#define SYS_STAT_CFG_STAT_VIEW(x) ((x) & GENMASK(9, 0))
+#define SYS_STAT_CFG_STAT_VIEW_M GENMASK(9, 0)
+
+#define SYS_SW_STATUS_RSZ 0x4
+
+#define SYS_SW_STATUS_PORT_RX_PAUSED BIT(0)
+
+#define SYS_MISC_CFG_PTP_RSRV_CLR BIT(1)
+#define SYS_MISC_CFG_PTP_DIS_NEG_RO BIT(0)
+
+#define SYS_REW_MAC_HIGH_CFG_RSZ 0x4
+
+#define SYS_REW_MAC_LOW_CFG_RSZ 0x4
+
+#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG(x) (((x) << 6) & GENMASK(21, 6))
+#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_M GENMASK(21, 6)
+#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_X(x) (((x) & GENMASK(21, 6)) >> 6)
+#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET(x) ((x) & GENMASK(5, 0))
+#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET_M GENMASK(5, 0)
+
+#define SYS_PAUSE_CFG_RSZ 0x4
+
+#define SYS_PAUSE_CFG_PAUSE_START(x) (((x) << 10) & GENMASK(18, 10))
+#define SYS_PAUSE_CFG_PAUSE_START_M GENMASK(18, 10)
+#define SYS_PAUSE_CFG_PAUSE_START_X(x) (((x) & GENMASK(18, 10)) >> 10)
+#define SYS_PAUSE_CFG_PAUSE_STOP(x) (((x) << 1) & GENMASK(9, 1))
+#define SYS_PAUSE_CFG_PAUSE_STOP_M GENMASK(9, 1)
+#define SYS_PAUSE_CFG_PAUSE_STOP_X(x) (((x) & GENMASK(9, 1)) >> 1)
+#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0)
+
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START(x) (((x) << 9) & GENMASK(17, 9))
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_M GENMASK(17, 9)
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_X(x) (((x) & GENMASK(17, 9)) >> 9)
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP(x) ((x) & GENMASK(8, 0))
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP_M GENMASK(8, 0)
+
+#define SYS_ATOP_RSZ 0x4
+
+#define SYS_MAC_FC_CFG_RSZ 0x4
+
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED(x) (((x) << 26) & GENMASK(27, 26))
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_M GENMASK(27, 26)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_X(x) (((x) & GENMASK(27, 26)) >> 26)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG(x) (((x) << 20) & GENMASK(25, 20))
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_M GENMASK(25, 20)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_X(x) (((x) & GENMASK(25, 20)) >> 20)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18)
+#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17)
+#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG(x) ((x) & GENMASK(15, 0))
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_M GENMASK(15, 0)
+
+#define SYS_MMGT_RELCNT(x) (((x) << 16) & GENMASK(31, 16))
+#define SYS_MMGT_RELCNT_M GENMASK(31, 16)
+#define SYS_MMGT_RELCNT_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define SYS_MMGT_FREECNT(x) ((x) & GENMASK(15, 0))
+#define SYS_MMGT_FREECNT_M GENMASK(15, 0)
+
+#define SYS_MMGT_FAST_FREEVLD(x) (((x) << 4) & GENMASK(7, 4))
+#define SYS_MMGT_FAST_FREEVLD_M GENMASK(7, 4)
+#define SYS_MMGT_FAST_FREEVLD_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define SYS_MMGT_FAST_RELVLD(x) ((x) & GENMASK(3, 0))
+#define SYS_MMGT_FAST_RELVLD_M GENMASK(3, 0)
+
+#define SYS_EVENTS_DIF_RSZ 0x4
+
+#define SYS_EVENTS_DIF_EV_DRX(x) (((x) << 6) & GENMASK(8, 6))
+#define SYS_EVENTS_DIF_EV_DRX_M GENMASK(8, 6)
+#define SYS_EVENTS_DIF_EV_DRX_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define SYS_EVENTS_DIF_EV_DTX(x) ((x) & GENMASK(5, 0))
+#define SYS_EVENTS_DIF_EV_DTX_M GENMASK(5, 0)
+
+#define SYS_EVENTS_CORE_EV_FWR BIT(2)
+#define SYS_EVENTS_CORE_EV_ANA(x) ((x) & GENMASK(1, 0))
+#define SYS_EVENTS_CORE_EV_ANA_M GENMASK(1, 0)
+
+#define SYS_CNT_GSZ 0x4
+
+#define SYS_PTP_STATUS_PTP_TXSTAMP_OAM BIT(29)
+#define SYS_PTP_STATUS_PTP_OVFL BIT(28)
+#define SYS_PTP_STATUS_PTP_MESS_VLD BIT(27)
+#define SYS_PTP_STATUS_PTP_MESS_ID(x) (((x) << 21) & GENMASK(26, 21))
+#define SYS_PTP_STATUS_PTP_MESS_ID_M GENMASK(26, 21)
+#define SYS_PTP_STATUS_PTP_MESS_ID_X(x) (((x) & GENMASK(26, 21)) >> 21)
+#define SYS_PTP_STATUS_PTP_MESS_TXPORT(x) (((x) << 16) & GENMASK(20, 16))
+#define SYS_PTP_STATUS_PTP_MESS_TXPORT_M GENMASK(20, 16)
+#define SYS_PTP_STATUS_PTP_MESS_TXPORT_X(x) (((x) & GENMASK(20, 16)) >> 16)
+#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID(x) ((x) & GENMASK(15, 0))
+#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID_M GENMASK(15, 0)
+
+#define SYS_PTP_TXSTAMP_PTP_TXSTAMP(x) ((x) & GENMASK(29, 0))
+#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_M GENMASK(29, 0)
+#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC BIT(31)
+
+#define SYS_PTP_NXT_PTP_NXT BIT(0)
+
+#define SYS_PTP_CFG_PTP_STAMP_WID(x) (((x) << 2) & GENMASK(7, 2))
+#define SYS_PTP_CFG_PTP_STAMP_WID_M GENMASK(7, 2)
+#define SYS_PTP_CFG_PTP_STAMP_WID_X(x) (((x) & GENMASK(7, 2)) >> 2)
+#define SYS_PTP_CFG_PTP_CF_ROLL_MODE(x) ((x) & GENMASK(1, 0))
+#define SYS_PTP_CFG_PTP_CF_ROLL_MODE_M GENMASK(1, 0)
+
+#define SYS_RAM_INIT_RAM_INIT BIT(1)
+#define SYS_RAM_INIT_RAM_CFG_HOOK BIT(0)
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 7e298148ca26..cb87fccb9f6a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -102,6 +102,15 @@ nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
return nfp_bpf_cmsg_alloc(bpf, size);
}
+static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
+{
+ struct cmsg_hdr *hdr;
+
+ hdr = (struct cmsg_hdr *)skb->data;
+
+ return hdr->type;
+}
+
static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
struct cmsg_hdr *hdr;
@@ -431,6 +440,11 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
goto err_free;
}
+ if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
+ nfp_bpf_event_output(bpf, skb);
+ return;
+ }
+
nfp_ctrl_lock(bpf->app->ctrl);
tag = nfp_bpf_cmsg_get_tag(skb);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index 39639ac28b01..4c7972e3db63 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -37,11 +37,20 @@
#include <linux/bitops.h>
#include <linux/types.h>
+/* Kernel's enum bpf_reg_type is not uABI so people may change it breaking
+ * our FW ABI. In that case we will do translation in the driver.
+ */
+#define NFP_BPF_SCALAR_VALUE 1
+#define NFP_BPF_MAP_VALUE 4
+#define NFP_BPF_STACK 6
+#define NFP_BPF_PACKET_DATA 8
+
enum bpf_cap_tlv_type {
NFP_BPF_CAP_TYPE_FUNC = 1,
NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2,
NFP_BPF_CAP_TYPE_MAPS = 3,
NFP_BPF_CAP_TYPE_RANDOM = 4,
+ NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5,
};
struct nfp_bpf_cap_tlv_func {
@@ -81,6 +90,7 @@ enum nfp_bpf_cmsg_type {
CMSG_TYPE_MAP_DELETE = 5,
CMSG_TYPE_MAP_GETNEXT = 6,
CMSG_TYPE_MAP_GETFIRST = 7,
+ CMSG_TYPE_BPF_EVENT = 8,
__CMSG_TYPE_MAP_MAX,
};
@@ -155,4 +165,13 @@ struct cmsg_reply_map_op {
__be32 resv;
struct cmsg_key_value_pair elem[0];
};
+
+struct cmsg_bpf_event {
+ struct cmsg_hdr hdr;
+ __be32 cpu_id;
+ __be64 map_ptr;
+ __be32 data_size;
+ __be32 pkt_size;
+ u8 data[0];
+};
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 29b4e5f8c102..a4d3da215863 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ * Copyright (C) 2016-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -42,6 +42,7 @@
#include "main.h"
#include "../nfp_asm.h"
+#include "../nfp_net_ctrl.h"
/* --- NFP prog --- */
/* Foreach "multiple" entries macros provide pos and next<n> pointers.
@@ -1214,45 +1215,83 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return 0;
}
-static int
-wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- enum br_mask br_mask, bool swap)
+static const struct jmp_code_map {
+ enum br_mask br_mask;
+ bool swap;
+} jmp_code_map[] = {
+ [BPF_JGT >> 4] = { BR_BLO, true },
+ [BPF_JGE >> 4] = { BR_BHS, false },
+ [BPF_JLT >> 4] = { BR_BLO, false },
+ [BPF_JLE >> 4] = { BR_BHS, true },
+ [BPF_JSGT >> 4] = { BR_BLT, true },
+ [BPF_JSGE >> 4] = { BR_BGE, false },
+ [BPF_JSLT >> 4] = { BR_BLT, false },
+ [BPF_JSLE >> 4] = { BR_BGE, true },
+};
+
+static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
+{
+ unsigned int op;
+
+ op = BPF_OP(meta->insn.code) >> 4;
+ /* br_mask of 0 is BR_BEQ which we don't use in jump code table */
+ if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
+ !jmp_code_map[op].br_mask,
+ "no code found for jump instruction"))
+ return NULL;
+
+ return &jmp_code_map[op];
+}
+
+static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
+ const struct jmp_code_map *code;
+ enum alu_op alu_op, carry_op;
u8 reg = insn->dst_reg * 2;
swreg tmp_reg;
+ code = nfp_jmp_code_get(meta);
+ if (!code)
+ return -EINVAL;
+
+ alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
+ carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;
+
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
- if (!swap)
- emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
+ if (!code->swap)
+ emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
else
- emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));
+ emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));
tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
- if (!swap)
+ if (!code->swap)
emit_alu(nfp_prog, reg_none(),
- reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
+ reg_a(reg + 1), carry_op, tmp_reg);
else
emit_alu(nfp_prog, reg_none(),
- tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));
+ tmp_reg, carry_op, reg_a(reg + 1));
- emit_br(nfp_prog, br_mask, insn->off, 0);
+ emit_br(nfp_prog, code->br_mask, insn->off, 0);
return 0;
}
-static int
-wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- enum br_mask br_mask, bool swap)
+static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
+ const struct jmp_code_map *code;
u8 areg, breg;
+ code = nfp_jmp_code_get(meta);
+ if (!code)
+ return -EINVAL;
+
areg = insn->dst_reg * 2;
breg = insn->src_reg * 2;
- if (swap) {
+ if (code->swap) {
areg ^= breg;
breg ^= areg;
areg ^= breg;
@@ -1261,7 +1300,7 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
emit_alu(nfp_prog, reg_none(),
reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
- emit_br(nfp_prog, br_mask, insn->off, 0);
+ emit_br(nfp_prog, code->br_mask, insn->off, 0);
return 0;
}
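A worked example (editorial, not part of the patch) of the table lookup that replaces the per-opcode wrappers removed further down in this file:

    /* BPF_JGT is 0x20, so for a BPF_JMP | BPF_JGT | BPF_K instruction
     * BPF_OP(insn.code) >> 4 == 2 and nfp_jmp_code_get() returns
     * &jmp_code_map[2] == { .br_mask = BR_BLO, .swap = true } --
     * the same pair the removed jgt_imm()/jgt_reg() helpers passed to
     * wrp_cmp_imm()/wrp_cmp_reg() explicitly.
     */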
@@ -1357,15 +1396,9 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- struct bpf_offloaded_map *offmap;
- struct nfp_bpf_map *nfp_map;
bool load_lm_ptr;
u32 ret_tgt;
s64 lm_off;
- swreg tid;
-
- offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
- nfp_map = offmap->dev_priv;
/* We only have to reload LM0 if the key is not at start of stack */
lm_off = nfp_prog->stack_depth;
@@ -1378,17 +1411,12 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
if (meta->func_id == BPF_FUNC_map_update_elem)
emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);
- /* Load map ID into a register, it should actually fit as an immediate
- * but in case it doesn't deal with it here, not in the delay slots.
- */
- tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));
-
emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
2, RELO_BR_HELPER);
ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
/* Load map ID into A0 */
- wrp_mov(nfp_prog, reg_a(0), tid);
+ wrp_mov(nfp_prog, reg_a(0), reg_a(2));
/* Load the return address into B0 */
wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
@@ -1400,7 +1428,7 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
if (!load_lm_ptr)
return 0;
- emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
+ emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
wrp_nops(nfp_prog, 3);
return 0;
@@ -1418,6 +1446,63 @@ nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int
+nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ swreg ptr_type;
+ u32 ret_tgt;
+
+ ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));
+
+ ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
+
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
+ 2, RELO_BR_HELPER);
+
+ /* Load ptr type into A1 */
+ wrp_mov(nfp_prog, reg_a(1), ptr_type);
+
+ /* Load the return address into B0 */
+ wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u32 jmp_tgt;
+
+ jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;
+
+ /* Make sure the queue id fits into FW field */
+ emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
+ ALU_OP_AND_NOT_B, reg_imm(0xff));
+ emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);
+
+ /* Set the 'queue selected' bit and the queue value */
+ emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
+ pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
+ SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
+ emit_ld_field(nfp_prog,
+ pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
+ SHF_SC_NONE, 0);
+ /* Delay slots end here, we will jump over next instruction if queue
+ * value fits into the field.
+ */
+ emit_ld_field(nfp_prog,
+ pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
+ SHF_SC_NONE, 0);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
+ return -EINVAL;
+
+ return 0;
+}
+
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -2108,6 +2193,17 @@ mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
false, wrp_lmem_store);
}
+static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ switch (meta->insn.off) {
+ case offsetof(struct xdp_md, rx_queue_index):
+ return nfp_queue_select(nfp_prog, meta);
+ }
+
+ WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */
+ return -EOPNOTSUPP;
+}
+
static int
mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int size)
@@ -2134,6 +2230,9 @@ static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
+ if (meta->ptr.type == PTR_TO_CTX)
+ if (nfp_prog->type == BPF_PROG_TYPE_XDP)
+ return mem_stx_xdp(nfp_prog, meta);
return mem_stx(nfp_prog, meta, 4);
}
@@ -2283,46 +2382,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
-static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
-}
-
-static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
-}
-
-static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
-}
-
-static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
-}
-
-static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
-}
-
-static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
-}
-
-static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
-}
-
-static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
-}
-
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -2392,46 +2451,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
-static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
-}
-
-static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
-}
-
-static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
-}
-
-static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
-}
-
-static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
-}
-
-static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
-}
-
-static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
-}
-
-static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
-}
-
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
@@ -2453,6 +2472,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return map_call_stack_common(nfp_prog, meta);
case BPF_FUNC_get_prandom_u32:
return nfp_get_prandom_u32(nfp_prog, meta);
+ case BPF_FUNC_perf_event_output:
+ return nfp_perf_event_output(nfp_prog, meta);
default:
WARN_ONCE(1, "verifier allowed unsupported function\n");
return -EOPNOTSUPP;
@@ -2520,25 +2541,25 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
[BPF_JMP | BPF_JA | BPF_K] = jump,
[BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
- [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
- [BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
- [BPF_JMP | BPF_JLT | BPF_K] = jlt_imm,
- [BPF_JMP | BPF_JLE | BPF_K] = jle_imm,
- [BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm,
- [BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm,
- [BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm,
- [BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm,
+ [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm,
+ [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm,
[BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
[BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
[BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
- [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg,
- [BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
- [BPF_JMP | BPF_JLT | BPF_X] = jlt_reg,
- [BPF_JMP | BPF_JLE | BPF_X] = jle_reg,
- [BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg,
- [BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg,
- [BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg,
- [BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg,
+ [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg,
+ [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
[BPF_JMP | BPF_CALL] = call,
@@ -2777,6 +2798,54 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
}
}
+/* abs(insn.imm) will fit better into unrestricted reg immediate -
+ * convert add/sub of a negative number into a sub/add of a positive one.
+ */
+static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ struct bpf_insn insn = meta->insn;
+
+ if (meta->skip)
+ continue;
+
+ if (BPF_CLASS(insn.code) != BPF_ALU &&
+ BPF_CLASS(insn.code) != BPF_ALU64 &&
+ BPF_CLASS(insn.code) != BPF_JMP)
+ continue;
+ if (BPF_SRC(insn.code) != BPF_K)
+ continue;
+ if (insn.imm >= 0)
+ continue;
+
+ if (BPF_CLASS(insn.code) == BPF_JMP) {
+ switch (BPF_OP(insn.code)) {
+ case BPF_JGE:
+ case BPF_JSGE:
+ case BPF_JLT:
+ case BPF_JSLT:
+ meta->jump_neg_op = true;
+ break;
+ default:
+ continue;
+ }
+ } else {
+ if (BPF_OP(insn.code) == BPF_ADD)
+ insn.code = BPF_CLASS(insn.code) | BPF_SUB;
+ else if (BPF_OP(insn.code) == BPF_SUB)
+ insn.code = BPF_CLASS(insn.code) | BPF_ADD;
+ else
+ continue;
+
+ meta->insn.code = insn.code | BPF_K;
+ }
+
+ meta->insn.imm = -insn.imm;
+ }
+}
+
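A small before/after example (editorial, not part of the patch) of what this optimization does to an instruction with a negative immediate:

    /* With insn.imm == -2:
     *
     *	BPF_ALU64 | BPF_ADD | BPF_K	r0 += -2
     * becomes
     *	BPF_ALU64 | BPF_SUB | BPF_K	r0 -= 2
     *
     * so the value loaded into the unrestricted immediate register is 2
     * instead of the sign-extended 0xfffffffe.  For the listed jump
     * opcodes only meta->jump_neg_op is set and the immediate negated;
     * cmp_imm() then flips ALU_OP_SUB/SUB_C to ALU_OP_ADD/ADD_C.
     */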
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
@@ -3212,6 +3281,7 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
nfp_bpf_opt_reg_init(nfp_prog);
+ nfp_bpf_opt_neg_add_sub(nfp_prog);
nfp_bpf_opt_ld_mask(nfp_prog);
nfp_bpf_opt_ld_shift(nfp_prog);
nfp_bpf_opt_ldst_gather(nfp_prog);
@@ -3220,6 +3290,33 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
return 0;
}
+static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta1, *meta2;
+ struct nfp_bpf_map *nfp_map;
+ struct bpf_map *map;
+
+ nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
+ if (meta1->skip || meta2->skip)
+ continue;
+
+ if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
+ meta1->insn.src_reg != BPF_PSEUDO_MAP_FD)
+ continue;
+
+ map = (void *)(unsigned long)((u32)meta1->insn.imm |
+ (u64)meta2->insn.imm << 32);
+ if (bpf_map_offload_neutral(map))
+ continue;
+ nfp_map = map_to_offmap(map)->dev_priv;
+
+ meta1->insn.imm = nfp_map->tid;
+ meta2->insn.imm = 0;
+ }
+
+ return 0;
+}
+
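An editorial note (not part of the patch) on the pointer reconstruction above:

    /* A map is referenced by a two-instruction BPF_LD | BPF_IMM | BPF_DW
     * pseudo load; by JIT time the verifier has replaced the fd with the
     * kernel map pointer, split across the two 32-bit immediates:
     *
     *	map = (u32)meta1->insn.imm | (u64)meta2->insn.imm << 32;
     *
     * For maps offloaded to the NFP the pair is rewritten to carry the
     * firmware map id (nfp_map->tid) instead, which is why the updated
     * map_call_stack_common() can now take the id straight from R1
     * rather than loading it as a separate immediate.
     */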
static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
__le64 *ustore = (__force __le64 *)prog;
@@ -3256,6 +3353,10 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
int ret;
+ ret = nfp_bpf_replace_map_ptrs(nfp_prog);
+ if (ret)
+ return ret;
+
ret = nfp_bpf_optimize(nfp_prog);
if (ret)
return ret;
@@ -3346,6 +3447,9 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
case BPF_FUNC_map_delete_elem:
val = nfp_prog->bpf->helpers.map_delete;
break;
+ case BPF_FUNC_perf_event_output:
+ val = nfp_prog->bpf->helpers.perf_event_output;
+ break;
default:
pr_err("relocation of unknown helper %d\n",
val);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 1dc424685f4e..fcdfb8e7fdea 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -43,6 +43,14 @@
#include "fw.h"
#include "main.h"
+const struct rhashtable_params nfp_bpf_maps_neutral_params = {
+ .nelem_hint = 4,
+ .key_len = FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr),
+ .key_offset = offsetof(struct nfp_bpf_neutral_map, ptr),
+ .head_offset = offsetof(struct nfp_bpf_neutral_map, l),
+ .automatic_shrinking = true,
+};
+
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
#ifdef __LITTLE_ENDIAN
@@ -290,6 +298,9 @@ nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
case BPF_FUNC_map_delete_elem:
bpf->helpers.map_delete = readl(&cap->func_addr);
break;
+ case BPF_FUNC_perf_event_output:
+ bpf->helpers.perf_event_output = readl(&cap->func_addr);
+ break;
}
return 0;
@@ -323,6 +334,13 @@ nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value,
return 0;
}
+static int
+nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+ bpf->queue_select = true;
+ return 0;
+}
+
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
struct nfp_cpp *cpp = app->pf->cpp;
@@ -335,7 +353,7 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);
start = mem;
- while (mem - start + 8 < nfp_cpp_area_size(area)) {
+ while (mem - start + 8 <= nfp_cpp_area_size(area)) {
u8 __iomem *value;
u32 type, length;
@@ -365,6 +383,10 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
if (nfp_bpf_parse_cap_random(app->priv, value, length))
goto err_release_free;
break;
+ case NFP_BPF_CAP_TYPE_QUEUE_SELECT:
+ if (nfp_bpf_parse_cap_qsel(app->priv, value, length))
+ goto err_release_free;
+ break;
default:
nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
break;
@@ -401,17 +423,28 @@ static int nfp_bpf_init(struct nfp_app *app)
init_waitqueue_head(&bpf->cmsg_wq);
INIT_LIST_HEAD(&bpf->map_list);
- err = nfp_bpf_parse_capabilities(app);
+ err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
if (err)
goto err_free_bpf;
+ err = nfp_bpf_parse_capabilities(app);
+ if (err)
+ goto err_free_neutral_maps;
+
return 0;
+err_free_neutral_maps:
+ rhashtable_destroy(&bpf->maps_neutral);
err_free_bpf:
kfree(bpf);
return err;
}
+static void nfp_check_rhashtable_empty(void *ptr, void *arg)
+{
+ WARN_ON_ONCE(1);
+}
+
static void nfp_bpf_clean(struct nfp_app *app)
{
struct nfp_app_bpf *bpf = app->priv;
@@ -419,6 +452,8 @@ static void nfp_bpf_clean(struct nfp_app *app)
WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
+ rhashtable_free_and_destroy(&bpf->maps_neutral,
+ nfp_check_rhashtable_empty, NULL);
kfree(bpf);
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 4981c8944ca3..8b143546ae85 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ * Copyright (C) 2016-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -39,6 +39,7 @@
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/rhashtable.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -81,10 +82,16 @@ enum static_regs {
enum pkt_vec {
PKT_VEC_PKT_LEN = 0,
PKT_VEC_PKT_PTR = 2,
+ PKT_VEC_QSEL_SET = 4,
+ PKT_VEC_QSEL_VAL = 6,
};
+#define PKT_VEL_QSEL_SET_BIT 4
+
#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR)
+#define pv_qsel_set(np) reg_lm(1, PKT_VEC_QSEL_SET)
+#define pv_qsel_val(np) reg_lm(1, PKT_VEC_QSEL_VAL)
#define stack_reg(np) reg_a(STATIC_REG_STACK)
#define stack_imm(np) imm_b(np)
@@ -114,6 +121,8 @@ enum pkt_vec {
* @maps_in_use: number of currently offloaded maps
* @map_elems_in_use: number of elements allocated to offloaded maps
*
+ * @maps_neutral: hash table of offload-neutral maps (on pointer)
+ *
* @adjust_head: adjust head capability
* @adjust_head.flags: extra flags for adjust head
* @adjust_head.off_min: minimal packet offset within buffer required
@@ -133,8 +142,10 @@ enum pkt_vec {
* @helpers.map_lookup: map lookup helper address
* @helpers.map_update: map update helper address
* @helpers.map_delete: map delete helper address
+ * @helpers.perf_event_output: output perf event to a ring buffer
*
* @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
+ * @queue_select: BPF can set the RX queue ID in packet vector
*/
struct nfp_app_bpf {
struct nfp_app *app;
@@ -150,6 +161,8 @@ struct nfp_app_bpf {
unsigned int maps_in_use;
unsigned int map_elems_in_use;
+ struct rhashtable maps_neutral;
+
struct nfp_bpf_cap_adjust_head {
u32 flags;
int off_min;
@@ -171,9 +184,11 @@ struct nfp_app_bpf {
u32 map_lookup;
u32 map_update;
u32 map_delete;
+ u32 perf_event_output;
} helpers;
bool pseudo_random;
+ bool queue_select;
};
enum nfp_bpf_map_use {
@@ -199,6 +214,14 @@ struct nfp_bpf_map {
enum nfp_bpf_map_use use_map[];
};
+struct nfp_bpf_neutral_map {
+ struct rhash_head l;
+ struct bpf_map *ptr;
+ u32 count;
+};
+
+extern const struct rhashtable_params nfp_bpf_maps_neutral_params;
+
struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
@@ -236,6 +259,7 @@ struct nfp_bpf_reg_state {
* @xadd_over_16bit: 16bit immediate is not guaranteed
* @xadd_maybe_16bit: 16bit immediate is possible
* @jmp_dst: destination info for jump instructions
+ * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
@@ -264,7 +288,10 @@ struct nfp_insn_meta {
bool xadd_maybe_16bit;
};
/* jump */
- struct nfp_insn_meta *jmp_dst;
+ struct {
+ struct nfp_insn_meta *jmp_dst;
+ bool jump_neg_op;
+ };
/* function calls */
struct {
u32 func_id;
@@ -363,6 +390,8 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
* @error: error code if something went wrong
* @stack_depth: max stack depth from the verifier
* @adjust_head_location: if program has single adjust head call - the insn no.
+ * @map_records_cnt: the number of map pointers recorded for this prog
+ * @map_records: the map record pointers from bpf->maps_neutral
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
@@ -386,6 +415,9 @@ struct nfp_prog {
unsigned int stack_depth;
unsigned int adjust_head_location;
+ unsigned int map_records_cnt;
+ struct nfp_bpf_neutral_map **map_records;
+
struct list_head insns;
};
@@ -436,5 +468,7 @@ int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key);
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb);
+
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 42d98792bd25..4db0ac1e42a8 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ * Copyright (C) 2016-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -57,6 +57,126 @@
#include "../nfp_net.h"
static int
+nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
+ struct bpf_map *map)
+{
+ struct nfp_bpf_neutral_map *record;
+ int err;
+
+ /* Map record paths are entered via ndo, update side is protected. */
+ ASSERT_RTNL();
+
+ /* Reuse path - other offloaded program is already tracking this map. */
+ record = rhashtable_lookup_fast(&bpf->maps_neutral, &map,
+ nfp_bpf_maps_neutral_params);
+ if (record) {
+ nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
+ record->count++;
+ return 0;
+ }
+
+ /* Grab a single ref to the map for our record. The prog destroy ndo
+ * happens after free_used_maps().
+ */
+ map = bpf_map_inc(map, false);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ record = kmalloc(sizeof(*record), GFP_KERNEL);
+ if (!record) {
+ err = -ENOMEM;
+ goto err_map_put;
+ }
+
+ record->ptr = map;
+ record->count = 1;
+
+ err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
+ nfp_bpf_maps_neutral_params);
+ if (err)
+ goto err_free_rec;
+
+ nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
+
+ return 0;
+
+err_free_rec:
+ kfree(record);
+err_map_put:
+ bpf_map_put(map);
+ return err;
+}
+
+static void
+nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
+{
+ bool freed = false;
+ int i;
+
+ ASSERT_RTNL();
+
+ for (i = 0; i < nfp_prog->map_records_cnt; i++) {
+ if (--nfp_prog->map_records[i]->count) {
+ nfp_prog->map_records[i] = NULL;
+ continue;
+ }
+
+ WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
+ &nfp_prog->map_records[i]->l,
+ nfp_bpf_maps_neutral_params));
+ freed = true;
+ }
+
+ if (freed) {
+ synchronize_rcu();
+
+ for (i = 0; i < nfp_prog->map_records_cnt; i++)
+ if (nfp_prog->map_records[i]) {
+ bpf_map_put(nfp_prog->map_records[i]->ptr);
+ kfree(nfp_prog->map_records[i]);
+ }
+ }
+
+ kfree(nfp_prog->map_records);
+ nfp_prog->map_records = NULL;
+ nfp_prog->map_records_cnt = 0;
+}
+
+static int
+nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
+ struct bpf_prog *prog)
+{
+ int i, cnt, err;
+
+ /* Quickly count the maps we will have to remember */
+ cnt = 0;
+ for (i = 0; i < prog->aux->used_map_cnt; i++)
+ if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
+ cnt++;
+ if (!cnt)
+ return 0;
+
+ nfp_prog->map_records = kmalloc_array(cnt,
+ sizeof(nfp_prog->map_records[0]),
+ GFP_KERNEL);
+ if (!nfp_prog->map_records)
+ return -ENOMEM;
+
+ for (i = 0; i < prog->aux->used_map_cnt; i++)
+ if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
+ err = nfp_map_ptr_record(bpf, nfp_prog,
+ prog->aux->used_maps[i]);
+ if (err) {
+ nfp_map_ptrs_forget(bpf, nfp_prog);
+ return err;
+ }
+ }
+ WARN_ON(cnt != nfp_prog->map_records_cnt);
+
+ return 0;
+}
+
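A brief sketch (editorial, not part of the patch) of the record life cycle implemented above:

    /*	nfp_bpf_translate()
     *	  -> nfp_map_ptrs_record()	record->count++ (insert at 1)
     *	nfp_bpf_destroy()
     *	  -> nfp_map_ptrs_forget()	record->count--, remove and
     *					bpf_map_put() when it hits 0
     *
     * The rhashtable is keyed on the struct bpf_map pointer itself, so
     * nfp_bpf_event_output() can later look up the pointer echoed back
     * by the firmware and hand the event to bpf_event_output().
     */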
+static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
unsigned int cnt)
{
@@ -151,7 +271,7 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
prog->aux->offload->jited_image = nfp_prog->prog;
- return 0;
+ return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}
static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
@@ -159,6 +279,7 @@ static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
kvfree(nfp_prog->prog);
+ nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
nfp_prog_free(nfp_prog);
return 0;
@@ -320,6 +441,53 @@ int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
}
}
+static unsigned long
+nfp_bpf_perf_event_copy(void *dst, const void *src,
+ unsigned long off, unsigned long len)
+{
+ memcpy(dst, src + off, len);
+ return 0;
+}
+
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb)
+{
+ struct cmsg_bpf_event *cbe = (void *)skb->data;
+ u32 pkt_size, data_size;
+ struct bpf_map *map;
+
+ if (skb->len < sizeof(struct cmsg_bpf_event))
+ goto err_drop;
+
+ pkt_size = be32_to_cpu(cbe->pkt_size);
+ data_size = be32_to_cpu(cbe->data_size);
+ map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr);
+
+ if (skb->len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ goto err_drop;
+ if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
+ goto err_drop;
+
+ rcu_read_lock();
+ if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map,
+ nfp_bpf_maps_neutral_params)) {
+ rcu_read_unlock();
+ pr_warn("perf event: dest map pointer %px not recognized, dropping event\n",
+ map);
+ goto err_drop;
+ }
+
+ bpf_event_output(map, be32_to_cpu(cbe->cpu_id),
+ &cbe->data[round_up(pkt_size, 4)], data_size,
+ cbe->data, pkt_size, nfp_bpf_perf_event_copy);
+ rcu_read_unlock();
+
+ dev_consume_skb_any(skb);
+ return 0;
+err_drop:
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+}
+
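The message layout the parser above expects, sketched out (editorial, not part of the patch):

    /*	struct cmsg_bpf_event	hdr, cpu_id, map_ptr, data_size, pkt_size
     *	data[0 .. pkt_size)			captured packet bytes
     *	data[round_up(pkt_size, 4) ..)		data_size bytes of metadata
     *
     * i.e. the metadata passed to bpf_event_output() starts at the first
     * 4-byte-aligned offset after the packet.
     */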
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 06ad53ce4ad9..844a9be6e55a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ * Copyright (C) 2016-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -36,6 +36,8 @@
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
+#include "../nfp_app.h"
+#include "../nfp_main.h"
#include "fw.h"
#include "main.h"
@@ -149,15 +151,6 @@ nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
return false;
}
- /* Rest of the checks is only if we re-parse the same insn */
- if (!meta->func_id)
- return true;
-
- if (meta->arg1.map_ptr != reg1->map_ptr) {
- pr_vlog(env, "%s: called for different map\n", fname);
- return false;
- }
-
return true;
}
@@ -216,6 +209,71 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
return -EOPNOTSUPP;
+ case BPF_FUNC_perf_event_output:
+ BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE ||
+ NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE ||
+ NFP_BPF_STACK != PTR_TO_STACK ||
+ NFP_BPF_PACKET_DATA != PTR_TO_PACKET);
+
+ if (!bpf->helpers.perf_event_output) {
+ pr_vlog(env, "event_output: not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Force current CPU to make sure we can report the event
+ * wherever we get the control message from FW.
+ */
+ if (reg3->var_off.mask & BPF_F_INDEX_MASK ||
+ (reg3->var_off.value & BPF_F_INDEX_MASK) !=
+ BPF_F_CURRENT_CPU) {
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off);
+ pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n",
+ tn_buf);
+ return -EOPNOTSUPP;
+ }
+
+ /* Save space in meta, we don't care about arguments other
+ * than 4th meta, shove it into arg1.
+ */
+ reg1 = cur_regs(env) + BPF_REG_4;
+
+ if (reg1->type != SCALAR_VALUE /* NULL ptr */ &&
+ reg1->type != PTR_TO_STACK &&
+ reg1->type != PTR_TO_MAP_VALUE &&
+ reg1->type != PTR_TO_PACKET) {
+ pr_vlog(env, "event_output: unsupported ptr type: %d\n",
+ reg1->type);
+ return -EOPNOTSUPP;
+ }
+
+ if (reg1->type == PTR_TO_STACK &&
+ !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL))
+ return -EOPNOTSUPP;
+
+ /* Warn user that on offload NFP may return success even if map
+ * is not going to accept the event, since the event output is
+ * fully async and device won't know the state of the map.
+ * There is also FW limitation on the event length.
+ *
+ * Lost events will not show up on the perf ring, driver
+ * won't see them at all. Events may also get reordered.
+ */
+ dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev,
+ "bpf: note: return codes and behavior of bpf_event_output() helper differs for offloaded programs!\n");
+ pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n");
+
+ if (!meta->func_id)
+ break;
+
+ if (reg1->type != meta->arg1.type) {
+ pr_vlog(env, "event_output: ptr type changed: %d %d\n",
+ meta->arg1.type, reg1->type);
+ return -EINVAL;
+ }
+ break;
+
default:
pr_vlog(env, "unsupported function id: %d\n", func_id);
return -EOPNOTSUPP;
@@ -410,6 +468,30 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
}
static int
+nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg;
+
+ if (reg->type == PTR_TO_CTX) {
+ if (nfp_prog->type == BPF_PROG_TYPE_XDP) {
+ /* XDP ctx accesses must be 4B in size */
+ switch (meta->insn.off) {
+ case offsetof(struct xdp_md, rx_queue_index):
+ if (nfp_prog->bpf->queue_select)
+ goto exit_check_ptr;
+ pr_vlog(env, "queue selection not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ pr_vlog(env, "unsupported store to context field\n");
+ return -EOPNOTSUPP;
+ }
+exit_check_ptr:
+ return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
+}
+
+static int
nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
struct bpf_verifier_env *env)
{
@@ -464,8 +546,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.src_reg);
if (is_mbpf_store(meta))
- return nfp_bpf_check_ptr(nfp_prog, meta, env,
- meta->insn.dst_reg);
+ return nfp_bpf_check_store(nfp_prog, meta, env);
+
if (is_mbpf_xadd(meta))
return nfp_bpf_check_xadd(nfp_prog, meta, env);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index b3567a596fc1..80df9a5d4217 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -183,17 +183,21 @@ static int
nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
const struct tc_action *action,
struct nfp_fl_pre_tunnel *pre_tun,
- enum nfp_flower_tun_type tun_type)
+ enum nfp_flower_tun_type tun_type,
+ struct net_device *netdev)
{
size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
+ struct net *net;
if (ip_tun->options_len)
return -EOPNOTSUPP;
+ net = dev_net(netdev);
+
set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
@@ -204,6 +208,7 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
set_tun->tun_id = ip_tun->key.tun_id;
+ set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
/* Complete pre_tunnel action. */
pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
@@ -511,7 +516,8 @@ nfp_flower_loop_action(const struct tc_action *a,
*a_len += sizeof(struct nfp_fl_pre_tunnel);
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
+ err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type,
+ netdev);
if (err)
return err;
*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index 3735c09d2112..577659f332e4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -258,9 +258,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
nfp_tunnel_keep_alive(app, skb);
break;
- case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH:
- /* Acks from the NFP that the route is added - ignore. */
- break;
default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
@@ -275,18 +272,49 @@ out:
void nfp_flower_cmsg_process_rx(struct work_struct *work)
{
+ struct sk_buff_head cmsg_joined;
struct nfp_flower_priv *priv;
struct sk_buff *skb;
priv = container_of(work, struct nfp_flower_priv, cmsg_work);
+ skb_queue_head_init(&cmsg_joined);
+
+ spin_lock_bh(&priv->cmsg_skbs_high.lock);
+ skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
+ spin_unlock_bh(&priv->cmsg_skbs_high.lock);
- while ((skb = skb_dequeue(&priv->cmsg_skbs)))
+ spin_lock_bh(&priv->cmsg_skbs_low.lock);
+ skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
+ spin_unlock_bh(&priv->cmsg_skbs_low.lock);
+
+ while ((skb = __skb_dequeue(&cmsg_joined)))
nfp_flower_cmsg_process_one_rx(priv->app, skb);
}
-void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
+static void
+nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
{
struct nfp_flower_priv *priv = app->priv;
+ struct sk_buff_head *skb_head;
+
+ if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
+ type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
+ skb_head = &priv->cmsg_skbs_high;
+ else
+ skb_head = &priv->cmsg_skbs_low;
+
+ if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
+ nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ skb_queue_tail(skb_head, skb);
+ schedule_work(&priv->cmsg_work);
+}
+
+void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
+{
struct nfp_flower_cmsg_hdr *cmsg_hdr;
cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
@@ -306,8 +334,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
nfp_flower_process_mtu_ack(app, skb)) {
/* Handle MTU acks outside wq to prevent RTNL conflict. */
dev_consume_skb_any(skb);
+ } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
+ /* Acks from the NFP that the route is added - ignore. */
+ dev_consume_skb_any(skb);
} else {
- skb_queue_tail(&priv->cmsg_skbs, skb);
- schedule_work(&priv->cmsg_work);
+ nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
}
}
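An editorial summary (not part of the patch) of the queueing scheme introduced above:

    /*	NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
     *	NFP_FLOWER_CMSG_TYPE_PORT_MOD	->  priv->cmsg_skbs_high
     *	all other queued types		->  priv->cmsg_skbs_low
     *
     * The worker splices the high-priority queue ahead of the low one,
     * presumably so replies the driver waits for (e.g. reify acks) are
     * not stuck behind bulk messages; each queue is capped at
     * NFP_FLOWER_WORKQ_MAX_SKBS before further skbs are dropped.
     */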
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 96bc0e33980c..bee4367a2c38 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -108,6 +108,8 @@
#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
+#define NFP_FLOWER_WORKQ_MAX_SKBS 30000
+
#define nfp_flower_cmsg_warn(app, fmt, args...) \
do { \
if (net_ratelimit()) \
@@ -188,7 +190,10 @@ struct nfp_fl_set_ipv4_udp_tun {
__be16 reserved;
__be64 tun_id __packed;
__be32 tun_type_index;
- __be32 extra[3];
+ __be16 reserved2;
+ u8 ttl;
+ u8 reserved3;
+ __be32 extra[2];
};
/* Metadata with L2 (1W/4B)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 6357e0720f43..4e67c0cbf9f0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -52,8 +52,6 @@
#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
-#define NFP_FLOWER_FRAME_HEADROOM 158
-
static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
return "FLOWER";
@@ -249,12 +247,16 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
err = -ENOMEM;
goto err_reprs_clean;
}
- RCU_INIT_POINTER(reprs->reprs[i], repr);
/* For now we only support 1 PF */
WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
port = nfp_port_alloc(app, port_type, repr);
+ if (IS_ERR(port)) {
+ err = PTR_ERR(port);
+ nfp_repr_free(repr);
+ goto err_reprs_clean;
+ }
if (repr_type == NFP_REPR_TYPE_PF) {
port->pf_id = i;
port->vnic = priv->nn->dp.ctrl_bar;
@@ -273,9 +275,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
+ nfp_repr_free(repr);
goto err_reprs_clean;
}
+ RCU_INIT_POINTER(reprs->reprs[i], repr);
nfp_info(app->cpp, "%s%d Representor(%s) created\n",
repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
repr->name);
@@ -346,27 +350,29 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
err = -ENOMEM;
goto err_reprs_clean;
}
- RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) {
err = PTR_ERR(port);
+ nfp_repr_free(repr);
goto err_reprs_clean;
}
err = nfp_port_init_phy_port(app->pf, app, port, i);
if (err) {
nfp_port_free(port);
+ nfp_repr_free(repr);
goto err_reprs_clean;
}
SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
- nfp_net_get_mac_addr(app->pf, port);
+ nfp_net_get_mac_addr(app->pf, repr, port);
cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
err = nfp_repr_init(app, repr,
cmsg_port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
+ nfp_repr_free(repr);
goto err_reprs_clean;
}
@@ -375,6 +381,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
eth_tbl->ports[i].base,
phys_port);
+ RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
phys_port, repr->name);
}
@@ -519,7 +526,8 @@ static int nfp_flower_init(struct nfp_app *app)
app->priv = app_priv;
app_priv->app = app;
- skb_queue_head_init(&app_priv->cmsg_skbs);
+ skb_queue_head_init(&app_priv->cmsg_skbs_high);
+ skb_queue_head_init(&app_priv->cmsg_skbs_low);
INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
init_waitqueue_head(&app_priv->reify_wait_queue);
@@ -549,7 +557,8 @@ static void nfp_flower_clean(struct nfp_app *app)
{
struct nfp_flower_priv *app_priv = app->priv;
- skb_queue_purge(&app_priv->cmsg_skbs);
+ skb_queue_purge(&app_priv->cmsg_skbs_high);
+ skb_queue_purge(&app_priv->cmsg_skbs_low);
flush_work(&app_priv->cmsg_work);
nfp_flower_metadata_cleanup(app);
@@ -557,22 +566,6 @@ static void nfp_flower_clean(struct nfp_app *app)
app->priv = NULL;
}
-static int
-nfp_flower_check_mtu(struct nfp_app *app, struct net_device *netdev,
- int new_mtu)
-{
- /* The flower fw reserves NFP_FLOWER_FRAME_HEADROOM bytes of the
- * supported max MTU to allow for appending tunnel headers. To prevent
- * unexpected behaviour this needs to be accounted for.
- */
- if (new_mtu > netdev->max_mtu - NFP_FLOWER_FRAME_HEADROOM) {
- nfp_err(app->cpp, "New MTU (%d) is not valid\n", new_mtu);
- return -EINVAL;
- }
-
- return 0;
-}
-
static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
bool ret;
@@ -654,7 +647,6 @@ const struct nfp_app_type app_flower = {
.init = nfp_flower_init,
.clean = nfp_flower_clean,
- .check_mtu = nfp_flower_check_mtu,
.repr_change_mtu = nfp_flower_repr_change_mtu,
.vnic_alloc = nfp_flower_vnic_alloc,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e030b3ce4510..733ff53cc601 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -47,6 +47,7 @@
struct net_device;
struct nfp_app;
+#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff)
#define NFP_FL_STATS_ENTRY_RS BIT(20)
#define NFP_FL_STATS_ELEM_RS 4
#define NFP_FL_REPEATED_HASH_MAX BIT(17)
@@ -107,7 +108,10 @@ struct nfp_mtu_conf {
* @mask_table: Hash table used to store masks
* @flow_table: Hash table used to store flower rules
* @cmsg_work: Workqueue for control messages processing
- * @cmsg_skbs: List of skbs for control message processing
+ * @cmsg_skbs_high: List of higher priority skbs for control message
+ * processing
+ * @cmsg_skbs_low: List of lower priority skbs for control message
+ * processing
* @nfp_mac_off_list: List of MAC addresses to offload
* @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs
* @nfp_ipv4_off_list: List of IPv4 addresses to offload
@@ -136,7 +140,8 @@ struct nfp_flower_priv {
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
struct work_struct cmsg_work;
- struct sk_buff_head cmsg_skbs;
+ struct sk_buff_head cmsg_skbs_high;
+ struct sk_buff_head cmsg_skbs_low;
struct list_head nfp_mac_off_list;
struct list_head nfp_mac_index_list;
struct list_head nfp_ipv4_off_list;
@@ -185,9 +190,11 @@ struct nfp_fl_payload {
spinlock_t lock; /* lock stats */
struct nfp_fl_stats stats;
__be32 nfp_tun_ipv4_addr;
+ struct net_device *ingress_dev;
char *unmasked_data;
char *mask_data;
char *action_data;
+ bool ingress_offload;
};
struct nfp_fl_stats_frame {
@@ -212,12 +219,14 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_flow);
int nfp_compile_flow_metadata(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
- struct nfp_fl_payload *nfp_flow);
+ struct nfp_fl_payload *nfp_flow,
+ struct net_device *netdev);
int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow);
struct nfp_fl_payload *
-nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
+nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
+ struct net_device *netdev, __be32 host_ctx);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index db977cf8e933..21668aa435e8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -99,14 +99,18 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
-nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie)
+nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
+ struct net_device *netdev, __be32 host_ctx)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flower_entry;
hash_for_each_possible_rcu(priv->flow_table, flower_entry, link,
tc_flower_cookie)
- if (flower_entry->tc_flower_cookie == tc_flower_cookie)
+ if (flower_entry->tc_flower_cookie == tc_flower_cookie &&
+ (!netdev || flower_entry->ingress_dev == netdev) &&
+ (host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
+ flower_entry->meta.host_ctx_id == host_ctx))
return flower_entry;
return NULL;
@@ -121,13 +125,11 @@ nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
flower_cookie = be64_to_cpu(stats->stats_cookie);
rcu_read_lock();
- nfp_flow = nfp_flower_search_fl_table(app, flower_cookie);
+ nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL,
+ stats->stats_con_id);
if (!nfp_flow)
goto exit_rcu_unlock;
- if (nfp_flow->meta.host_ctx_id != stats->stats_con_id)
- goto exit_rcu_unlock;
-
spin_lock(&nfp_flow->lock);
nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
@@ -317,7 +319,8 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
int nfp_compile_flow_metadata(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
- struct nfp_fl_payload *nfp_flow)
+ struct nfp_fl_payload *nfp_flow,
+ struct net_device *netdev)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *check_entry;
@@ -348,7 +351,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
nfp_flow->stats.bytes = 0;
nfp_flow->stats.used = jiffies;
- check_entry = nfp_flower_search_fl_table(app, flow->cookie);
+ check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
+ NFP_FL_STATS_CTX_DONT_CARE);
if (check_entry) {
if (nfp_release_stats_entry(app, stats_cxt))
return -EINVAL;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 114d2ab02a38..70ec9d821b91 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -345,7 +345,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
static struct nfp_fl_payload *
-nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
{
struct nfp_fl_payload *flow_pay;
@@ -371,6 +371,8 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
flow_pay->meta.flags = 0;
spin_lock_init(&flow_pay->lock);
+ flow_pay->ingress_offload = !egress;
+
return flow_pay;
err_free_mask:
@@ -402,8 +404,20 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
+ struct net_device *ingr_dev;
int err;
+ ingr_dev = egress ? NULL : netdev;
+ flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
+ NFP_FL_STATS_CTX_DONT_CARE);
+ if (flow_pay) {
+ /* Ignore as duplicate if it has been added by different cb. */
+ if (flow_pay->ingress_offload && egress)
+ return 0;
+ else
+ return -EOPNOTSUPP;
+ }
+
key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
if (!key_layer)
return -ENOMEM;
@@ -413,12 +427,14 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_free_key_ls;
- flow_pay = nfp_flower_allocate_new(key_layer);
+ flow_pay = nfp_flower_allocate_new(key_layer, egress);
if (!flow_pay) {
err = -ENOMEM;
goto err_free_key_ls;
}
+ flow_pay->ingress_dev = egress ? NULL : netdev;
+
err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
tun_type);
if (err)
@@ -428,7 +444,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
- err = nfp_compile_flow_metadata(app, flow, flow_pay);
+ err = nfp_compile_flow_metadata(app, flow, flow_pay,
+ flow_pay->ingress_dev);
if (err)
goto err_destroy_flow;
@@ -462,6 +479,7 @@ err_free_key_ls:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure
+ * @egress: Netdev is the egress dev.
*
* Removes a flow from the repeated hash structure and clears the
* action payload.
@@ -470,15 +488,18 @@ err_free_key_ls:
*/
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_fl_payload *nfp_flow;
+ struct net_device *ingr_dev;
int err;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
+ ingr_dev = egress ? NULL : netdev;
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
+ NFP_FL_STATS_CTX_DONT_CARE);
if (!nfp_flow)
- return -ENOENT;
+ return egress ? 0 : -ENOENT;
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
@@ -505,7 +526,9 @@ err_free_flow:
/**
* nfp_flower_get_stats() - Populates flow stats obtained from hardware.
* @app: Pointer to the APP handle
+ * @netdev: Netdev structure.
* @flow: TC flower classifier offload structure
+ * @egress: Netdev is the egress dev.
*
* Populates a flow statistics structure which corresponds to a
* specific flow.
@@ -513,14 +536,21 @@ err_free_flow:
* Return: negative value on error, 0 if stats populated successfully.
*/
static int
-nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
+nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_fl_payload *nfp_flow;
+ struct net_device *ingr_dev;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
+ ingr_dev = egress ? NULL : netdev;
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
+ NFP_FL_STATS_CTX_DONT_CARE);
if (!nfp_flow)
return -EINVAL;
+ if (nfp_flow->ingress_offload && egress)
+ return 0;
+
spin_lock_bh(&nfp_flow->lock);
tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
nfp_flow->stats.pkts, nfp_flow->stats.used);
@@ -543,9 +573,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
case TC_CLSFLOWER_REPLACE:
return nfp_flower_add_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_DESTROY:
- return nfp_flower_del_offload(app, netdev, flower);
+ return nfp_flower_del_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_STATS:
- return nfp_flower_get_stats(app, flower);
+ return nfp_flower_get_stats(app, netdev, flower, egress);
}
return -EOPNOTSUPP;
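With both ingress and egress callbacks able to offer the same flow, the nfp_flower_add_offload() change earlier in this file treats an egress offer for a rule already offloaded on ingress as a harmless duplicate and rejects any other repeat. A small stand-alone sketch of that decision (not driver code; names invented for the example):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Returns 0 (accepted as a no-op duplicate) or -EOPNOTSUPP (conflicting duplicate). */
static int add_duplicate(bool existing_is_ingress, bool new_offer_is_egress)
{
	if (existing_is_ingress && new_offer_is_egress)
		return 0;		/* same rule arriving via the egress cb: ignore */
	return -EOPNOTSUPP;		/* genuinely conflicting duplicate              */
}

int main(void)
{
	printf("%d\n", add_duplicate(true, true));	/* 0            */
	printf("%d\n", add_duplicate(true, false));	/* -EOPNOTSUPP  */
	printf("%d\n", add_duplicate(false, true));	/* -EOPNOTSUPP  */
	return 0;
}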
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 6aedef0ad433..0e0253c7e17b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
index 2a2f2fbc8850..b9618c37403f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
@@ -69,7 +69,7 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
if (err)
return err < 0 ? err : 0;
- nfp_net_get_mac_addr(app->pf, nn->port);
+ nfp_net_get_mac_addr(app->pf, nn->dp.netdev, nn->port);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index 5f2b2f24f4fa..faa4e131c136 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -183,16 +183,18 @@ enum shf_sc {
#define OP_ALU_DST_LMEXTN 0x80000000000ULL
enum alu_op {
- ALU_OP_NONE = 0x00,
- ALU_OP_ADD = 0x01,
- ALU_OP_NOT = 0x04,
- ALU_OP_ADD_2B = 0x05,
- ALU_OP_AND = 0x08,
- ALU_OP_SUB_C = 0x0d,
- ALU_OP_ADD_C = 0x11,
- ALU_OP_OR = 0x14,
- ALU_OP_SUB = 0x15,
- ALU_OP_XOR = 0x18,
+ ALU_OP_NONE = 0x00,
+ ALU_OP_ADD = 0x01,
+ ALU_OP_NOT = 0x04,
+ ALU_OP_ADD_2B = 0x05,
+ ALU_OP_AND = 0x08,
+ ALU_OP_AND_NOT_A = 0x0c,
+ ALU_OP_SUB_C = 0x0d,
+ ALU_OP_AND_NOT_B = 0x10,
+ ALU_OP_ADD_C = 0x11,
+ ALU_OP_OR = 0x14,
+ ALU_OP_SUB = 0x15,
+ ALU_OP_XOR = 0x18,
};
enum alu_dst_ab {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index eb0fc614673d..b1e67cf4257a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -175,8 +175,9 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
return ret;
devlink_port_type_eth_set(&port->dl_port, port->netdev);
- if (eth_port.is_split)
- devlink_port_split_set(&port->dl_port, eth_port.label_port);
+ devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ eth_port.label_port, eth_port.is_split,
+ eth_port.label_subport);
devlink = priv_to_devlink(app->pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index c4b1f344b4da..0ade122805ad 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -486,6 +486,10 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_disable_msix;
}
+ err = nfp_resource_table_init(pf->cpp);
+ if (err)
+ goto err_cpp_free;
+
pf->hwinfo = nfp_hwinfo_read(pf->cpp);
dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n",
@@ -548,6 +552,7 @@ err_fw_unload:
vfree(pf->dumpspec);
err_hwinfo_free:
kfree(pf->hwinfo);
+err_cpp_free:
nfp_cpp_free(pf->cpp);
err_disable_msix:
destroy_workqueue(pf->wq);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index add46e28212b..42211083b51f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -171,7 +171,9 @@ void nfp_net_pci_remove(struct nfp_pf *pf);
int nfp_hwmon_register(struct nfp_pf *pf);
void nfp_hwmon_unregister(struct nfp_pf *pf);
-void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port);
+void
+nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
+ struct nfp_port *port);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1eb6549f2a54..d9111c077699 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1722,7 +1722,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
act = bpf_prog_run_xdp(xdp_prog, &xdp);
- pkt_len -= xdp.data - orig_data;
+ pkt_len = xdp.data_end - xdp.data;
pkt_off += xdp.data - orig_data;
switch (act) {
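The pkt_len fix above matters because an XDP program may move both ends of the frame; subtracting only the head adjustment under-reports a tail trim. A small stand-alone sketch of the arithmetic (buffer layout and sizes are made up for the example):

#include <assert.h>

int main(void)
{
	unsigned char buf[256];
	unsigned char *orig_data = buf + 32;
	unsigned char *data = orig_data, *data_end = orig_data + 100; /* 100-byte frame */

	data += 14;	/* bpf_xdp_adjust_head(): pop an Ethernet header */
	data_end -= 4;	/* bpf_xdp_adjust_tail(): trim a trailer         */

	int old_way = 100 - (int)(data - orig_data);	/* 86: misses the tail trim */
	int new_way = (int)(data_end - data);		/* 82: the actual payload   */

	assert(old_way == 86 && new_way == 82);
	return 0;
}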
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 15fa47f622aa..45cd2092e498 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -67,23 +67,26 @@
/**
* nfp_net_get_mac_addr() - Get the MAC address.
* @pf: NFP PF handle
+ * @netdev: net_device to set MAC address on
* @port: NFP port structure
*
* First try to get the MAC address from NSP ETH table. If that
* fails, generate a random address.
*/
-void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port)
+void
+nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
+ struct nfp_port *port)
{
struct nfp_eth_table_port *eth_port;
eth_port = __nfp_port_get_eth_port(port);
if (!eth_port) {
- eth_hw_addr_random(port->netdev);
+ eth_hw_addr_random(netdev);
return;
}
- ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr);
- ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr);
+ ether_addr_copy(netdev->dev_addr, eth_port->mac_addr);
+ ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
}
static struct nfp_eth_table_port *
@@ -511,16 +514,18 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
return PTR_ERR(mem);
}
- min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
- pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
- "net.macstats", min_size,
- &pf->mac_stats_bar);
- if (IS_ERR(pf->mac_stats_mem)) {
- if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
- err = PTR_ERR(pf->mac_stats_mem);
- goto err_unmap_ctrl;
+ if (pf->eth_tbl) {
+ min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
+ pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
+ "net.macstats", min_size,
+ &pf->mac_stats_bar);
+ if (IS_ERR(pf->mac_stats_mem)) {
+ if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
+ err = PTR_ERR(pf->mac_stats_mem);
+ goto err_unmap_ctrl;
+ }
+ pf->mac_stats_mem = NULL;
}
- pf->mac_stats_mem = NULL;
}
pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 0cd077addb26..6e79da91e475 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -348,12 +348,17 @@ err_clean:
return err;
}
-static void nfp_repr_free(struct nfp_repr *repr)
+static void __nfp_repr_free(struct nfp_repr *repr)
{
free_percpu(repr->stats);
free_netdev(repr->netdev);
}
+void nfp_repr_free(struct net_device *netdev)
+{
+ __nfp_repr_free(netdev_priv(netdev));
+}
+
struct net_device *nfp_repr_alloc(struct nfp_app *app)
{
struct net_device *netdev;
@@ -385,7 +390,7 @@ static void nfp_repr_clean_and_free(struct nfp_repr *repr)
nfp_info(repr->app->cpp, "Destroying Representor(%s)\n",
repr->netdev->name);
nfp_repr_clean(repr);
- nfp_repr_free(repr);
+ __nfp_repr_free(repr);
}
void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index a621e8ff528e..cd756a15445f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -123,6 +123,7 @@ void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev);
+void nfp_repr_free(struct net_device *netdev);
struct net_device *nfp_repr_alloc(struct nfp_app *app);
void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs);
void nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index ced62d112aa2..f44d0a857314 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -94,6 +94,8 @@ int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
/* MAC Statistics Accumulator */
#define NFP_RESOURCE_MAC_STATISTICS "mac.stat"
+int nfp_resource_table_init(struct nfp_cpp *cpp);
+
struct nfp_resource *
nfp_resource_acquire(struct nfp_cpp *cpp, const char *name);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index cd678323bacb..a0e336bd1d85 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -1330,6 +1330,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
/* Finished with card initialization. */
dev_info(&pdev->dev,
"Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n");
+ pcie_print_link_status(pdev);
nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
if (!nfp) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index c8f2c064cce3..4e19add1c539 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -295,6 +295,8 @@ void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
+int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
+ unsigned long long address);
/**
* nfp_cppcore_pcie_unit() - Get PCI Unit of a CPP handle
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
index f7b958181126..c88bf673cb76 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -59,6 +59,11 @@ static u32 nfp_mutex_unlocked(u16 interface)
return (u32)interface << 16 | 0x0000;
}
+static u32 nfp_mutex_owner(u32 val)
+{
+ return val >> 16;
+}
+
static bool nfp_mutex_is_locked(u32 val)
{
return (val & 0xffff) == 0x000f;
@@ -211,8 +216,11 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
break;
err = msleep_interruptible(timeout_ms);
- if (err != 0)
+ if (err != 0) {
+ nfp_info(mutex->cpp,
+ "interrupted waiting for NFP mutex\n");
return -ERESTARTSYS;
+ }
if (time_is_before_eq_jiffies(warn_at)) {
warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
@@ -348,3 +356,43 @@ int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
}
+
+/**
+ * nfp_cpp_mutex_reclaim() - Unlock mutex if held by local endpoint
+ * @cpp: NFP CPP handle
+ * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
+ * @address: Offset into the address space of the NFP CPP target ID
+ *
+ * Release lock if held by local system. Extreme care is advised; call only
+ * when no local lock users can exist.
+ *
+ * Return: 0 if the lock was OK, 1 if locked by us, -errno on invalid mutex
+ */
+int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
+ unsigned long long address)
+{
+ const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
+ const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
+ u16 interface = nfp_cpp_interface(cpp);
+ int err;
+ u32 tmp;
+
+ err = nfp_cpp_mutex_validate(interface, &target, address);
+ if (err)
+ return err;
+
+ /* Check lock */
+ err = nfp_cpp_readl(cpp, mur, address, &tmp);
+ if (err < 0)
+ return err;
+
+ if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface)
+ return 0;
+
+ /* Bust the lock */
+ err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface));
+ if (err < 0)
+ return err;
+
+ return 1;
+}
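nfp_cpp_mutex_reclaim() only busts a lock whose owner field matches the local interface, relying on the word layout read back through the mur command: owner interface in the upper 16 bits, 0x000f in the lower 16 bits while locked, 0x0000 when unlocked. A stand-alone sketch of that test (user-space, invented names):

#include <stdint.h>
#include <stdio.h>

static uint32_t mutex_locked(uint16_t interface)   { return (uint32_t)interface << 16 | 0x000f; }
static uint32_t mutex_unlocked(uint16_t interface) { return (uint32_t)interface << 16 | 0x0000; }
static uint16_t mutex_owner(uint32_t val)          { return val >> 16; }
static int mutex_is_locked(uint32_t val)           { return (val & 0xffff) == 0x000f; }

/* Returns 1 if the stale lock would be busted, 0 if it is left alone. */
static int would_reclaim(uint32_t val, uint16_t local_interface)
{
	if (!mutex_is_locked(val) || mutex_owner(val) != local_interface)
		return 0;
	return 1;
}

int main(void)
{
	uint16_t us = 0x1001, them = 0x2002;

	printf("%d\n", would_reclaim(mutex_locked(us), us));	/* 1: stale lock held by us */
	printf("%d\n", would_reclaim(mutex_locked(them), us));	/* 0: held by another host  */
	printf("%d\n", would_reclaim(mutex_unlocked(us), us));	/* 0: nothing to reclaim    */
	return 0;
}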
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 99bb679a9801..2abee0fe3a7c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -281,8 +281,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
if ((*reg & mask) == val)
return 0;
- if (msleep_interruptible(25))
- return -ERESTARTSYS;
+ msleep(25);
if (time_after(start_time, wait_until))
return -ETIMEDOUT;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index 7e14725055c7..2dd89dba9311 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -338,3 +338,62 @@ u64 nfp_resource_size(struct nfp_resource *res)
{
return res->size;
}
+
+/**
+ * nfp_resource_table_init() - Run initial checks on the resource table
+ * @cpp: NFP CPP handle
+ *
+ * Start-of-day init procedure for resource table. Must be called before
+ * any local resource table users may exist.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int nfp_resource_table_init(struct nfp_cpp *cpp)
+{
+ struct nfp_cpp_mutex *dev_mutex;
+ int i, err;
+
+ err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET,
+ NFP_RESOURCE_TBL_BASE);
+ if (err < 0) {
+ nfp_err(cpp, "Error: failed to reclaim resource table mutex\n");
+ return err;
+ }
+ if (err)
+ nfp_warn(cpp, "Warning: busted main resource table mutex\n");
+
+ dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
+ NFP_RESOURCE_TBL_BASE,
+ NFP_RESOURCE_TBL_KEY);
+ if (!dev_mutex)
+ return -ENOMEM;
+
+ if (nfp_cpp_mutex_lock(dev_mutex)) {
+ nfp_err(cpp, "Error: failed to claim resource table mutex\n");
+ nfp_cpp_mutex_free(dev_mutex);
+ return -EINVAL;
+ }
+
+ /* Resource 0 is the dev_mutex, start from 1 */
+ for (i = 1; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
+ u64 addr = NFP_RESOURCE_TBL_BASE +
+ sizeof(struct nfp_resource_entry) * i;
+
+ err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET, addr);
+ if (err < 0) {
+ nfp_err(cpp,
+ "Error: failed to reclaim resource %d mutex\n",
+ i);
+ goto err_unlock;
+ }
+ if (err)
+ nfp_warn(cpp, "Warning: busted resource %d mutex\n", i);
+ }
+
+ err = 0;
+err_unlock:
+ nfp_cpp_mutex_unlock(dev_mutex);
+ nfp_cpp_mutex_free(dev_mutex);
+
+ return err;
+}
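nfp_resource_table_init() treats each reclaim return value as tri-state: negative aborts start-of-day init, 1 means a stale lock was busted and only warrants a warning, 0 means the entry was clean. A stand-alone sketch of that handling (messages and values below are illustrative):

#include <stdio.h>

static int handle_reclaim_result(int err, int idx)
{
	if (err < 0) {
		fprintf(stderr, "Error: failed to reclaim resource %d mutex\n", idx);
		return err;	/* abort start-of-day init */
	}
	if (err)
		fprintf(stderr, "Warning: busted resource %d mutex\n", idx);
	return 0;		/* continue with the next entry */
}

int main(void)
{
	handle_reclaim_result(0, 1);	/* clean          */
	handle_reclaim_result(1, 2);	/* warn, carry on */
	handle_reclaim_result(-22, 3);	/* -EINVAL: stop  */
	return 0;
}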
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 27364b7572fc..b092894dd128 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1170,7 +1170,7 @@ static void *nixge_get_nvmem_address(struct device *dev)
cell = nvmem_cell_get(dev, "address");
if (IS_ERR(cell))
- return cell;
+ return NULL;
mac = nvmem_cell_read(cell, &cell_size);
nvmem_cell_put(cell);
@@ -1183,7 +1183,7 @@ static int nixge_probe(struct platform_device *pdev)
struct nixge_priv *priv;
struct net_device *ndev;
struct resource *dmares;
- const char *mac_addr;
+ const u8 *mac_addr;
int err;
ndev = alloc_etherdev(sizeof(*priv));
@@ -1202,10 +1202,12 @@ static int nixge_probe(struct platform_device *pdev)
ndev->max_mtu = NIXGE_JUMBO_MTU;
mac_addr = nixge_get_nvmem_address(&pdev->dev);
- if (mac_addr && is_valid_ether_addr(mac_addr))
+ if (mac_addr && is_valid_ether_addr(mac_addr)) {
ether_addr_copy(ndev->dev_addr, mac_addr);
- else
+ kfree(mac_addr);
+ } else {
eth_hw_addr_random(ndev);
+ }
priv = netdev_priv(ndev);
priv->ndev = ndev;
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index c70cf2ad81c0..a0acb94d65f0 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
- qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
+ qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_mng_tlv.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index e07460a68d30..00db3401b898 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -92,6 +92,8 @@ struct qed_eth_cb_ops;
struct qed_dev_info;
union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;
+enum qed_mfw_tlv_type;
+union qed_mfw_tlv_data;
/* helpers */
#define QED_MFW_GET_FIELD(name, field) \
@@ -439,6 +441,59 @@ struct qed_fw_data {
u32 init_ops_size;
};
+enum qed_mf_mode_bit {
+ /* Supports PF-classification based on tag */
+ QED_MF_OVLAN_CLSS,
+
+ /* Supports PF-classification based on MAC */
+ QED_MF_LLH_MAC_CLSS,
+
+ /* Supports PF-classification based on protocol type */
+ QED_MF_LLH_PROTO_CLSS,
+
+ /* Requires a default PF to be set */
+ QED_MF_NEED_DEF_PF,
+
+ /* Allow LL2 to multicast/broadcast */
+ QED_MF_LL2_NON_UNICAST,
+
+ /* Allow Cross-PF [& child VFs] Tx-switching */
+ QED_MF_INTER_PF_SWITCH,
+
+ /* Unified Fabric Port support enabled */
+ QED_MF_UFP_SPECIFIC,
+
+ /* Disable Accelerated Receive Flow Steering (aRFS) */
+ QED_MF_DISABLE_ARFS,
+
+ /* Use vlan for steering */
+ QED_MF_8021Q_TAGGING,
+
+ /* Use stag for steering */
+ QED_MF_8021AD_TAGGING,
+
+ /* Allow DSCP to TC mapping */
+ QED_MF_DSCP_TO_TC_MAP,
+};
+
+enum qed_ufp_mode {
+ QED_UFP_MODE_ETS,
+ QED_UFP_MODE_VNIC_BW,
+ QED_UFP_MODE_UNKNOWN
+};
+
+enum qed_ufp_pri_type {
+ QED_UFP_PRI_OS,
+ QED_UFP_PRI_VNIC,
+ QED_UFP_PRI_UNKNOWN
+};
+
+struct qed_ufp_info {
+ enum qed_ufp_pri_type pri_type;
+ enum qed_ufp_mode mode;
+ u8 tc;
+};
+
enum BAR_ID {
BAR_ID_0, /* used for GRC */
BAR_ID_1 /* Used for doorbells */
@@ -460,6 +515,10 @@ struct qed_simd_fp_handler {
void (*func)(void *);
};
+enum qed_slowpath_wq_flag {
+ QED_SLOWPATH_MFW_TLV_REQ,
+};
+
struct qed_hwfn {
struct qed_dev *cdev;
u8 my_id; /* ID inside the PF */
@@ -547,6 +606,8 @@ struct qed_hwfn {
struct qed_dcbx_info *p_dcbx_info;
+ struct qed_ufp_info ufp_info;
+
struct qed_dmae_info dmae_info;
/* QM init */
@@ -587,6 +648,9 @@ struct qed_hwfn {
#endif
struct z_stream_s *stream;
+ struct workqueue_struct *slowpath_wq;
+ struct delayed_work slowpath_task;
+ unsigned long slowpath_task_flags;
};
struct pci_params {
@@ -669,10 +733,8 @@ struct qed_dev {
u8 num_funcs_in_port;
u8 path_id;
- enum qed_mf_mode mf_mode;
-#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
-#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
-#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)
+
+ unsigned long mf_bits;
int pcie_width;
int pcie_speed;
@@ -853,5 +915,9 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
+int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
+int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
+ enum qed_mfw_tlv_type type,
+ union qed_mfw_tlv_data *tlv_data);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 449777f21237..8f31406ec894 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -274,8 +274,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
u32 pri_tc_tbl, int count, u8 dcbx_version)
{
enum dcbx_protocol_type type;
+ bool enable, ieee, eth_tlv;
u8 tc, priority_map;
- bool enable, ieee;
u16 protocol_id;
int priority;
int i;
@@ -283,6 +283,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
+ eth_tlv = false;
/* Parse APP TLV */
for (i = 0; i < count; i++) {
protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
@@ -304,13 +305,22 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
* indication, but we only got here if there was an
* app tlv for the protocol, so dcbx must be enabled.
*/
- enable = !(type == DCBX_PROTOCOL_ETH);
+ if (type == DCBX_PROTOCOL_ETH) {
+ enable = false;
+ eth_tlv = true;
+ } else {
+ enable = true;
+ }
qed_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
}
+ /* If Eth TLV is not detected, use UFP TC as default TC */
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && !eth_tlv)
+ p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc;
+
/* Update ramrod protocol data and hw_info fields
* with default info when corresponding APP TLV's are not detected.
* The enabled field has a different logic for ethernet as only for
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 4926c5532fba..39124b594a36 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -419,6 +419,7 @@ struct phy_defs {
#define NUM_RSS_MEM_TYPES 5
#define NUM_BIG_RAM_TYPES 3
+#define BIG_RAM_NAME_LEN 3
#define NUM_PHY_TBUS_ADDRESSES 2048
#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
@@ -3650,8 +3651,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
: 128;
- strscpy(type_name, big_ram->instance_name, sizeof(type_name));
- strscpy(mem_name, big_ram->instance_name, sizeof(mem_name));
+ strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
+ strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
/* Dump memory header */
offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -7778,6 +7779,57 @@ int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
}
+int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id, u32 *length)
+{
+ struct qed_nvm_image_att image_att;
+ int rc;
+
+ *length = 0;
+ rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
+ if (rc)
+ return rc;
+
+ *length = image_att.length;
+
+ return rc;
+}
+
+int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes, enum qed_nvm_images image_id)
+{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ u32 len_rounded, i;
+ __be32 val;
+ int rc;
+
+ *num_dumped_bytes = 0;
+ rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
+ if (rc)
+ return rc;
+
+ DP_NOTICE(p_hwfn->cdev,
+ "Collecting a debug feature [\"nvram image %d\"]\n",
+ image_id);
+
+ len_rounded = roundup(len_rounded, sizeof(u32));
+ rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
+ if (rc)
+ return rc;
+
+ /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
+ if (image_id != QED_NVM_IMAGE_NVM_META)
+ for (i = 0; i < len_rounded; i += 4) {
+ val = cpu_to_be32(*(u32 *)(buffer + i));
+ *(u32 *)(buffer + i) = val;
+ }
+
+ *num_dumped_bytes = len_rounded;
+
+ return rc;
+}
+
int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes)
{
@@ -7831,6 +7883,9 @@ enum debug_print_features {
IGU_FIFO = 6,
PHY = 7,
FW_ASSERTS = 8,
+ NVM_CFG1 = 9,
+ DEFAULT_CFG = 10,
+ NVM_META = 11,
};
static u32 qed_calc_regdump_header(enum debug_print_features feature,
@@ -7965,13 +8020,61 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
}
+ /* nvm cfg1 */
+ rc = qed_dbg_nvm_image(cdev,
+ (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
+ &feature_size, QED_NVM_IMAGE_NVM_CFG1);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(NVM_CFG1, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
+ }
+
+ /* nvm default */
+ rc = qed_dbg_nvm_image(cdev,
+ (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
+ &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
+ rc);
+ }
+
+ /* nvm meta */
+ rc = qed_dbg_nvm_image(cdev,
+ (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
+ &feature_size, QED_NVM_IMAGE_NVM_META);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(NVM_META, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
+ }
+
return 0;
}
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ u32 regs_len = 0, image_len = 0;
u8 cur_engine, org_engine;
- u32 regs_len = 0;
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
@@ -7993,6 +8096,15 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
/* Engine common */
regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
return regs_len;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index d2ad5e92c74f..560528962658 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1149,18 +1149,10 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
return -EINVAL;
}
- switch (p_hwfn->cdev->mf_mode) {
- case QED_MF_DEFAULT:
- case QED_MF_NPAR:
- hw_mode |= 1 << MODE_MF_SI;
- break;
- case QED_MF_OVLAN:
+ if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
hw_mode |= 1 << MODE_MF_SD;
- break;
- default:
- DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+ else
hw_mode |= 1 << MODE_MF_SI;
- }
hw_mode |= 1 << MODE_ASIC;
@@ -1507,6 +1499,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
p_hwfn->hw_info.ovlan);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
+ 1);
}
/* Enable classification by MAC if needed */
@@ -1557,7 +1554,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* send function start command */
rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
- p_hwfn->cdev->mf_mode,
allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
@@ -1644,6 +1640,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
int rc = 0, mfw_rc, i;
+ u16 ether_type;
if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
@@ -1677,6 +1674,24 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
if (rc)
return rc;
+ if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING,
+ &cdev->mf_bits) ||
+ test_bit(QED_MF_8021AD_TAGGING,
+ &cdev->mf_bits))) {
+ if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits))
+ ether_type = ETH_P_8021Q;
+ else
+ ether_type = ETH_P_8021AD;
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
+ ether_type);
+ }
+
qed_fill_load_req_params(&load_req_params,
p_params->p_drv_load_params);
rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
@@ -2639,31 +2654,57 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
link->pause.autoneg,
p_caps->default_eee, p_caps->eee_lpi_timer);
- /* Read Multi-function information from shmem */
- addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- offsetof(struct nvm_cfg1, glob) +
- offsetof(struct nvm_cfg1_glob, generic_cont0);
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ struct qed_dev *cdev = p_hwfn->cdev;
- generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, generic_cont0);
- mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
- NVM_CFG1_GLOB_MF_MODE_OFFSET;
+ generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
- switch (mf_mode) {
- case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
- p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
- break;
- case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
- p_hwfn->cdev->mf_mode = QED_MF_NPAR;
- break;
- case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
- p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
- break;
+ mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+ NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS);
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
+ BIT(QED_MF_LLH_PROTO_CLSS) |
+ BIT(QED_MF_UFP_SPECIFIC) |
+ BIT(QED_MF_8021Q_TAGGING);
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_BD:
+ cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
+ BIT(QED_MF_LLH_PROTO_CLSS) |
+ BIT(QED_MF_8021AD_TAGGING);
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
+ BIT(QED_MF_LLH_PROTO_CLSS) |
+ BIT(QED_MF_LL2_NON_UNICAST) |
+ BIT(QED_MF_INTER_PF_SWITCH);
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
+ BIT(QED_MF_LLH_PROTO_CLSS) |
+ BIT(QED_MF_LL2_NON_UNICAST);
+ if (QED_IS_BB(p_hwfn->cdev))
+ cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF);
+ break;
+ }
+
+ DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+ cdev->mf_bits);
}
- DP_INFO(p_hwfn, "Multi function mode is %08x\n",
- p_hwfn->cdev->mf_mode);
- /* Read Multi-function information from shmem */
+ DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+ p_hwfn->cdev->mf_bits);
+
+ /* Read device capabilities information from shmem */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
offsetof(struct nvm_cfg1, glob) +
offsetof(struct nvm_cfg1_glob, device_capabilities);
@@ -2856,6 +2897,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
qed_mcp_cmd_port_init(p_hwfn, p_ptt);
qed_get_eee_caps(p_hwfn, p_ptt);
+
+ qed_mcp_read_ufp_config(p_hwfn, p_ptt);
}
if (qed_mcp_is_init(p_hwfn)) {
@@ -3462,7 +3505,7 @@ int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
u32 high = 0, low = 0, en;
int i;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
return 0;
qed_llh_mac_to_filter(&high, &low, p_filter);
@@ -3507,7 +3550,7 @@ void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
u32 high = 0, low = 0;
int i;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
return;
qed_llh_mac_to_filter(&high, &low, p_filter);
@@ -3549,7 +3592,7 @@ qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
u32 high = 0, low = 0, en;
int i;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
return 0;
switch (type) {
@@ -3647,7 +3690,7 @@ qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
u32 high = 0, low = 0;
int i;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
return;
switch (type) {
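The qed change above replaces the single mf_mode enum with a capability bitmask, so each NVM multi-function mode can advertise an independent set of features and call sites test one bit instead of enumerating modes. A stand-alone sketch of the idea (bit names and the mode chosen below are simplified placeholders, not the driver's definitions):

#include <stdio.h>

#define BIT(n)	(1UL << (n))

enum mf_bit { MF_OVLAN_CLSS, MF_LLH_MAC_CLSS, MF_LLH_PROTO_CLSS, MF_UFP_SPECIFIC, MF_8021Q_TAGGING };

static unsigned long mf_bits_for_ufp_mode(void)
{
	/* Mirrors the shape of the NVM_CFG1_GLOB_MF_MODE_UFP case in the hunk above. */
	return BIT(MF_OVLAN_CLSS) | BIT(MF_LLH_PROTO_CLSS) |
	       BIT(MF_UFP_SPECIFIC) | BIT(MF_8021Q_TAGGING);
}

int main(void)
{
	unsigned long mf_bits = mf_bits_for_ufp_mode();

	/* A call site asks a single question, e.g. "may I add a MAC filter?" */
	if (mf_bits & BIT(MF_LLH_MAC_CLSS))
		printf("MAC classification supported\n");
	else
		printf("MAC classification not supported\n");	/* printed for UFP mode */
	return 0;
}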
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index 2dc9b312a795..cc1b373c0ace 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -313,6 +313,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
p_data->flags = p_conn->flags;
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ SET_FIELD(p_data->flags,
+ FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1);
p_data->def_q_idx = p_conn->def_q_idx;
return qed_spq_post(p_hwfn, p_ent, NULL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 7f5ec42dde48..8e1e6e1eb40e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -11863,6 +11863,8 @@ struct public_global {
u32 running_bundle_id;
s32 external_temperature;
u32 mdump_reason;
+ u32 data_ptr;
+ u32 data_size;
};
struct fw_flr_mb {
@@ -11993,6 +11995,16 @@ struct public_port {
#define EEE_REMOTE_TW_TX_OFFSET 0
#define EEE_REMOTE_TW_RX_MASK 0xffff0000
#define EEE_REMOTE_TW_RX_OFFSET 16
+
+ u32 oem_cfg_port;
+#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
+#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
+#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
+#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
+#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
+#define OEM_CFG_SCHED_TYPE_OFFSET 2
+#define OEM_CFG_SCHED_TYPE_ETS 0x1
+#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
};
struct public_func {
@@ -12069,6 +12081,23 @@ struct public_func {
#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
#define DRV_ID_DRV_INIT_HW_SHIFT 31
#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
+
+ u32 oem_cfg_func;
+#define OEM_CFG_FUNC_TC_MASK 0x0000000F
+#define OEM_CFG_FUNC_TC_OFFSET 0
+#define OEM_CFG_FUNC_TC_0 0x0
+#define OEM_CFG_FUNC_TC_1 0x1
+#define OEM_CFG_FUNC_TC_2 0x2
+#define OEM_CFG_FUNC_TC_3 0x3
+#define OEM_CFG_FUNC_TC_4 0x4
+#define OEM_CFG_FUNC_TC_5 0x5
+#define OEM_CFG_FUNC_TC_6 0x6
+#define OEM_CFG_FUNC_TC_7 0x7
+
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
};
struct mcp_mac {
@@ -12295,6 +12324,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT 0
@@ -12495,6 +12525,8 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_BW_UPDATE11,
+ MFW_DRV_MSG_OEM_CFG_UPDATE,
+ MFW_DRV_MSG_GET_TLV_REQ,
MFW_DRV_MSG_MAX
};
@@ -12530,6 +12562,233 @@ struct mcp_public_data {
struct public_func func[MCP_GLOB_FUNC_MAX];
};
+/* OCBB definitions */
+enum tlvs {
+ /* Category 1: Device Properties */
+ DRV_TLV_CLP_STR,
+ DRV_TLV_CLP_STR_CTD,
+ /* Category 6: Device Configuration */
+ DRV_TLV_SCSI_TO,
+ DRV_TLV_R_T_TOV,
+ DRV_TLV_R_A_TOV,
+ DRV_TLV_E_D_TOV,
+ DRV_TLV_CR_TOV,
+ DRV_TLV_BOOT_TYPE,
+ /* Category 8: Port Configuration */
+ DRV_TLV_NPIV_ENABLED,
+ /* Category 10: Function Configuration */
+ DRV_TLV_FEATURE_FLAGS,
+ DRV_TLV_LOCAL_ADMIN_ADDR,
+ DRV_TLV_ADDITIONAL_MAC_ADDR_1,
+ DRV_TLV_ADDITIONAL_MAC_ADDR_2,
+ DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
+ DRV_TLV_LSO_MIN_SEGMENT_COUNT,
+ DRV_TLV_PROMISCUOUS_MODE,
+ DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
+ DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
+ DRV_TLV_OS_DRIVER_STATES,
+ DRV_TLV_PXE_BOOT_PROGRESS,
+ /* Category 12: FC/FCoE Configuration */
+ DRV_TLV_NPIV_STATE,
+ DRV_TLV_NUM_OF_NPIV_IDS,
+ DRV_TLV_SWITCH_NAME,
+ DRV_TLV_SWITCH_PORT_NUM,
+ DRV_TLV_SWITCH_PORT_ID,
+ DRV_TLV_VENDOR_NAME,
+ DRV_TLV_SWITCH_MODEL,
+ DRV_TLV_SWITCH_FW_VER,
+ DRV_TLV_QOS_PRIORITY_PER_802_1P,
+ DRV_TLV_PORT_ALIAS,
+ DRV_TLV_PORT_STATE,
+ DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_LINK_FAILURE_COUNT,
+ DRV_TLV_FCOE_BOOT_PROGRESS,
+ /* Category 13: iSCSI Configuration */
+ DRV_TLV_TARGET_LLMNR_ENABLED,
+ DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
+ DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
+ DRV_TLV_AUTHENTICATION_METHOD,
+ DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
+ DRV_TLV_MAX_FRAME_SIZE,
+ DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_ISCSI_BOOT_PROGRESS,
+ /* Category 20: Device Data */
+ DRV_TLV_PCIE_BUS_RX_UTILIZATION,
+ DRV_TLV_PCIE_BUS_TX_UTILIZATION,
+ DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
+ DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
+ DRV_TLV_NCSI_RX_BYTES_RECEIVED,
+ DRV_TLV_NCSI_TX_BYTES_SENT,
+ /* Category 22: Base Port Data */
+ DRV_TLV_RX_DISCARDS,
+ DRV_TLV_RX_ERRORS,
+ DRV_TLV_TX_ERRORS,
+ DRV_TLV_TX_DISCARDS,
+ DRV_TLV_RX_FRAMES_RECEIVED,
+ DRV_TLV_TX_FRAMES_SENT,
+ /* Category 23: FC/FCoE Port Data */
+ DRV_TLV_RX_BROADCAST_PACKETS,
+ DRV_TLV_TX_BROADCAST_PACKETS,
+ /* Category 28: Base Function Data */
+ DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
+ DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
+ DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_PF_RX_FRAMES_RECEIVED,
+ DRV_TLV_RX_BYTES_RECEIVED,
+ DRV_TLV_PF_TX_FRAMES_SENT,
+ DRV_TLV_TX_BYTES_SENT,
+ DRV_TLV_IOV_OFFLOAD,
+ DRV_TLV_PCI_ERRORS_CAP_ID,
+ DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
+ DRV_TLV_UNCORRECTABLE_ERROR_MASK,
+ DRV_TLV_CORRECTABLE_ERROR_STATUS,
+ DRV_TLV_CORRECTABLE_ERROR_MASK,
+ DRV_TLV_PCI_ERRORS_AECC_REGISTER,
+ DRV_TLV_TX_QUEUES_EMPTY,
+ DRV_TLV_RX_QUEUES_EMPTY,
+ DRV_TLV_TX_QUEUES_FULL,
+ DRV_TLV_RX_QUEUES_FULL,
+ /* Category 29: FC/FCoE Function Data */
+ DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
+ DRV_TLV_FCOE_RX_BYTES_RECEIVED,
+ DRV_TLV_FCOE_TX_FRAMES_SENT,
+ DRV_TLV_FCOE_TX_BYTES_SENT,
+ DRV_TLV_CRC_ERROR_COUNT,
+ DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_1_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_2_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_3_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_4_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_5_TIMESTAMP,
+ DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
+ DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
+ DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
+ DRV_TLV_DISPARITY_ERROR_COUNT,
+ DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
+ DRV_TLV_LAST_FLOGI_TIMESTAMP,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
+ DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
+ DRV_TLV_LAST_FLOGI_RJT,
+ DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
+ DRV_TLV_FDISCS_SENT_COUNT,
+ DRV_TLV_FDISC_ACCS_RECEIVED,
+ DRV_TLV_FDISC_RJTS_RECEIVED,
+ DRV_TLV_PLOGI_SENT_COUNT,
+ DRV_TLV_PLOGI_ACCS_RECEIVED,
+ DRV_TLV_PLOGI_RJTS_RECEIVED,
+ DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_1_TIMESTAMP,
+ DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_2_TIMESTAMP,
+ DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_3_TIMESTAMP,
+ DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_4_TIMESTAMP,
+ DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_5_TIMESTAMP,
+ DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
+ DRV_TLV_LOGOS_ISSUED,
+ DRV_TLV_LOGO_ACCS_RECEIVED,
+ DRV_TLV_LOGO_RJTS_RECEIVED,
+ DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_1_TIMESTAMP,
+ DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_2_TIMESTAMP,
+ DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_3_TIMESTAMP,
+ DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_4_TIMESTAMP,
+ DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_5_TIMESTAMP,
+ DRV_TLV_LOGOS_RECEIVED,
+ DRV_TLV_ACCS_ISSUED,
+ DRV_TLV_PRLIS_ISSUED,
+ DRV_TLV_ACCS_RECEIVED,
+ DRV_TLV_ABTS_SENT_COUNT,
+ DRV_TLV_ABTS_ACCS_RECEIVED,
+ DRV_TLV_ABTS_RJTS_RECEIVED,
+ DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_1_TIMESTAMP,
+ DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_2_TIMESTAMP,
+ DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_3_TIMESTAMP,
+ DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_4_TIMESTAMP,
+ DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_5_TIMESTAMP,
+ DRV_TLV_RSCNS_RECEIVED,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
+ DRV_TLV_LUN_RESETS_ISSUED,
+ DRV_TLV_ABORT_TASK_SETS_ISSUED,
+ DRV_TLV_TPRLOS_SENT,
+ DRV_TLV_NOS_SENT_COUNT,
+ DRV_TLV_NOS_RECEIVED_COUNT,
+ DRV_TLV_OLS_COUNT,
+ DRV_TLV_LR_COUNT,
+ DRV_TLV_LRR_COUNT,
+ DRV_TLV_LIP_SENT_COUNT,
+ DRV_TLV_LIP_RECEIVED_COUNT,
+ DRV_TLV_EOFA_COUNT,
+ DRV_TLV_EOFNI_COUNT,
+ DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
+ DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
+ DRV_TLV_SCSI_STATUS_BUSY_COUNT,
+ DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
+ DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
+ DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
+ DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
+ DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
+ DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
+ DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
+ /* Category 30: iSCSI Function Data */
+ DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
+ DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
+ DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
+ DRV_TLV_ISCSI_PDU_TX_BYTES_SENT
+};
+
struct nvm_cfg_mac_address {
u32 mac_addr_hi;
#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index e874504e8b28..5e655c3601cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -115,8 +115,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn)
void qed_l2_setup(struct qed_hwfn *p_hwfn)
{
- if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
- p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ if (!QED_IS_L2_PERSONALITY(p_hwfn))
return;
mutex_init(&p_hwfn->p_l2_info->lock);
@@ -126,8 +125,7 @@ void qed_l2_free(struct qed_hwfn *p_hwfn)
{
u32 i;
- if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
- p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ if (!QED_IS_L2_PERSONALITY(p_hwfn))
return;
if (!p_hwfn->p_l2_info)
@@ -2850,6 +2848,24 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
cqe);
}
+static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
+{
+ int i, ret;
+
+ if (IS_PF(cdev))
+ return 0;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif
@@ -2887,6 +2903,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
.configure_arfs_searcher = &qed_configure_arfs_searcher,
.get_coalesce = &qed_get_coalesce,
+ .req_bulletin_update_mac = &qed_req_bulletin_update_mac,
};
const struct qed_eth_ops *qed_get_eth_ops(void)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 74fc626b1ec1..c97ebd681c47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
struct qed_ll2_tx_packet *p_pkt = NULL;
struct qed_ll2_info *p_ll2_conn;
struct qed_ll2_tx_queue *p_tx;
+ unsigned long flags = 0;
dma_addr_t tx_frag;
p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
@@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
p_tx = &p_ll2_conn->tx_queue;
+ spin_lock_irqsave(&p_tx->lock, flags);
while (!list_empty(&p_tx->active_descq)) {
p_pkt = list_first_entry(&p_tx->active_descq,
struct qed_ll2_tx_packet, list_entry);
@@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+ spin_unlock_irqrestore(&p_tx->lock, flags);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
struct qed_ooo_buffer *p_buffer;
@@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
b_last_frag,
b_last_packet);
}
+ spin_lock_irqsave(&p_tx->lock, flags);
}
+ spin_unlock_irqrestore(&p_tx->lock, flags);
}
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
@@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ll2_rx_packet *p_pkt = NULL;
struct qed_ll2_rx_queue *p_rx;
+ unsigned long flags = 0;
p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
if (!p_ll2_conn)
@@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
p_rx = &p_ll2_conn->rx_queue;
+ spin_lock_irqsave(&p_rx->lock, flags);
while (!list_empty(&p_rx->active_descq)) {
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
if (!p_pkt)
break;
-
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
+ spin_unlock_irqrestore(&p_rx->lock, flags);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
struct qed_ooo_buffer *p_buffer;
@@ -588,7 +595,30 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
cookie,
rx_buf_addr, b_last);
}
+ spin_lock_irqsave(&p_rx->lock, flags);
}
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+}
+
+static bool
+qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
+ struct core_rx_slow_path_cqe *p_cqe)
+{
+ struct ooo_opaque *iscsi_ooo;
+ u32 cid;
+
+ if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
+ return false;
+
+ iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
+ if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
+ return false;
+
+	/* Flush needed - release this connection's OOO isles */
+ cid = le32_to_cpu(iscsi_ooo->cid);
+ qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
+
+ return true;
}
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
@@ -617,6 +647,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
cqe_type = cqe->rx_cqe_sp.type;
+ if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
+ if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
+ &cqe->rx_cqe_sp))
+ continue;
+
if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
DP_NOTICE(p_hwfn,
"Got a non-regular LB LL2 completion [type 0x%02x]\n",
@@ -794,6 +829,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
int rc;
+ if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+ return 0;
+
rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
if (rc)
return rc;
@@ -814,6 +852,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0;
int rc;
+ if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+ return 0;
+
new_idx = le16_to_cpu(*p_tx->p_fw_cons);
num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
@@ -919,12 +960,16 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
p_ramrod->inner_vlan_stripping_en =
p_ll2_conn->input.rx_vlan_removal_en;
+
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+ p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
+ p_ramrod->report_outer_vlan = 1;
p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
- if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
- p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) &&
- (conn_type != QED_LL2_TYPE_IWARP)) {
+ if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
+ p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
+ conn_type != QED_LL2_TYPE_IWARP) {
p_ramrod->mf_si_bcast_accept_all = 1;
p_ramrod->mf_si_mcast_accept_all = 1;
} else {
@@ -1493,11 +1538,12 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
+ if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ qed_llh_add_protocol_filter(p_hwfn, p_ptt,
+ ETH_P_FCOE, 0,
+ QED_LLH_FILTER_ETHERTYPE);
qed_llh_add_protocol_filter(p_hwfn, p_ptt,
- 0x8906, 0,
- QED_LLH_FILTER_ETHERTYPE);
- qed_llh_add_protocol_filter(p_hwfn, p_ptt,
- 0x8914, 0,
+ ETH_P_FIP, 0,
QED_LLH_FILTER_ETHERTYPE);
}
@@ -1653,11 +1699,16 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
- p_ll2->input.conn_type == QED_LL2_TYPE_OOO)
+ p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
start_bd->nw_vlan_or_lb_echo =
cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
- else
+ } else {
start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+ p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
+ pkt->remove_stag = true;
+ }
+
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
cpu_to_le16(pkt->l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
@@ -1668,6 +1719,9 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
+ !!(pkt->remove_stag));
+
start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
@@ -1867,28 +1921,37 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
/* Stop Tx & Rx of connection, if needed */
if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->tx_queue.b_cb_registred = false;
+ smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
goto out;
+
qed_ll2_txq_flush(p_hwfn, connection_handle);
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
}
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->rx_queue.b_cb_registred = false;
+ smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
goto out;
+
qed_ll2_rxq_flush(p_hwfn, connection_handle);
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
}
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
+ if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
+ ETH_P_FCOE, 0,
+ QED_LLH_FILTER_ETHERTYPE);
qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
- 0x8906, 0,
- QED_LLH_FILTER_ETHERTYPE);
- qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
- 0x8914, 0,
+ ETH_P_FIP, 0,
QED_LLH_FILTER_ETHERTYPE);
}
@@ -1925,16 +1988,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
if (!p_ll2_conn)
return;
- if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
- p_ll2_conn->rx_queue.b_cb_registred = false;
- qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
- }
-
- if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
- p_ll2_conn->tx_queue.b_cb_registred = false;
- qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
- }
-
kfree(p_ll2_conn->tx_queue.descq_mem);
qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
@@ -2360,7 +2413,8 @@ fail:
return -EINVAL;
}
-static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
+static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
+ unsigned long xmit_flags)
{
struct qed_ll2_tx_pkt_info pkt;
const skb_frag_t *frag;
@@ -2370,7 +2424,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
u8 flags = 0;
if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
- DP_INFO(cdev, "Cannot transmit a checksumed packet\n");
+ DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
return -EINVAL;
}
@@ -2405,6 +2459,9 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
pkt.first_frag = mapping;
pkt.first_frag_len = skb->len;
pkt.cookie = skb;
+ if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
+ test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
+ pkt.remove_stag = true;
rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
&pkt, 1);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 9854aa9139af..68c4399ffd50 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -264,7 +264,6 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
- dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
dev_info->dev_type = cdev->type;
ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
@@ -273,7 +272,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->fw_minor = FW_MINOR_VERSION;
dev_info->fw_rev = FW_REVISION_VERSION;
dev_info->fw_eng = FW_ENGINEERING_VERSION;
- dev_info->mf_mode = cdev->mf_mode;
+ dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
+ &cdev->mf_bits);
dev_info->tx_switching = true;
if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
@@ -680,7 +680,7 @@ static int qed_nic_stop(struct qed_dev *cdev)
tasklet_disable(p_hwfn->sp_dpc);
p_hwfn->b_sp_dpc_enabled = false;
DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
- "Disabled sp taskelt [hwfn %d] at %p\n",
+ "Disabled sp tasklet [hwfn %d] at %p\n",
i, p_hwfn->sp_dpc);
}
}
@@ -946,6 +946,68 @@ static void qed_update_pf_params(struct qed_dev *cdev,
}
}
+static void qed_slowpath_wq_stop(struct qed_dev *cdev)
+{
+ int i;
+
+ if (IS_VF(cdev))
+ return;
+
+ for_each_hwfn(cdev, i) {
+ if (!cdev->hwfns[i].slowpath_wq)
+ continue;
+
+ flush_workqueue(cdev->hwfns[i].slowpath_wq);
+ destroy_workqueue(cdev->hwfns[i].slowpath_wq);
+ }
+}
+
+static void qed_slowpath_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ slowpath_task.work);
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ if (!ptt) {
+ queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+ return;
+ }
+
+ if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
+ &hwfn->slowpath_task_flags))
+ qed_mfw_process_tlv_req(hwfn, ptt);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static int qed_slowpath_wq_start(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn;
+ char name[NAME_SIZE];
+ int i;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ for_each_hwfn(cdev, i) {
+ hwfn = &cdev->hwfns[i];
+
+ snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
+ cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+
+ hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
+ if (!hwfn->slowpath_wq) {
+ DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
+ }
+
+ return 0;
+}
+
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
@@ -961,6 +1023,9 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (qed_iov_wq_start(cdev))
goto err;
+ if (qed_slowpath_wq_start(cdev))
+ goto err;
+
if (IS_PF(cdev)) {
rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
&cdev->pdev->dev);
@@ -1095,6 +1160,8 @@ err:
qed_iov_wq_stop(cdev, false);
+ qed_slowpath_wq_stop(cdev);
+
return rc;
}
@@ -1103,6 +1170,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
+ qed_slowpath_wq_stop(cdev);
+
qed_ll2_dealloc_if(cdev);
if (IS_PF(cdev)) {
@@ -1894,15 +1963,8 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
u8 *buf, u16 len)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
- int rc;
- if (!ptt)
- return -EAGAIN;
-
- rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len);
- qed_ptt_release(hwfn, ptt);
- return rc;
+ return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
@@ -2095,3 +2157,89 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
return;
}
}
+
+int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
+{
+ DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
+ "Scheduling slowpath task [Flag: %d]\n",
+ QED_SLOWPATH_MFW_TLV_REQ);
+ smp_mb__before_atomic();
+ set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
+ smp_mb__after_atomic();
+ queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+
+ return 0;
+}
+
+static void
+qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
+{
+ struct qed_common_cb_ops *op = cdev->protocol_ops.common;
+ struct qed_eth_stats_common *p_common;
+ struct qed_generic_tlvs gen_tlvs;
+ struct qed_eth_stats stats;
+ int i;
+
+ memset(&gen_tlvs, 0, sizeof(gen_tlvs));
+ op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
+
+ if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
+ tlv->flags.ipv4_csum_offload = true;
+ if (gen_tlvs.feat_flags & QED_TLV_LSO)
+ tlv->flags.lso_supported = true;
+ tlv->flags.b_set = true;
+
+ for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
+ if (is_valid_ether_addr(gen_tlvs.mac[i])) {
+ ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
+ tlv->mac_set[i] = true;
+ }
+ }
+
+ qed_get_vport_stats(cdev, &stats);
+ p_common = &stats.common;
+ tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+ p_common->rx_bcast_pkts;
+ tlv->rx_frames_set = true;
+ tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+ p_common->rx_bcast_bytes;
+ tlv->rx_bytes_set = true;
+ tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+ p_common->tx_bcast_pkts;
+ tlv->tx_frames_set = true;
+ tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+ p_common->tx_bcast_bytes;
+	tlv->tx_bytes_set = true;
+}
+
+int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
+ union qed_mfw_tlv_data *tlv_buf)
+{
+ struct qed_dev *cdev = hwfn->cdev;
+ struct qed_common_cb_ops *ops;
+
+ ops = cdev->protocol_ops.common;
+ if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
+ DP_NOTICE(hwfn, "Can't collect TLV management info\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case QED_MFW_TLV_GENERIC:
+ qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
+ break;
+ case QED_MFW_TLV_ETH:
+ ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
+ break;
+ case QED_MFW_TLV_FCOE:
+ ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
+ break;
+ case QED_MFW_TLV_ISCSI:
+ ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index ec0d425766a7..2612e3e458d9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -40,6 +40,7 @@
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
+#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
@@ -1486,6 +1487,80 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
&resp, &param);
}
+void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 port_cfg, val;
+
+ if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ return;
+
+ memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
+ port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, oem_cfg_port));
+ val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
+ OEM_CFG_CHANNEL_TYPE_OFFSET;
+ if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
+ DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val);
+
+ val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
+ if (val == OEM_CFG_SCHED_TYPE_ETS) {
+ p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
+ } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
+ p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
+ } else {
+ p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
+ DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val);
+ }
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+ val = (port_cfg & OEM_CFG_FUNC_TC_MASK) >> OEM_CFG_FUNC_TC_OFFSET;
+ p_hwfn->ufp_info.tc = (u8)val;
+ val = (port_cfg & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
+ OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
+ if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
+ p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
+ } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
+ p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
+ } else {
+ p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
+ DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val);
+ }
+
+ DP_NOTICE(p_hwfn,
+ "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
+ p_hwfn->ufp_info.mode,
+ p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type);
+}
+
+static int
+qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ qed_mcp_read_ufp_config(p_hwfn, p_ptt);
+
+ if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
+ p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
+ p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
+
+ qed_qm_reconf(p_hwfn, p_ptt);
+ } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
+ /* Merge UFP TC with the dcbx TC data */
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_OPERATIONAL_MIB);
+ } else {
+ DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
+ return -EINVAL;
+ }
+
+ /* update storm FW with negotiation results */
+ qed_sp_pf_update_ufp(p_hwfn);
+
+ /* update stag pcp value */
+ qed_sp_pf_update_stag(p_hwfn);
+
+ return 0;
+}
+
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -1529,6 +1604,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
qed_dcbx_mib_update_event(p_hwfn, p_ptt,
QED_DCBX_OPERATIONAL_MIB);
break;
+ case MFW_DRV_MSG_OEM_CFG_UPDATE:
+ qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
@@ -1544,6 +1622,8 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_S_TAG_UPDATE:
qed_mcp_update_stag(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_GET_TLV_REQ:
+ qed_mfw_tlv_req(p_hwfn);
break;
default:
DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
@@ -2529,9 +2609,8 @@ err0:
return rc;
}
-static int
+int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
enum qed_nvm_images image_id,
struct qed_nvm_image_att *p_image_att)
{
@@ -2546,6 +2625,15 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
case QED_NVM_IMAGE_FCOE_CFG:
type = NVM_TYPE_FCOE_CFG;
break;
+ case QED_NVM_IMAGE_NVM_CFG1:
+ type = NVM_TYPE_NVM_CFG1;
+ break;
+ case QED_NVM_IMAGE_DEFAULT_CFG:
+ type = NVM_TYPE_DEFAULT_CFG;
+ break;
+ case QED_NVM_IMAGE_NVM_META:
+ type = NVM_TYPE_META;
+ break;
default:
DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
image_id);
@@ -2569,7 +2657,6 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
}
int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
enum qed_nvm_images image_id,
u8 *p_buffer, u32 buffer_len)
{
@@ -2578,7 +2665,7 @@ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
memset(p_buffer, 0, buffer_len);
- rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
+ rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
if (rc)
return rc;
@@ -2590,9 +2677,6 @@ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- /* Each NVM image is suffixed by CRC; Upper-layer has no need for it */
- image_att.length -= 4;
-
if (image_att.length > buffer_len) {
DP_VERBOSE(p_hwfn,
QED_MSG_STORAGE,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 8a5c988d0c3c..632a838f1fe3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -213,6 +213,44 @@ enum qed_ov_wol {
QED_OV_WOL_ENABLED
};
+enum qed_mfw_tlv_type {
+ QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */
+ QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */
+ QED_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */
+	QED_MFW_TLV_ISCSI = 0x8,	/* iSCSI protocol TLVs */
+ QED_MFW_TLV_MAX = 0x16,
+};
+
+struct qed_mfw_tlv_generic {
+#define QED_MFW_TLV_FLAGS_SIZE 2
+ struct {
+ u8 ipv4_csum_offload;
+ u8 lso_supported;
+ bool b_set;
+ } flags;
+
+#define QED_MFW_TLV_MAC_COUNT 3
+ /* First entry for primary MAC, 2 secondary MACs possible */
+ u8 mac[QED_MFW_TLV_MAC_COUNT][6];
+ bool mac_set[QED_MFW_TLV_MAC_COUNT];
+
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+union qed_mfw_tlv_data {
+ struct qed_mfw_tlv_generic generic;
+ struct qed_mfw_tlv_eth eth;
+ struct qed_mfw_tlv_fcoe fcoe;
+ struct qed_mfw_tlv_iscsi iscsi;
+};
+
/**
* @brief - returns the link params of the hw function
*
@@ -486,7 +524,20 @@ struct qed_nvm_image_att {
* @brief Allows reading a whole nvram image
*
* @param p_hwfn
- * @param p_ptt
+ * @param image_id - image to get attributes for
+ * @param p_image_att - image attributes structure into which to fill data
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id,
+ struct qed_nvm_image_att *p_image_att);
+
+/**
+ * @brief Allows reading a whole nvram image
+ *
+ * @param p_hwfn
* @param image_id - image requested for reading
* @param p_buffer - allocated buffer into which to fill data
* @param buffer_len - length of the allocated buffer.
@@ -494,7 +545,6 @@ struct qed_nvm_image_att {
* @return 0 iff p_buffer now contains the nvram image.
*/
int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
enum qed_nvm_images image_id,
u8 *p_buffer, u32 buffer_len);
@@ -549,6 +599,17 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
struct bist_nvm_image_att *p_image_att,
u32 image_index);
+/**
+ * @brief - Processes the TLV request from MFW i.e., get the required TLV info
+ * from the qed client and send it to the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
@@ -609,6 +670,14 @@ struct qed_mcp_mb_params {
u32 mcp_param;
};
+struct qed_drv_tlv_hdr {
+ u8 tlv_type;
+ u8 tlv_length; /* In dwords - not including this header */
+ u8 tlv_reserved;
+#define QED_DRV_TLV_FLAGS_CHANGED 0x01
+ u8 tlv_flags;
+};
+
/**
* @brief Initialize the interface with the MCP
*
@@ -993,6 +1062,14 @@ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
+ * @brief Read ufp config from the shared memory.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
* @brief Populate the nvm info shadow in the given hardware function
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
new file mode 100644
index 000000000000..6c16158d8090
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -0,0 +1,1337 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include "qed.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+#define TLV_TYPE(p) (p[0])
+#define TLV_LENGTH(p) (p[1])
+#define TLV_FLAGS(p) (p[3])
+
+#define QED_TLV_DATA_MAX (14)
+struct qed_tlv_parsed_buf {
+ /* To be filled with the address to set in Value field */
+ void *p_val;
+
+ /* To be used internally in case the value has to be modified */
+ u8 data[QED_TLV_DATA_MAX];
+};
+
+static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
+{
+ switch (tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ case DRV_TLV_OS_DRIVER_STATES:
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ case DRV_TLV_TX_FRAMES_SENT:
+ case DRV_TLV_TX_BYTES_SENT:
+ case DRV_TLV_NPIV_ENABLED:
+ case DRV_TLV_PCIE_BUS_RX_UTILIZATION:
+ case DRV_TLV_PCIE_BUS_TX_UTILIZATION:
+ case DRV_TLV_DEVICE_CPU_CORES_UTILIZATION:
+ case DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED:
+ case DRV_TLV_NCSI_RX_BYTES_RECEIVED:
+ case DRV_TLV_NCSI_TX_BYTES_SENT:
+ *tlv_group |= QED_MFW_TLV_GENERIC;
+ break;
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ case DRV_TLV_PROMISCUOUS_MODE:
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_IOV_OFFLOAD:
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ case DRV_TLV_TX_QUEUES_FULL:
+ case DRV_TLV_RX_QUEUES_FULL:
+ *tlv_group |= QED_MFW_TLV_ETH;
+ break;
+ case DRV_TLV_SCSI_TO:
+ case DRV_TLV_R_T_TOV:
+ case DRV_TLV_R_A_TOV:
+ case DRV_TLV_E_D_TOV:
+ case DRV_TLV_CR_TOV:
+ case DRV_TLV_BOOT_TYPE:
+ case DRV_TLV_NPIV_STATE:
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ case DRV_TLV_SWITCH_NAME:
+ case DRV_TLV_SWITCH_PORT_NUM:
+ case DRV_TLV_SWITCH_PORT_ID:
+ case DRV_TLV_VENDOR_NAME:
+ case DRV_TLV_SWITCH_MODEL:
+ case DRV_TLV_SWITCH_FW_VER:
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ case DRV_TLV_PORT_ALIAS:
+ case DRV_TLV_PORT_STATE:
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ case DRV_TLV_CRC_ERROR_COUNT:
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_RJT:
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ case DRV_TLV_LOGOS_ISSUED:
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ case DRV_TLV_LOGOS_RECEIVED:
+ case DRV_TLV_ACCS_ISSUED:
+ case DRV_TLV_PRLIS_ISSUED:
+ case DRV_TLV_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_SENT_COUNT:
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ case DRV_TLV_RSCNS_RECEIVED:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ case DRV_TLV_TPRLOS_SENT:
+ case DRV_TLV_NOS_SENT_COUNT:
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ case DRV_TLV_OLS_COUNT:
+ case DRV_TLV_LR_COUNT:
+ case DRV_TLV_LRR_COUNT:
+ case DRV_TLV_LIP_SENT_COUNT:
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ case DRV_TLV_EOFA_COUNT:
+ case DRV_TLV_EOFNI_COUNT:
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+		*tlv_group |= QED_MFW_TLV_FCOE;
+ break;
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ case DRV_TLV_MAX_FRAME_SIZE:
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ *tlv_group |= QED_MFW_TLV_ISCSI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Returns the size of the data buffer, or -1 if the TLV data is not available. */
+static int
+qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+ struct qed_mfw_tlv_generic *p_drv_buf,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ if (p_drv_buf->flags.b_set) {
+ memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX);
+ p_buf->data[0] = p_drv_buf->flags.ipv4_csum_offload ?
+ 1 : 0;
+ p_buf->data[0] |= (p_drv_buf->flags.lso_supported ?
+ 1 : 0) << 1;
+ p_buf->p_val = p_buf->data;
+ return QED_MFW_TLV_FLAGS_SIZE;
+ }
+ break;
+
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ {
+ int idx = p_tlv->tlv_type - DRV_TLV_LOCAL_ADMIN_ADDR;
+
+ if (p_drv_buf->mac_set[idx]) {
+ p_buf->p_val = p_drv_buf->mac[idx];
+ return ETH_ALEN;
+ }
+ break;
+ }
+
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ p_buf->p_val = &p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ p_buf->p_val = &p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+ struct qed_mfw_tlv_eth *p_drv_buf,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ if (p_drv_buf->lso_maxoff_size_set) {
+ p_buf->p_val = &p_drv_buf->lso_maxoff_size;
+ return sizeof(p_drv_buf->lso_maxoff_size);
+ }
+ break;
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ if (p_drv_buf->lso_minseg_size_set) {
+ p_buf->p_val = &p_drv_buf->lso_minseg_size;
+ return sizeof(p_drv_buf->lso_minseg_size);
+ }
+ break;
+ case DRV_TLV_PROMISCUOUS_MODE:
+ if (p_drv_buf->prom_mode_set) {
+ p_buf->p_val = &p_drv_buf->prom_mode;
+ return sizeof(p_drv_buf->prom_mode);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_descr_size_set) {
+ p_buf->p_val = &p_drv_buf->tx_descr_size;
+ return sizeof(p_drv_buf->tx_descr_size);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_descr_size_set) {
+ p_buf->p_val = &p_drv_buf->rx_descr_size;
+ return sizeof(p_drv_buf->rx_descr_size);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ if (p_drv_buf->netq_count_set) {
+ p_buf->p_val = &p_drv_buf->netq_count;
+ return sizeof(p_drv_buf->netq_count);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ if (p_drv_buf->tcp4_offloads_set) {
+ p_buf->p_val = &p_drv_buf->tcp4_offloads;
+ return sizeof(p_drv_buf->tcp4_offloads);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ if (p_drv_buf->tcp6_offloads_set) {
+ p_buf->p_val = &p_drv_buf->tcp6_offloads;
+ return sizeof(p_drv_buf->tcp6_offloads);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_descr_qdepth_set) {
+ p_buf->p_val = &p_drv_buf->tx_descr_qdepth;
+ return sizeof(p_drv_buf->tx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_descr_qdepth_set) {
+ p_buf->p_val = &p_drv_buf->rx_descr_qdepth;
+ return sizeof(p_drv_buf->rx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_IOV_OFFLOAD:
+ if (p_drv_buf->iov_offload_set) {
+ p_buf->p_val = &p_drv_buf->iov_offload;
+ return sizeof(p_drv_buf->iov_offload);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ if (p_drv_buf->txqs_empty_set) {
+ p_buf->p_val = &p_drv_buf->txqs_empty;
+ return sizeof(p_drv_buf->txqs_empty);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ if (p_drv_buf->rxqs_empty_set) {
+ p_buf->p_val = &p_drv_buf->rxqs_empty;
+ return sizeof(p_drv_buf->rxqs_empty);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_FULL:
+ if (p_drv_buf->num_txqs_full_set) {
+ p_buf->p_val = &p_drv_buf->num_txqs_full;
+ return sizeof(p_drv_buf->num_txqs_full);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_FULL:
+ if (p_drv_buf->num_rxqs_full_set) {
+ p_buf->p_val = &p_drv_buf->num_rxqs_full;
+ return sizeof(p_drv_buf->num_rxqs_full);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ if (!p_time->b_set)
+ return -1;
+
+ /* Validate numbers */
+ if (p_time->month > 12)
+ p_time->month = 0;
+ if (p_time->day > 31)
+ p_time->day = 0;
+ if (p_time->hour > 23)
+ p_time->hour = 0;
+ if (p_time->min > 59)
+		p_time->min = 0;
+ if (p_time->msec > 999)
+ p_time->msec = 0;
+ if (p_time->usec > 999)
+ p_time->usec = 0;
+
+ memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX);
+ snprintf(p_buf->data, 14, "%d%d%d%d%d%d",
+ p_time->month, p_time->day,
+ p_time->hour, p_time->min, p_time->msec, p_time->usec);
+
+ p_buf->p_val = p_buf->data;
+
+ return QED_MFW_TLV_TIME_SIZE;
+}
+
+static int
+qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+ struct qed_mfw_tlv_fcoe *p_drv_buf,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ struct qed_mfw_tlv_time *p_time;
+ u8 idx;
+
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_SCSI_TO:
+ if (p_drv_buf->scsi_timeout_set) {
+ p_buf->p_val = &p_drv_buf->scsi_timeout;
+ return sizeof(p_drv_buf->scsi_timeout);
+ }
+ break;
+ case DRV_TLV_R_T_TOV:
+ if (p_drv_buf->rt_tov_set) {
+ p_buf->p_val = &p_drv_buf->rt_tov;
+ return sizeof(p_drv_buf->rt_tov);
+ }
+ break;
+ case DRV_TLV_R_A_TOV:
+ if (p_drv_buf->ra_tov_set) {
+ p_buf->p_val = &p_drv_buf->ra_tov;
+ return sizeof(p_drv_buf->ra_tov);
+ }
+ break;
+ case DRV_TLV_E_D_TOV:
+ if (p_drv_buf->ed_tov_set) {
+ p_buf->p_val = &p_drv_buf->ed_tov;
+ return sizeof(p_drv_buf->ed_tov);
+ }
+ break;
+ case DRV_TLV_CR_TOV:
+ if (p_drv_buf->cr_tov_set) {
+ p_buf->p_val = &p_drv_buf->cr_tov;
+ return sizeof(p_drv_buf->cr_tov);
+ }
+ break;
+ case DRV_TLV_BOOT_TYPE:
+ if (p_drv_buf->boot_type_set) {
+ p_buf->p_val = &p_drv_buf->boot_type;
+ return sizeof(p_drv_buf->boot_type);
+ }
+ break;
+ case DRV_TLV_NPIV_STATE:
+ if (p_drv_buf->npiv_state_set) {
+ p_buf->p_val = &p_drv_buf->npiv_state;
+ return sizeof(p_drv_buf->npiv_state);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ if (p_drv_buf->num_npiv_ids_set) {
+ p_buf->p_val = &p_drv_buf->num_npiv_ids;
+ return sizeof(p_drv_buf->num_npiv_ids);
+ }
+ break;
+ case DRV_TLV_SWITCH_NAME:
+ if (p_drv_buf->switch_name_set) {
+ p_buf->p_val = &p_drv_buf->switch_name;
+ return sizeof(p_drv_buf->switch_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_NUM:
+ if (p_drv_buf->switch_portnum_set) {
+ p_buf->p_val = &p_drv_buf->switch_portnum;
+ return sizeof(p_drv_buf->switch_portnum);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_ID:
+ if (p_drv_buf->switch_portid_set) {
+ p_buf->p_val = &p_drv_buf->switch_portid;
+ return sizeof(p_drv_buf->switch_portid);
+ }
+ break;
+ case DRV_TLV_VENDOR_NAME:
+ if (p_drv_buf->vendor_name_set) {
+ p_buf->p_val = &p_drv_buf->vendor_name;
+ return sizeof(p_drv_buf->vendor_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_MODEL:
+ if (p_drv_buf->switch_model_set) {
+ p_buf->p_val = &p_drv_buf->switch_model;
+ return sizeof(p_drv_buf->switch_model);
+ }
+ break;
+ case DRV_TLV_SWITCH_FW_VER:
+ if (p_drv_buf->switch_fw_version_set) {
+ p_buf->p_val = &p_drv_buf->switch_fw_version;
+ return sizeof(p_drv_buf->switch_fw_version);
+ }
+ break;
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ if (p_drv_buf->qos_pri_set) {
+ p_buf->p_val = &p_drv_buf->qos_pri;
+ return sizeof(p_drv_buf->qos_pri);
+ }
+ break;
+ case DRV_TLV_PORT_ALIAS:
+ if (p_drv_buf->port_alias_set) {
+ p_buf->p_val = &p_drv_buf->port_alias;
+ return sizeof(p_drv_buf->port_alias);
+ }
+ break;
+ case DRV_TLV_PORT_STATE:
+ if (p_drv_buf->port_state_set) {
+ p_buf->p_val = &p_drv_buf->port_state;
+ return sizeof(p_drv_buf->port_state);
+ }
+ break;
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_tx_descr_size_set) {
+ p_buf->p_val = &p_drv_buf->fip_tx_descr_size;
+ return sizeof(p_drv_buf->fip_tx_descr_size);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_rx_descr_size_set) {
+ p_buf->p_val = &p_drv_buf->fip_rx_descr_size;
+ return sizeof(p_drv_buf->fip_rx_descr_size);
+ }
+ break;
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ if (p_drv_buf->link_failures_set) {
+ p_buf->p_val = &p_drv_buf->link_failures;
+ return sizeof(p_drv_buf->link_failures);
+ }
+ break;
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ if (p_drv_buf->fcoe_boot_progress_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_boot_progress;
+ return sizeof(p_drv_buf->fcoe_boot_progress);
+ }
+ break;
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ if (p_drv_buf->rx_bcast_set) {
+ p_buf->p_val = &p_drv_buf->rx_bcast;
+ return sizeof(p_drv_buf->rx_bcast);
+ }
+ break;
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ if (p_drv_buf->tx_bcast_set) {
+ p_buf->p_val = &p_drv_buf->tx_bcast;
+ return sizeof(p_drv_buf->tx_bcast);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_txq_depth_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_txq_depth;
+ return sizeof(p_drv_buf->fcoe_txq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_rxq_depth_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_rxq_depth;
+ return sizeof(p_drv_buf->fcoe_rxq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_frames_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_rx_frames;
+ return sizeof(p_drv_buf->fcoe_rx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_rx_bytes;
+ return sizeof(p_drv_buf->fcoe_rx_bytes);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ if (p_drv_buf->fcoe_tx_frames_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_tx_frames;
+ return sizeof(p_drv_buf->fcoe_tx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ if (p_drv_buf->fcoe_tx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_tx_bytes;
+ return sizeof(p_drv_buf->fcoe_tx_bytes);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_COUNT:
+ if (p_drv_buf->crc_count_set) {
+ p_buf->p_val = &p_drv_buf->crc_count;
+ return sizeof(p_drv_buf->crc_count);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID) / 2;
+
+ if (p_drv_buf->crc_err_src_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->crc_err_src_fcid[idx];
+ return sizeof(p_drv_buf->crc_err_src_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_CRC_ERROR_1_TIMESTAMP) / 2;
+
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->crc_err[idx],
+ p_buf);
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ if (p_drv_buf->losync_err_set) {
+ p_buf->p_val = &p_drv_buf->losync_err;
+ return sizeof(p_drv_buf->losync_err);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ if (p_drv_buf->losig_err_set) {
+ p_buf->p_val = &p_drv_buf->losig_err;
+ return sizeof(p_drv_buf->losig_err);
+ }
+ break;
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ if (p_drv_buf->primtive_err_set) {
+ p_buf->p_val = &p_drv_buf->primtive_err;
+ return sizeof(p_drv_buf->primtive_err);
+ }
+ break;
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ if (p_drv_buf->disparity_err_set) {
+ p_buf->p_val = &p_drv_buf->disparity_err;
+ return sizeof(p_drv_buf->disparity_err);
+ }
+ break;
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ if (p_drv_buf->code_violation_err_set) {
+ p_buf->p_val = &p_drv_buf->code_violation_err;
+ return sizeof(p_drv_buf->code_violation_err);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ idx = p_tlv->tlv_type -
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1;
+ if (p_drv_buf->flogi_param_set[idx]) {
+ p_buf->p_val = &p_drv_buf->flogi_param[idx];
+ return sizeof(p_drv_buf->flogi_param[idx]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_tstamp,
+ p_buf);
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ idx = p_tlv->tlv_type -
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1;
+
+ if (p_drv_buf->flogi_acc_param_set[idx]) {
+ p_buf->p_val = &p_drv_buf->flogi_acc_param[idx];
+ return sizeof(p_drv_buf->flogi_acc_param[idx]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_acc_tstamp,
+ p_buf);
+ case DRV_TLV_LAST_FLOGI_RJT:
+ if (p_drv_buf->flogi_rjt_set) {
+ p_buf->p_val = &p_drv_buf->flogi_rjt;
+ return sizeof(p_drv_buf->flogi_rjt);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_rjt_tstamp,
+ p_buf);
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ if (p_drv_buf->fdiscs_set) {
+ p_buf->p_val = &p_drv_buf->fdiscs;
+ return sizeof(p_drv_buf->fdiscs);
+ }
+ break;
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ if (p_drv_buf->fdisc_acc_set) {
+ p_buf->p_val = &p_drv_buf->fdisc_acc;
+ return sizeof(p_drv_buf->fdisc_acc);
+ }
+ break;
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ if (p_drv_buf->fdisc_rjt_set) {
+ p_buf->p_val = &p_drv_buf->fdisc_rjt;
+ return sizeof(p_drv_buf->fdisc_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ if (p_drv_buf->plogi_set) {
+ p_buf->p_val = &p_drv_buf->plogi;
+ return sizeof(p_drv_buf->plogi);
+ }
+ break;
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ if (p_drv_buf->plogi_acc_set) {
+ p_buf->p_val = &p_drv_buf->plogi_acc;
+ return sizeof(p_drv_buf->plogi_acc);
+ }
+ break;
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ if (p_drv_buf->plogi_rjt_set) {
+ p_buf->p_val = &p_drv_buf->plogi_rjt;
+ return sizeof(p_drv_buf->plogi_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID) / 2;
+
+ if (p_drv_buf->plogi_dst_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->plogi_dst_fcid[idx];
+ return sizeof(p_drv_buf->plogi_dst_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_TIMESTAMP) / 2;
+
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->plogi_tstamp[idx],
+ p_buf);
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID) / 2;
+
+ if (p_drv_buf->plogi_acc_src_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->plogi_acc_src_fcid[idx];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_ACC_TIMESTAMP) / 2;
+ p_time = &p_drv_buf->plogi_acc_tstamp[idx];
+
+ return qed_mfw_get_tlv_time_value(p_time, p_buf);
+ case DRV_TLV_LOGOS_ISSUED:
+ if (p_drv_buf->tx_plogos_set) {
+ p_buf->p_val = &p_drv_buf->tx_plogos;
+ return sizeof(p_drv_buf->tx_plogos);
+ }
+ break;
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ if (p_drv_buf->plogo_acc_set) {
+ p_buf->p_val = &p_drv_buf->plogo_acc;
+ return sizeof(p_drv_buf->plogo_acc);
+ }
+ break;
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ if (p_drv_buf->plogo_rjt_set) {
+ p_buf->p_val = &p_drv_buf->plogo_rjt;
+ return sizeof(p_drv_buf->plogo_rjt);
+ }
+ break;
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID) /
+ 2;
+
+ if (p_drv_buf->plogo_src_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->plogo_src_fcid[idx];
+ return sizeof(p_drv_buf->plogo_src_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_TIMESTAMP) / 2;
+
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->plogo_tstamp[idx],
+ p_buf);
+ case DRV_TLV_LOGOS_RECEIVED:
+ if (p_drv_buf->rx_logos_set) {
+ p_buf->p_val = &p_drv_buf->rx_logos;
+ return sizeof(p_drv_buf->rx_logos);
+ }
+ break;
+ case DRV_TLV_ACCS_ISSUED:
+ if (p_drv_buf->tx_accs_set) {
+ p_buf->p_val = &p_drv_buf->tx_accs;
+ return sizeof(p_drv_buf->tx_accs);
+ }
+ break;
+ case DRV_TLV_PRLIS_ISSUED:
+ if (p_drv_buf->tx_prlis_set) {
+ p_buf->p_val = &p_drv_buf->tx_prlis;
+ return sizeof(p_drv_buf->tx_prlis);
+ }
+ break;
+ case DRV_TLV_ACCS_RECEIVED:
+ if (p_drv_buf->rx_accs_set) {
+ p_buf->p_val = &p_drv_buf->rx_accs;
+ return sizeof(p_drv_buf->rx_accs);
+ }
+ break;
+ case DRV_TLV_ABTS_SENT_COUNT:
+ if (p_drv_buf->tx_abts_set) {
+ p_buf->p_val = &p_drv_buf->tx_abts;
+ return sizeof(p_drv_buf->tx_abts);
+ }
+ break;
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ if (p_drv_buf->rx_abts_acc_set) {
+ p_buf->p_val = &p_drv_buf->rx_abts_acc;
+ return sizeof(p_drv_buf->rx_abts_acc);
+ }
+ break;
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ if (p_drv_buf->rx_abts_rjt_set) {
+ p_buf->p_val = &p_drv_buf->rx_abts_rjt;
+ return sizeof(p_drv_buf->rx_abts_rjt);
+ }
+ break;
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID) / 2;
+
+ if (p_drv_buf->abts_dst_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->abts_dst_fcid[idx];
+ return sizeof(p_drv_buf->abts_dst_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_ABTS_1_TIMESTAMP) / 2;
+
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->abts_tstamp[idx],
+ p_buf);
+ case DRV_TLV_RSCNS_RECEIVED:
+ if (p_drv_buf->rx_rscn_set) {
+ p_buf->p_val = &p_drv_buf->rx_rscn;
+ return sizeof(p_drv_buf->rx_rscn);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ idx = p_tlv->tlv_type - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1;
+
+ if (p_drv_buf->rx_rscn_nport_set[idx]) {
+ p_buf->p_val = &p_drv_buf->rx_rscn_nport[idx];
+ return sizeof(p_drv_buf->rx_rscn_nport[idx]);
+ }
+ break;
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ if (p_drv_buf->tx_lun_rst_set) {
+ p_buf->p_val = &p_drv_buf->tx_lun_rst;
+ return sizeof(p_drv_buf->tx_lun_rst);
+ }
+ break;
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ if (p_drv_buf->abort_task_sets_set) {
+ p_buf->p_val = &p_drv_buf->abort_task_sets;
+ return sizeof(p_drv_buf->abort_task_sets);
+ }
+ break;
+ case DRV_TLV_TPRLOS_SENT:
+ if (p_drv_buf->tx_tprlos_set) {
+ p_buf->p_val = &p_drv_buf->tx_tprlos;
+ return sizeof(p_drv_buf->tx_tprlos);
+ }
+ break;
+ case DRV_TLV_NOS_SENT_COUNT:
+ if (p_drv_buf->tx_nos_set) {
+ p_buf->p_val = &p_drv_buf->tx_nos;
+ return sizeof(p_drv_buf->tx_nos);
+ }
+ break;
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ if (p_drv_buf->rx_nos_set) {
+ p_buf->p_val = &p_drv_buf->rx_nos;
+ return sizeof(p_drv_buf->rx_nos);
+ }
+ break;
+ case DRV_TLV_OLS_COUNT:
+ if (p_drv_buf->ols_set) {
+ p_buf->p_val = &p_drv_buf->ols;
+ return sizeof(p_drv_buf->ols);
+ }
+ break;
+ case DRV_TLV_LR_COUNT:
+ if (p_drv_buf->lr_set) {
+ p_buf->p_val = &p_drv_buf->lr;
+ return sizeof(p_drv_buf->lr);
+ }
+ break;
+ case DRV_TLV_LRR_COUNT:
+ if (p_drv_buf->lrr_set) {
+ p_buf->p_val = &p_drv_buf->lrr;
+ return sizeof(p_drv_buf->lrr);
+ }
+ break;
+ case DRV_TLV_LIP_SENT_COUNT:
+ if (p_drv_buf->tx_lip_set) {
+ p_buf->p_val = &p_drv_buf->tx_lip;
+ return sizeof(p_drv_buf->tx_lip);
+ }
+ break;
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ if (p_drv_buf->rx_lip_set) {
+ p_buf->p_val = &p_drv_buf->rx_lip;
+ return sizeof(p_drv_buf->rx_lip);
+ }
+ break;
+ case DRV_TLV_EOFA_COUNT:
+ if (p_drv_buf->eofa_set) {
+ p_buf->p_val = &p_drv_buf->eofa;
+ return sizeof(p_drv_buf->eofa);
+ }
+ break;
+ case DRV_TLV_EOFNI_COUNT:
+ if (p_drv_buf->eofni_set) {
+ p_buf->p_val = &p_drv_buf->eofni;
+ return sizeof(p_drv_buf->eofni);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ if (p_drv_buf->scsi_chks_set) {
+ p_buf->p_val = &p_drv_buf->scsi_chks;
+ return sizeof(p_drv_buf->scsi_chks);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_cond_met_set) {
+ p_buf->p_val = &p_drv_buf->scsi_cond_met;
+ return sizeof(p_drv_buf->scsi_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ if (p_drv_buf->scsi_busy_set) {
+ p_buf->p_val = &p_drv_buf->scsi_busy;
+ return sizeof(p_drv_buf->scsi_busy);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ if (p_drv_buf->scsi_inter_set) {
+ p_buf->p_val = &p_drv_buf->scsi_inter;
+ return sizeof(p_drv_buf->scsi_inter);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_inter_cond_met_set) {
+ p_buf->p_val = &p_drv_buf->scsi_inter_cond_met;
+ return sizeof(p_drv_buf->scsi_inter_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ if (p_drv_buf->scsi_rsv_conflicts_set) {
+ p_buf->p_val = &p_drv_buf->scsi_rsv_conflicts;
+ return sizeof(p_drv_buf->scsi_rsv_conflicts);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ if (p_drv_buf->scsi_tsk_full_set) {
+ p_buf->p_val = &p_drv_buf->scsi_tsk_full;
+ return sizeof(p_drv_buf->scsi_tsk_full);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ if (p_drv_buf->scsi_aca_active_set) {
+ p_buf->p_val = &p_drv_buf->scsi_aca_active;
+ return sizeof(p_drv_buf->scsi_aca_active);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ if (p_drv_buf->scsi_tsk_abort_set) {
+ p_buf->p_val = &p_drv_buf->scsi_tsk_abort;
+ return sizeof(p_drv_buf->scsi_tsk_abort);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ) / 2;
+
+ if (p_drv_buf->scsi_rx_chk_set[idx]) {
+ p_buf->p_val = &p_drv_buf->scsi_rx_chk[idx];
+ return sizeof(p_drv_buf->scsi_rx_chk[idx]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_SCSI_CHECK_1_TIMESTAMP) / 2;
+ p_time = &p_drv_buf->scsi_chk_tstamp[idx];
+
+ return qed_mfw_get_tlv_time_value(p_time, p_buf);
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+ struct qed_mfw_tlv_iscsi *p_drv_buf,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ if (p_drv_buf->target_llmnr_set) {
+ p_buf->p_val = &p_drv_buf->target_llmnr;
+ return sizeof(p_drv_buf->target_llmnr);
+ }
+ break;
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->header_digest_set) {
+ p_buf->p_val = &p_drv_buf->header_digest;
+ return sizeof(p_drv_buf->header_digest);
+ }
+ break;
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->data_digest_set) {
+ p_buf->p_val = &p_drv_buf->data_digest;
+ return sizeof(p_drv_buf->data_digest);
+ }
+ break;
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ if (p_drv_buf->auth_method_set) {
+ p_buf->p_val = &p_drv_buf->auth_method;
+ return sizeof(p_drv_buf->auth_method);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ if (p_drv_buf->boot_taget_portal_set) {
+ p_buf->p_val = &p_drv_buf->boot_taget_portal;
+ return sizeof(p_drv_buf->boot_taget_portal);
+ }
+ break;
+ case DRV_TLV_MAX_FRAME_SIZE:
+ if (p_drv_buf->frame_size_set) {
+ p_buf->p_val = &p_drv_buf->frame_size;
+ return sizeof(p_drv_buf->frame_size);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_desc_size_set) {
+ p_buf->p_val = &p_drv_buf->tx_desc_size;
+ return sizeof(p_drv_buf->tx_desc_size);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_desc_size_set) {
+ p_buf->p_val = &p_drv_buf->rx_desc_size;
+ return sizeof(p_drv_buf->rx_desc_size);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ if (p_drv_buf->boot_progress_set) {
+ p_buf->p_val = &p_drv_buf->boot_progress;
+ return sizeof(p_drv_buf->boot_progress);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_desc_qdepth_set) {
+ p_buf->p_val = &p_drv_buf->tx_desc_qdepth;
+ return sizeof(p_drv_buf->tx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_desc_qdepth_set) {
+ p_buf->p_val = &p_drv_buf->rx_desc_qdepth;
+ return sizeof(p_drv_buf->rx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ p_buf->p_val = &p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ p_buf->p_val = &p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
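+/* Walk the TLV headers in the request buffer and, for every TLV in the given
+ * group that the driver has a value for, copy that value in place and mark
+ * the TLV as changed.
+ */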
+static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn,
+ u8 tlv_group, u8 *p_mfw_buf, u32 size)
+{
+ union qed_mfw_tlv_data *p_tlv_data;
+ struct qed_tlv_parsed_buf buffer;
+ struct qed_drv_tlv_hdr tlv;
+ int len = 0;
+ u32 offset;
+ u8 *p_tlv;
+
+ p_tlv_data = vzalloc(sizeof(*p_tlv_data));
+ if (!p_tlv_data)
+ return -ENOMEM;
+
+ if (qed_mfw_fill_tlv_data(p_hwfn, tlv_group, p_tlv_data)) {
+ vfree(p_tlv_data);
+ return -EINVAL;
+ }
+
+ memset(&tlv, 0, sizeof(tlv));
+ for (offset = 0; offset < size;
+ offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+ p_tlv = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_tlv);
+ tlv.tlv_length = TLV_LENGTH(p_tlv);
+ tlv.tlv_flags = TLV_FLAGS(p_tlv);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Type %d length = %d flags = 0x%x\n", tlv.tlv_type,
+ tlv.tlv_length, tlv.tlv_flags);
+
+ if (tlv_group == QED_MFW_TLV_GENERIC)
+ len = qed_mfw_get_gen_tlv_value(&tlv,
+ &p_tlv_data->generic,
+ &buffer);
+ else if (tlv_group == QED_MFW_TLV_ETH)
+ len = qed_mfw_get_eth_tlv_value(&tlv,
+ &p_tlv_data->eth,
+ &buffer);
+ else if (tlv_group == QED_MFW_TLV_FCOE)
+ len = qed_mfw_get_fcoe_tlv_value(&tlv,
+ &p_tlv_data->fcoe,
+ &buffer);
+ else
+ len = qed_mfw_get_iscsi_tlv_value(&tlv,
+ &p_tlv_data->iscsi,
+ &buffer);
+
+ if (len > 0) {
+ WARN(len > 4 * tlv.tlv_length,
+ "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n",
+ len, 4 * tlv.tlv_length);
+ len = min_t(int, len, 4 * tlv.tlv_length);
+ tlv.tlv_flags |= QED_DRV_TLV_FLAGS_CHANGED;
+ TLV_FLAGS(p_tlv) = tlv.tlv_flags;
+ memcpy(p_mfw_buf + offset + sizeof(tlv),
+ buffer.p_val, len);
+ }
+ }
+
+ vfree(p_tlv_data);
+
+ return 0;
+}
+
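+/* Handle a TLV request from the MFW: read the request from the global shmem
+ * section, convert it to CPU endianness, fill in the values for the
+ * supported TLV groups, write the buffer back and acknowledge the request
+ * with DRV_MSG_CODE_GET_TLV_DONE.
+ */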
+int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 addr, size, offset, resp, param, val, global_offsize, global_addr;
+ u8 tlv_group = 0, id, *p_mfw_buf = NULL, *p_temp;
+ struct qed_drv_tlv_hdr tlv;
+ int rc;
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + offsetof(struct public_global, data_ptr);
+ addr = qed_rd(p_hwfn, p_ptt, addr);
+ size = qed_rd(p_hwfn, p_ptt, global_addr +
+ offsetof(struct public_global, data_size));
+
+ if (!size) {
+ DP_NOTICE(p_hwfn, "Invalid TLV req size = %d\n", size);
+ goto drv_done;
+ }
+
+ p_mfw_buf = vzalloc(size);
+ if (!p_mfw_buf) {
+ DP_NOTICE(p_hwfn, "Failed to allocate memory for p_mfw_buf\n");
+ goto drv_done;
+ }
+
+ /* Read the TLV request to local buffer. MFW represents the TLV in
+ * little endian format and the MCP returns it in big endian format.
+ * Hence the driver needs to convert the data to little endian first
+ * and then memcpy (cast) it to preserve the MFW TLV format in the
+ * driver buffer.
+ */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ val = qed_rd(p_hwfn, p_ptt, addr + offset);
+ val = be32_to_cpu(val);
+ memcpy(&p_mfw_buf[offset], &val, sizeof(u32));
+ }
+
+ /* Parse the headers to enumerate the requested TLV groups */
+ for (offset = 0; offset < size;
+ offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ if (qed_mfw_get_tlv_group(tlv.tlv_type, &tlv_group))
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
+ "Unrecognized TLV %d\n", tlv.tlv_type);
+ }
+
+ /* Sanitize the TLV groups according to personality */
+ if ((tlv_group & QED_MFW_TLV_ETH) && !QED_IS_L2_PERSONALITY(p_hwfn)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Skipping L2 TLVs for non-L2 function\n");
+ tlv_group &= ~QED_MFW_TLV_ETH;
+ }
+
+ if ((tlv_group & QED_MFW_TLV_FCOE) &&
+ p_hwfn->hw_info.personality != QED_PCI_FCOE) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Skipping FCoE TLVs for non-FCoE function\n");
+ tlv_group &= ~QED_MFW_TLV_FCOE;
+ }
+
+ if ((tlv_group & QED_MFW_TLV_ISCSI) &&
+ p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Skipping iSCSI TLVs for non-iSCSI function\n");
+ tlv_group &= ~QED_MFW_TLV_ISCSI;
+ }
+
+ /* Update the TLV values in the local buffer */
+ for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) {
+ if (tlv_group & id)
+ if (qed_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size))
+ goto drv_done;
+ }
+
+ /* Write the TLV data back to shared memory. Each 4-byte chunk is first
+ * memcpy'd into a u32 (CPU/LSB format) and then converted to big endian
+ * as required by the MCP write.
+ */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ memcpy(&val, &p_mfw_buf[offset], sizeof(u32));
+ val = cpu_to_be32(val);
+ qed_wr(p_hwfn, p_ptt, addr + offset, val);
+ }
+
+drv_done:
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp,
+ &param);
+
+ vfree(p_mfw_buf);
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index fb7c2d1562ae..6acfd43c1a4f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -848,7 +848,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
if (!(qp->resp_offloaded)) {
DP_NOTICE(p_hwfn,
- "The responder's qp should be offloded before requester's\n");
+ "The responder's qp should be offloaded before requester's\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index ab4ad8a1e2a5..e95431f6acd4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -416,7 +416,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param p_ptt
* @param p_tunn
- * @param mode
* @param allow_npar_tx_switch
*
* @return int
@@ -425,7 +424,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
- enum qed_mf_mode mode, bool allow_npar_tx_switch);
+ bool allow_npar_tx_switch);
/**
* @brief qed_sp_pf_update - PF Function Update Ramrod
@@ -463,6 +462,15 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);
* @return int
*/
+/**
+ * @brief qed_sp_pf_update_ufp - PF UFP update Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);
+
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 5e927b6cac22..8de644b4721e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -306,7 +306,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
- enum qed_mf_mode mode, bool allow_npar_tx_switch)
+ bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn);
@@ -314,7 +314,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
- u8 page_cnt;
+ u8 page_cnt, i;
/* update initial eq producer */
qed_eq_prod_update(p_hwfn,
@@ -339,21 +339,36 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = cpu_to_le16(0xf);
- switch (mode) {
- case QED_MF_DEFAULT:
- case QED_MF_NPAR:
- p_ramrod->mf_mode = MF_NPAR;
- break;
- case QED_MF_OVLAN:
+ if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
p_ramrod->mf_mode = MF_OVLAN;
- break;
- default:
- DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+ else
p_ramrod->mf_mode = MF_NPAR;
- }
p_ramrod->outer_tag_config.outer_tag.tci =
- cpu_to_le16(p_hwfn->hw_info.ovlan);
+ cpu_to_le16(p_hwfn->hw_info.ovlan);
+ if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
+ p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
+ } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
+ p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+ }
+
+ p_ramrod->outer_tag_config.pri_map_valid = 1;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
+
+ /* enable_stag_pri_change should be set if the port is in BD mode or
+ * in UFP with Host Control mode.
+ */
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
+ if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+ else
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
+
+ p_ramrod->outer_tag_config.outer_tag.tci |=
+ cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
+ }
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -365,7 +380,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
- if (IS_MF_SI(p_hwfn))
+ if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
switch (p_hwfn->hw_info.personality) {
@@ -434,6 +449,39 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EOPNOTSUPP;
+
+ if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
+ DP_INFO(p_hwfn, "Invalid priority type %d\n",
+ p_hwfn->ufp_info.pri_type);
+ return -EINVAL;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
+ if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
+ p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
+ else
+ p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 5acb91b3564c..f01bf52bc381 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -48,7 +48,7 @@ static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
u8 opcode,
__le16 echo,
union event_ring_data *data, u8 fw_return_code);
-
+static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
@@ -1790,7 +1790,8 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
if (!p_vf->vport_instance)
return -EINVAL;
- if (events & BIT(MAC_ADDR_FORCED)) {
+ if ((events & BIT(MAC_ADDR_FORCED)) ||
+ p_vf->p_vf_info.is_trusted_configured) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
@@ -1809,8 +1810,12 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
"PF failed to configure MAC for VF\n");
return rc;
}
-
- p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ if (p_vf->p_vf_info.is_trusted_configured)
+ p_vf->configured_features |=
+ BIT(VFPF_BULLETIN_MAC_ADDR);
+ else
+ p_vf->configured_features |=
+ BIT(MAC_ADDR_FORCED);
}
if (events & BIT(VLAN_ADDR_FORCED)) {
@@ -3170,6 +3175,10 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
return 0;
+ /* Don't keep track of the shadow copy since we don't intend to restore it. */
+ if (p_vf->p_vf_info.is_trusted_configured)
+ return 0;
+
/* First remove entries and then add new ones */
if (p_params->opcode == QED_FILTER_REMOVE) {
for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
@@ -3244,9 +3253,17 @@ static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
/* No real decision to make; Store the configured MAC */
if (params->type == QED_FILTER_MAC ||
- params->type == QED_FILTER_MAC_VLAN)
+ params->type == QED_FILTER_MAC_VLAN) {
ether_addr_copy(vf->mac, params->mac);
+ if (vf->is_trusted_configured) {
+ qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);
+
+ /* Update and post the bulletin again */
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+ }
+
return 0;
}
@@ -3803,6 +3820,40 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}
+static int
+qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct vfpf_bulletin_update_mac_tlv *p_req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc = 0;
+
+ if (!p_vf->p_vf_info.is_trusted_configured) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Blocking bulletin update request from untrusted VF[%d]\n",
+ p_vf->abs_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ rc = -EINVAL;
+ goto send_status;
+ }
+
+ p_req = &mbx->req_virt->bulletin_update_mac;
+ ether_addr_copy(p_bulletin->mac, p_req->mac);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
+ p_vf->abs_vf_id, p_req->mac);
+
+send_status:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ CHANNEL_TLV_BULLETIN_UPDATE_MAC,
+ sizeof(struct pfvf_def_resp_tlv), status);
+ return rc;
+}
+
static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid)
{
@@ -3882,6 +3933,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
case CHANNEL_TLV_COALESCE_READ:
qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_BULLETIN_UPDATE_MAC:
+ qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
@@ -4081,16 +4135,60 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
return;
}
- feature = 1 << MAC_ADDR_FORCED;
+ if (vf_info->p_vf_info.is_trusted_configured) {
+ feature = BIT(VFPF_BULLETIN_MAC_ADDR);
+ /* Trust mode will disable Forced MAC */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~BIT(MAC_ADDR_FORCED);
+ } else {
+ feature = BIT(MAC_ADDR_FORCED);
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~BIT(VFPF_BULLETIN_MAC_ADDR);
+ }
+
memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
vf_info->bulletin.p_virt->valid_bitmap |= feature;
- /* Forced MAC will disable MAC_ADDR */
- vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
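+/* Post a (non-forced) MAC address on the VF bulletin board; fails if a
+ * forced MAC is already configured for the VF.
+ */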
+static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
+ vfid);
+ return -EINVAL;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
+ vfid);
+ return -EINVAL;
+ }
+
+ if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Can not set MAC, Forced MAC is configured\n");
+ return -EINVAL;
+ }
+
+ feature = BIT(VFPF_BULLETIN_MAC_ADDR);
+ ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+ if (vf_info->p_vf_info.is_trusted_configured)
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+
+ return 0;
+}
+
static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
u16 pvid, int vfid)
{
@@ -4204,6 +4302,21 @@ out:
return rc;
}
+static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap &
+ BIT(VFPF_BULLETIN_MAC_ADDR)))
+ return NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
u16 rel_vf_id)
{
@@ -4493,8 +4606,12 @@ static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
if (!vf_info)
continue;
- /* Set the forced MAC, and schedule the IOV task */
- ether_addr_copy(vf_info->forced_mac, mac);
+ /* Set the MAC, and schedule the IOV task */
+ if (vf_info->is_trusted_configured)
+ ether_addr_copy(vf_info->mac, mac);
+ else
+ ether_addr_copy(vf_info->forced_mac, mac);
+
qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
}
@@ -4802,6 +4919,33 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
qed_ptt_release(hwfn, ptt);
}
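+/* Return true when the MAC requested by the PF for this VF is valid and
+ * differs from the one currently posted on its bulletin board.
+ */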
+static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
+ u8 *mac,
+ struct qed_public_vf_info *info)
+{
+ if (info->is_trusted_configured) {
+ if (is_valid_ether_addr(info->mac) &&
+ (!mac || !ether_addr_equal(mac, info->mac)))
+ return true;
+ } else {
+ if (is_valid_ether_addr(info->forced_mac) &&
+ (!mac || !ether_addr_equal(mac, info->forced_mac)))
+ return true;
+ }
+
+ return false;
+}
+
+static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
+ struct qed_public_vf_info *info,
+ int vfid)
+{
+ if (info->is_trusted_configured)
+ qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
+ else
+ qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
+}
+
static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
int i;
@@ -4816,18 +4960,20 @@ static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
continue;
/* Update data on bulletin board */
- mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
- if (is_valid_ether_addr(info->forced_mac) &&
- (!mac || !ether_addr_equal(mac, info->forced_mac))) {
+ if (info->is_trusted_configured)
+ mac = qed_iov_bulletin_get_mac(hwfn, i);
+ else
+ mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
+
+ if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
DP_VERBOSE(hwfn,
QED_MSG_IOV,
"Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
i,
hwfn->cdev->p_iov_info->first_vf_in_pf + i);
- /* Update bulletin board with forced MAC */
- qed_iov_bulletin_set_forced_mac(hwfn,
- info->forced_mac, i);
+ /* Update bulletin board with MAC */
+ qed_set_bulletin_mac(hwfn, info, i);
update = true;
}
@@ -4867,6 +5013,72 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
qed_ptt_release(hwfn, ptt);
}
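+/* Migrate the VF MAC configuration when its trust state changes: convert a
+ * forced MAC into a regular bulletin MAC when trust is turned on, and update
+ * the shadow config and clear the bulletin MAC when trust is turned off.
+ */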
+static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
+{
+ struct qed_public_vf_info *vf_info;
+ struct qed_vf_info *vf;
+ u8 *force_mac;
+ int i;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+ vf = qed_iov_get_vf_info(hwfn, vf_id, true);
+
+ if (!vf_info || !vf)
+ return;
+
+ /* Forced MAC is converted to a generic MAC when VF trust is turned on */
+ if (vf_info->is_trusted_configured &&
+ (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
+ force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);
+
+ if (force_mac) {
+ /* Clear existing shadow copy of MAC to have a clean
+ * slate.
+ */
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (ether_addr_equal(vf->shadow_config.macs[i],
+ vf_info->mac)) {
+ memset(vf->shadow_config.macs[i], 0,
+ ETH_ALEN);
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
+ vf_info->mac, vf_id);
+ break;
+ }
+ }
+
+ ether_addr_copy(vf_info->mac, force_mac);
+ memset(vf_info->forced_mac, 0, ETH_ALEN);
+ vf->bulletin.p_virt->valid_bitmap &=
+ ~BIT(MAC_ADDR_FORCED);
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+ }
+
+ /* Update shadow copy with VF MAC when trust mode is turned off */
+ if (!vf_info->is_trusted_configured) {
+ u8 empty_mac[ETH_ALEN];
+
+ memset(empty_mac, 0, ETH_ALEN);
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (ether_addr_equal(vf->shadow_config.macs[i],
+ empty_mac)) {
+ ether_addr_copy(vf->shadow_config.macs[i],
+ vf_info->mac);
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
+ vf_info->mac, vf_id);
+ break;
+ }
+ }
+ /* Clear bulletin when trust mode is turned off,
+ * to have a clean slate for next (normal) operations.
+ */
+ qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+}
+
static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
{
struct qed_sp_vport_update_params params;
@@ -4890,6 +5102,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
continue;
vf_info->is_trusted_configured = vf_info->is_trusted_request;
+ /* Handle forced MAC mode */
+ qed_update_mac_for_vf_trust_change(hwfn, i);
+
/* Validate that the VF has a configured vport */
vf = qed_iov_get_vf_info(hwfn, i, true);
if (!vf->vport_instance)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 91b5e9f02a62..2d7fcd6a0777 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -1375,6 +1375,35 @@ exit:
}
int
+qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
+ u8 *p_mac)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_bulletin_update_mac_tlv *p_req;
+ struct pfvf_def_resp_tlv *p_resp;
+ int rc;
+
+ if (!p_mac)
+ return -EINVAL;
+
+ /* clear mailbox and prep header tlv */
+ p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC,
+ sizeof(*p_req));
+ ether_addr_copy(p_req->mac, p_mac);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Requesting bulletin update for MAC[%pM]\n", p_mac);
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+ qed_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
+int
qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 97d44dfb38ca..4f05d5eb3cf5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -518,6 +518,12 @@ struct pfvf_read_coal_resp_tlv {
u8 padding[6];
};
+struct vfpf_bulletin_update_mac_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u8 mac[ETH_ALEN];
+ u8 padding[2];
+};
+
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -532,6 +538,7 @@ union vfpf_tlvs {
struct vfpf_update_tunn_param_tlv tunn_param_update;
struct vfpf_update_coalesce update_coalesce;
struct vfpf_read_coal_req_tlv read_coal_req;
+ struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
struct tlv_buffer_size tlv_buf_size;
};
@@ -650,6 +657,7 @@ enum {
CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_QID,
CHANNEL_TLV_COALESCE_READ,
+ CHANNEL_TLV_BULLETIN_UPDATE_MAC,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
@@ -1042,6 +1050,13 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
struct qed_tunnel_info *p_tunn);
u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
+/**
+ * @brief - Ask PF to update the MAC address in its bulletin board
+ *
+ * @param p_mac - mac address to be updated in bulletin board
+ */
+int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac);
+
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params)
@@ -1228,6 +1243,12 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
+static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
+ u8 *p_mac)
+{
+ return -EINVAL;
+}
+
static inline u32
qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
enum BAR_ID bar_id)
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 9935978c5542..2d3f09ed413b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -290,15 +290,12 @@ struct qede_agg_info {
* aggregation.
*/
struct sw_rx_data buffer;
- dma_addr_t buffer_mapping;
-
struct sk_buff *skb;
/* We need some structs from the start cookie until termination */
u16 vlan_tag;
- u16 start_cqe_bd_len;
- u8 start_cqe_placement_offset;
+ bool tpa_start_fail;
u8 state;
u8 frag_id;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index ecbf1ded7a39..8c6fdad91986 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1508,7 +1508,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
len = le16_to_cpu(fp_cqe->len_on_first_bd);
data_ptr = (u8 *)(page_address(sw_rx_data->data) +
fp_cqe->placement_offset +
- sw_rx_data->page_offset);
+ sw_rx_data->page_offset +
+ rxq->rx_headroom);
if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
ether_addr_equal(data_ptr + ETH_ALEN,
edev->ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 6687e04d1558..43569b1839be 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -550,8 +550,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced)
__qede_lock(edev);
- /* MAC hints take effect only if we haven't set one already */
- if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) {
+ if (!is_valid_ether_addr(mac)) {
__qede_unlock(edev);
return;
}
@@ -1161,6 +1160,10 @@ int qede_set_mac_addr(struct net_device *ndev, void *p)
if (edev->state != QEDE_STATE_OPEN) {
DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
"The device is currently down\n");
+ /* Ask PF to explicitly update a copy in the bulletin board */
+ if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
+ edev->ops->req_bulletin_update_mac(edev->cdev,
+ ndev->dev_addr);
goto out;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 14941303189d..6c702399b801 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -660,7 +660,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
/* Add one frag and update the appropriate fields in the skb */
skb_fill_page_desc(skb, tpa_info->frag_id++,
- current_bd->data, current_bd->page_offset,
+ current_bd->data,
+ current_bd->page_offset + rxq->rx_headroom,
len_on_bd);
if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
@@ -671,8 +672,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
goto out;
}
- qed_chain_consume(&rxq->rx_bd_ring);
- rxq->sw_rx_cons++;
+ qede_rx_bd_ring_consume(rxq);
skb->data_len += len_on_bd;
skb->truesize += rxq->rx_buf_seg_size;
@@ -721,64 +721,129 @@ static u8 qede_check_tunn_csum(u16 flag)
return QEDE_CSUM_UNNECESSARY | tcsum;
}
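+/* Build an skb directly on top of the received buffer page (no copy),
+ * reserving the placement pad/headroom.
+ */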
+static inline struct sk_buff *
+qede_build_skb(struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len, u16 pad)
+{
+ struct sk_buff *skb;
+ void *buf;
+
+ buf = page_address(bd->data) + bd->page_offset;
+ skb = build_skb(buf, rxq->rx_buf_seg_size);
+
+ skb_reserve(skb, pad);
+ skb_put(skb, len);
+
+ return skb;
+}
+
+static struct sk_buff *
+qede_tpa_rx_build_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len, u16 pad,
+ bool alloc_skb)
+{
+ struct sk_buff *skb;
+
+ skb = qede_build_skb(rxq, bd, len, pad);
+ bd->page_offset += rxq->rx_buf_seg_size;
+
+ if (bd->page_offset == PAGE_SIZE) {
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+ DP_NOTICE(edev,
+ "Failed to allocate RX buffer for tpa start\n");
+ bd->page_offset -= rxq->rx_buf_seg_size;
+ page_ref_inc(bd->data);
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ } else {
+ page_ref_inc(bd->data);
+ qede_reuse_page(rxq, bd);
+ }
+
+ /* We've consumed the first BD and prepared an SKB */
+ qede_rx_bd_ring_consume(rxq);
+
+ return skb;
+}
+
+static struct sk_buff *
+qede_rx_build_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len, u16 pad)
+{
+ struct sk_buff *skb = NULL;
+
+ /* For smaller frames we still need to allocate an skb and memcpy the
+ * data, which lets us re-use the page segment instead of unmapping it.
+ */
+ if ((len + pad <= edev->rx_copybreak)) {
+ unsigned int offset = bd->page_offset + pad;
+
+ skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, pad);
+ memcpy(skb_put(skb, len),
+ page_address(bd->data) + offset, len);
+ qede_reuse_page(rxq, bd);
+ goto out;
+ }
+
+ skb = qede_build_skb(rxq, bd, len, pad);
+
+ if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+ /* Incr page ref count to reuse on allocation failure so
+ * that it doesn't get freed while freeing SKB [as it's
+ * already mapped there].
+ */
+ page_ref_inc(bd->data);
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+out:
+ /* We've consumed the first BD and prepared an SKB */
+ qede_rx_bd_ring_consume(rxq);
+
+ return skb;
+}
+
static void qede_tpa_start(struct qede_dev *edev,
struct qede_rx_queue *rxq,
struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
- struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
- struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
- struct sw_rx_data *replace_buf = &tpa_info->buffer;
- dma_addr_t mapping = tpa_info->buffer_mapping;
struct sw_rx_data *sw_rx_data_cons;
- struct sw_rx_data *sw_rx_data_prod;
+ u16 pad;
sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
- sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ pad = cqe->placement_offset + rxq->rx_headroom;
- /* Use pre-allocated replacement buffer - we can't release the agg.
- * start until its over and we don't want to risk allocation failing
- * here, so re-allocate when aggregation will be over.
- */
- sw_rx_data_prod->mapping = replace_buf->mapping;
-
- sw_rx_data_prod->data = replace_buf->data;
- rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
- rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
- sw_rx_data_prod->page_offset = replace_buf->page_offset;
-
- rxq->sw_rx_prod++;
+ tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
+ le16_to_cpu(cqe->len_on_first_bd),
+ pad, false);
+ tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
+ tpa_info->buffer.mapping = sw_rx_data_cons->mapping;
- /* move partial skb from cons to pool (don't unmap yet)
- * save mapping, incase we drop the packet later on.
- */
- tpa_info->buffer = *sw_rx_data_cons;
- mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
- le32_to_cpu(rx_bd_cons->addr.lo));
-
- tpa_info->buffer_mapping = mapping;
- rxq->sw_rx_cons++;
-
- /* set tpa state to start only if we are able to allocate skb
- * for this aggregation, otherwise mark as error and aggregation will
- * be dropped
- */
- tpa_info->skb = netdev_alloc_skb(edev->ndev,
- le16_to_cpu(cqe->len_on_first_bd));
if (unlikely(!tpa_info->skb)) {
DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
+
+ /* Consume from the ring but do not produce, since this buffer
+ * might still be used by FW; it will be re-used at TPA end.
+ */
+ tpa_info->tpa_start_fail = true;
+ qede_rx_bd_ring_consume(rxq);
tpa_info->state = QEDE_AGG_STATE_ERROR;
goto cons_buf;
}
- /* Start filling in the aggregation info */
- skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
tpa_info->frag_id = 0;
tpa_info->state = QEDE_AGG_STATE_START;
- /* Store some information from first CQE */
- tpa_info->start_cqe_placement_offset = cqe->placement_offset;
- tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
if ((le16_to_cpu(cqe->pars_flags.flags) >>
PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
@@ -899,6 +964,10 @@ static int qede_tpa_end(struct qede_dev *edev,
tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
skb = tpa_info->skb;
+ if (tpa_info->buffer.page_offset == PAGE_SIZE)
+ dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
+ PAGE_SIZE, rxq->data_direction);
+
for (i = 0; cqe->len_list[i]; i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
@@ -919,11 +988,6 @@ static int qede_tpa_end(struct qede_dev *edev,
"Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
le16_to_cpu(cqe->total_packet_len), skb->len);
- memcpy(skb->data,
- page_address(tpa_info->buffer.data) +
- tpa_info->start_cqe_placement_offset +
- tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
-
/* Finalize the SKB */
skb->protocol = eth_type_trans(skb, edev->ndev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -940,6 +1004,12 @@ static int qede_tpa_end(struct qede_dev *edev,
return 1;
err:
tpa_info->state = QEDE_AGG_STATE_NONE;
+
+ if (tpa_info->tpa_start_fail) {
+ qede_reuse_page(rxq, &tpa_info->buffer);
+ tpa_info->tpa_start_fail = false;
+ }
+
dev_kfree_skb_any(tpa_info->skb);
tpa_info->skb = NULL;
return 0;
@@ -1058,65 +1128,6 @@ static bool qede_rx_xdp(struct qede_dev *edev,
return false;
}
-static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
- struct qede_rx_queue *rxq,
- struct sw_rx_data *bd, u16 len,
- u16 pad)
-{
- unsigned int offset = bd->page_offset + pad;
- struct skb_frag_struct *frag;
- struct page *page = bd->data;
- unsigned int pull_len;
- struct sk_buff *skb;
- unsigned char *va;
-
- /* Allocate a new SKB with a sufficient large header len */
- skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
- if (unlikely(!skb))
- return NULL;
-
- /* Copy data into SKB - if it's small, we can simply copy it and
- * re-use the already allcoated & mapped memory.
- */
- if (len + pad <= edev->rx_copybreak) {
- skb_put_data(skb, page_address(page) + offset, len);
- qede_reuse_page(rxq, bd);
- goto out;
- }
-
- frag = &skb_shinfo(skb)->frags[0];
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, offset, len, rxq->rx_buf_seg_size);
-
- va = skb_frag_address(frag);
- pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
-
- /* Align the pull_len to optimize memcpy */
- memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
-
- /* Correct the skb & frag sizes offset after the pull */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-
- if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
- /* Incr page ref count to reuse on allocation failure so
- * that it doesn't get freed while freeing SKB [as its
- * already mapped there].
- */
- page_ref_inc(page);
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
-out:
- /* We've consumed the first BD and prepared an SKB */
- qede_rx_bd_ring_consume(rxq);
- return skb;
-}
-
static int qede_rx_build_jumbo(struct qede_dev *edev,
struct qede_rx_queue *rxq,
struct sk_buff *skb,
@@ -1157,7 +1168,7 @@ static int qede_rx_build_jumbo(struct qede_dev *edev,
PAGE_SIZE, DMA_FROM_DEVICE);
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
- bd->data, 0, cur_size);
+ bd->data, rxq->rx_headroom, cur_size);
skb->truesize += PAGE_SIZE;
skb->data_len += cur_size;
@@ -1256,7 +1267,7 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
/* Basic validation passed; Need to prepare an SKB. This would also
* guarantee to finally consume the first BD upon success.
*/
- skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
+ skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
if (!skb) {
rxq->rx_alloc_errors++;
qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index a01e7d6e5442..9e70f713c3c5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -133,6 +133,9 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
+static void qede_get_eth_tlv_data(void *edev, void *data);
+static void qede_get_generic_tlv_data(void *edev,
+ struct qed_generic_tlvs *data);
/* The qede lock is used to protect driver state change and driver flows that
* are not reentrant.
@@ -199,7 +202,7 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
/* Enable/Disable Tx switching for PF */
if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
- qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
+ !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
vport_params->vport_id = 0;
vport_params->update_tx_switching_flg = 1;
vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
@@ -228,6 +231,8 @@ static struct qed_eth_cb_ops qede_ll_ops = {
.arfs_filter_op = qede_arfs_filter_op,
#endif
.link_update = qede_link_update,
+ .get_generic_tlv_data = qede_get_generic_tlv_data,
+ .get_protocol_tlv_data = qede_get_eth_tlv_data,
},
.force_mac = qede_force_mac,
.ports_update = qede_udp_ports_update,
@@ -1066,13 +1071,12 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
DP_INFO(edev, "Starting qede_remove\n");
+ qede_rdma_dev_remove(edev);
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);
qede_ptp_disable(edev);
- qede_rdma_dev_remove(edev);
-
edev->ops->common->set_power_state(cdev, PCI_D0);
pci_set_drvdata(pdev, NULL);
@@ -1197,30 +1201,8 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
}
}
-static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
-{
- int i;
-
- if (edev->gro_disable)
- return;
-
- for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
- struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
- struct sw_rx_data *replace_buf = &tpa_info->buffer;
-
- if (replace_buf->data) {
- dma_unmap_page(&edev->pdev->dev,
- replace_buf->mapping,
- PAGE_SIZE, DMA_FROM_DEVICE);
- __free_page(replace_buf->data);
- }
- }
-}
-
static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
- qede_free_sge_mem(edev, rxq);
-
/* Free rx buffers */
qede_free_rx_buffers(edev, rxq);
@@ -1232,45 +1214,15 @@ static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}
-static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
+static void qede_set_tpa_param(struct qede_rx_queue *rxq)
{
- dma_addr_t mapping;
int i;
- if (edev->gro_disable)
- return 0;
-
for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
- struct sw_rx_data *replace_buf = &tpa_info->buffer;
- replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
- if (unlikely(!replace_buf->data)) {
- DP_NOTICE(edev,
- "Failed to allocate TPA skb pool [replacement buffer]\n");
- goto err;
- }
-
- mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
- DP_NOTICE(edev,
- "Failed to map TPA replacement buffer\n");
- goto err;
- }
-
- replace_buf->mapping = mapping;
- tpa_info->buffer.page_offset = 0;
- tpa_info->buffer_mapping = mapping;
tpa_info->state = QEDE_AGG_STATE_NONE;
}
-
- return 0;
-err:
- qede_free_sge_mem(edev, rxq);
- edev->gro_disable = 1;
- edev->ndev->features &= ~NETIF_F_GRO_HW;
- return -ENOMEM;
}
/* This function allocates all memory needed per Rx queue */
@@ -1281,19 +1233,24 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
rxq->num_rx_buffers = edev->q_num_rx_buffers;
rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
- rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
+
+ rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
+ size = rxq->rx_headroom +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/* Make sure that the headroom and payload fit in a single page */
- if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
- rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
+ if (rxq->rx_buf_size + size > PAGE_SIZE)
+ rxq->rx_buf_size = PAGE_SIZE - size;
- /* Segment size to spilt a page in multiple equal parts,
+ /* Segment size to split a page into multiple equal parts,
* unless XDP is used in which case we'd use the entire page.
*/
- if (!edev->xdp_prog)
- rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
- else
+ if (!edev->xdp_prog) {
+ size = size + rxq->rx_buf_size;
+ rxq->rx_buf_seg_size = roundup_pow_of_two(size);
+ } else {
rxq->rx_buf_seg_size = PAGE_SIZE;
+ }
/* Allocate the parallel driver ring for Rx buffers */
size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
@@ -1337,7 +1294,8 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
}
}
- rc = qede_alloc_sge_mem(edev, rxq);
+ if (!edev->gro_disable)
+ qede_set_tpa_param(rxq);
err:
return rc;
}
@@ -1928,7 +1886,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
vport_update_params->update_vport_active_flg = 1;
vport_update_params->vport_active_flg = 1;
- if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+ if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
qed_info->tx_switching) {
vport_update_params->update_tx_switching_flg = 1;
vport_update_params->tx_switching_flg = 1;
@@ -2178,3 +2136,99 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
}
}
}
+
+static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ struct netdev_queue *netdev_txq;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+ if (netif_xmit_stopped(netdev_txq))
+ return true;
+
+ return false;
+}
+
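+/* Fill the generic TLVs reported to the MFW: offload feature flags and up to
+ * three unicast MAC addresses of the device.
+ */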
+static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
+{
+ struct qede_dev *edev = dev;
+ struct netdev_hw_addr *ha;
+ int i;
+
+ if (edev->ndev->features & NETIF_F_IP_CSUM)
+ data->feat_flags |= QED_TLV_IP_CSUM;
+ if (edev->ndev->features & NETIF_F_TSO)
+ data->feat_flags |= QED_TLV_LSO;
+
+ ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
+ memset(data->mac[1], 0, ETH_ALEN);
+ memset(data->mac[2], 0, ETH_ALEN);
+ /* Copy the first two UC macs */
+ netif_addr_lock_bh(edev->ndev);
+ i = 1;
+ netdev_for_each_uc_addr(ha, edev->ndev) {
+ ether_addr_copy(data->mac[i++], ha->addr);
+ if (i == QED_TLV_MAC_COUNT)
+ break;
+ }
+
+ netif_addr_unlock_bh(edev->ndev);
+}
+
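+/* Fill the L2 protocol TLVs reported to the MFW with the current device and
+ * queue state (LSO/promisc settings, queue counts and occupancy).
+ */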
+static void qede_get_eth_tlv_data(void *dev, void *data)
+{
+ struct qed_mfw_tlv_eth *etlv = data;
+ struct qede_dev *edev = dev;
+ struct qede_fastpath *fp;
+ int i;
+
+ etlv->lso_maxoff_size = 0xFFFF;
+ etlv->lso_maxoff_size_set = true;
+ etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
+ etlv->lso_minseg_size_set = true;
+ etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
+ etlv->prom_mode_set = true;
+ etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
+ etlv->tx_descr_size_set = true;
+ etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
+ etlv->rx_descr_size_set = true;
+ etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
+ etlv->iov_offload_set = true;
+
+ /* Fill information regarding queues; this should be done under the
+ * qede lock to guarantee they don't change beneath our feet.
+ */
+ etlv->txqs_empty = true;
+ etlv->rxqs_empty = true;
+ etlv->num_txqs_full = 0;
+ etlv->num_rxqs_full = 0;
+
+ __qede_lock(edev);
+ for_each_queue(i) {
+ fp = &edev->fp_array[i];
+ if (fp->type & QEDE_FASTPATH_TX) {
+ if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod)
+ etlv->txqs_empty = false;
+ if (qede_is_txq_full(edev, fp->txq))
+ etlv->num_txqs_full++;
+ }
+ if (fp->type & QEDE_FASTPATH_RX) {
+ if (qede_has_rx_work(fp->rxq))
+ etlv->rxqs_empty = false;
+
+ /* This one is a bit tricky; Firmware might stop
+ * placing packets if ring is not yet full.
+ * Give an approximation.
+ */
+ if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
+ qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
+ RX_RING_SIZE - 100)
+ etlv->num_rxqs_full++;
+ }
+ }
+ __qede_unlock(edev);
+
+ etlv->txqs_empty_set = true;
+ etlv->rxqs_empty_set = true;
+ etlv->num_txqs_full_set = true;
+ etlv->num_rxqs_full_set = true;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index 50b142fad6b8..1900bf7e67d1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -238,7 +238,7 @@ qede_rdma_get_free_event_node(struct qede_dev *edev)
}
if (!found) {
- event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
+ event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
if (!event_node) {
DP_NOTICE(edev,
"qedr: Could not allocate memory for rdma work\n");
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index d5a32b7c7dc5..031f6e6ee9c1 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -683,10 +683,11 @@ static int emac_tx_q_desc_alloc(struct emac_adapter *adpt,
struct emac_tx_queue *tx_q)
{
struct emac_ring_header *ring_header = &adpt->ring_header;
+ int node = dev_to_node(adpt->netdev->dev.parent);
size_t size;
size = sizeof(struct emac_buffer) * tx_q->tpd.count;
- tx_q->tpd.tpbuff = kzalloc(size, GFP_KERNEL);
+ tx_q->tpd.tpbuff = kzalloc_node(size, GFP_KERNEL, node);
if (!tx_q->tpd.tpbuff)
return -ENOMEM;
@@ -723,11 +724,12 @@ static void emac_rx_q_bufs_free(struct emac_adapter *adpt)
static int emac_rx_descs_alloc(struct emac_adapter *adpt)
{
struct emac_ring_header *ring_header = &adpt->ring_header;
+ int node = dev_to_node(adpt->netdev->dev.parent);
struct emac_rx_queue *rx_q = &adpt->rx_q;
size_t size;
size = sizeof(struct emac_buffer) * rx_q->rfd.count;
- rx_q->rfd.rfbuff = kzalloc(size, GFP_KERNEL);
+ rx_q->rfd.rfbuff = kzalloc_node(size, GFP_KERNEL, node);
if (!rx_q->rfd.rfbuff)
return -ENOMEM;
@@ -920,14 +922,13 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
static void emac_adjust_link(struct net_device *netdev)
{
struct emac_adapter *adpt = netdev_priv(netdev);
- struct emac_sgmii *sgmii = &adpt->phy;
struct phy_device *phydev = netdev->phydev;
if (phydev->link) {
emac_mac_start(adpt);
- sgmii->link_up(adpt);
+ emac_sgmii_link_change(adpt, true);
} else {
- sgmii->link_down(adpt);
+ emac_sgmii_link_change(adpt, false);
emac_mac_stop(adpt);
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index e8ab512ee7e3..562420b834df 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -53,6 +53,46 @@
#define SERDES_START_WAIT_TIMES 100
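+/* NULL-safe wrappers around the optional internal PHY sgmii_ops callbacks;
+ * systems without an internal PHY have no ops installed, so each call is a
+ * no-op there.
+ */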
+int emac_sgmii_init(struct emac_adapter *adpt)
+{
+ if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->init))
+ return 0;
+
+ return adpt->phy.sgmii_ops->init(adpt);
+}
+
+int emac_sgmii_open(struct emac_adapter *adpt)
+{
+ if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->open))
+ return 0;
+
+ return adpt->phy.sgmii_ops->open(adpt);
+}
+
+void emac_sgmii_close(struct emac_adapter *adpt)
+{
+ if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->close))
+ return;
+
+ adpt->phy.sgmii_ops->close(adpt);
+}
+
+int emac_sgmii_link_change(struct emac_adapter *adpt, bool link_state)
+{
+ if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->link_change))
+ return 0;
+
+ return adpt->phy.sgmii_ops->link_change(adpt, link_state);
+}
+
+void emac_sgmii_reset(struct emac_adapter *adpt)
+{
+ if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->reset))
+ return;
+
+ adpt->phy.sgmii_ops->reset(adpt);
+}
+
/* Initialize the SGMII link between the internal and external PHYs. */
static void emac_sgmii_link_init(struct emac_adapter *adpt)
{
@@ -163,21 +203,21 @@ static void emac_sgmii_reset_prepare(struct emac_adapter *adpt)
msleep(50);
}
-void emac_sgmii_reset(struct emac_adapter *adpt)
+static void emac_sgmii_common_reset(struct emac_adapter *adpt)
{
int ret;
emac_sgmii_reset_prepare(adpt);
emac_sgmii_link_init(adpt);
- ret = adpt->phy.initialize(adpt);
+ ret = emac_sgmii_init(adpt);
if (ret)
netdev_err(adpt->netdev,
"could not reinitialize internal PHY (error=%i)\n",
ret);
}
-static int emac_sgmii_open(struct emac_adapter *adpt)
+static int emac_sgmii_common_open(struct emac_adapter *adpt)
{
struct emac_sgmii *sgmii = &adpt->phy;
int ret;
@@ -201,43 +241,53 @@ static int emac_sgmii_open(struct emac_adapter *adpt)
return 0;
}
-static int emac_sgmii_close(struct emac_adapter *adpt)
+static void emac_sgmii_common_close(struct emac_adapter *adpt)
{
struct emac_sgmii *sgmii = &adpt->phy;
/* Make sure interrupts are disabled */
writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
free_irq(sgmii->irq, adpt);
-
- return 0;
}
/* The error interrupts are only valid after the link is up */
-static int emac_sgmii_link_up(struct emac_adapter *adpt)
+static int emac_sgmii_common_link_change(struct emac_adapter *adpt, bool linkup)
{
struct emac_sgmii *sgmii = &adpt->phy;
int ret;
- /* Clear and enable interrupts */
- ret = emac_sgmii_irq_clear(adpt, 0xff);
- if (ret)
- return ret;
+ if (linkup) {
+ /* Clear and enable interrupts */
+ ret = emac_sgmii_irq_clear(adpt, 0xff);
+ if (ret)
+ return ret;
- writel(SGMII_ISR_MASK, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+ writel(SGMII_ISR_MASK,
+ sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+ } else {
+ /* Disable interrupts */
+ writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+ synchronize_irq(sgmii->irq);
+ }
return 0;
}
-static int emac_sgmii_link_down(struct emac_adapter *adpt)
-{
- struct emac_sgmii *sgmii = &adpt->phy;
-
- /* Disable interrupts */
- writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
- synchronize_irq(sgmii->irq);
+static struct sgmii_ops qdf2432_ops = {
+ .init = emac_sgmii_init_qdf2432,
+ .open = emac_sgmii_common_open,
+ .close = emac_sgmii_common_close,
+ .link_change = emac_sgmii_common_link_change,
+ .reset = emac_sgmii_common_reset,
+};
- return 0;
-}
+static struct sgmii_ops qdf2400_ops = {
+ .init = emac_sgmii_init_qdf2400,
+ .open = emac_sgmii_common_open,
+ .close = emac_sgmii_common_close,
+ .link_change = emac_sgmii_common_link_change,
+ .reset = emac_sgmii_common_reset,
+};
static int emac_sgmii_acpi_match(struct device *dev, void *data)
{
@@ -249,7 +299,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
{}
};
const struct acpi_device_id *id = acpi_match_device(match_table, dev);
- emac_sgmii_function *initialize = data;
+ struct sgmii_ops **ops = data;
if (id) {
acpi_handle handle = ACPI_HANDLE(dev);
@@ -270,10 +320,10 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
switch (hrv) {
case 1:
- *initialize = emac_sgmii_init_qdf2432;
+ *ops = &qdf2432_ops;
return 1;
case 2:
- *initialize = emac_sgmii_init_qdf2400;
+ *ops = &qdf2400_ops;
return 1;
}
}
@@ -294,14 +344,6 @@ static const struct of_device_id emac_sgmii_dt_match[] = {
{}
};
-/* Dummy function for systems without an internal PHY. This avoids having
- * to check for NULL pointers before calling the functions.
- */
-static int emac_sgmii_dummy(struct emac_adapter *adpt)
-{
- return 0;
-}
-
int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
{
struct platform_device *sgmii_pdev = NULL;
@@ -312,22 +354,11 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
if (has_acpi_companion(&pdev->dev)) {
struct device *dev;
- dev = device_find_child(&pdev->dev, &phy->initialize,
+ dev = device_find_child(&pdev->dev, &phy->sgmii_ops,
emac_sgmii_acpi_match);
if (!dev) {
dev_warn(&pdev->dev, "cannot find internal phy node\n");
- /* There is typically no internal PHY on emulation
- * systems, so if we can't find the node, assume
- * we are on an emulation system and stub-out
- * support for the internal PHY. These systems only
- * use ACPI.
- */
- phy->open = emac_sgmii_dummy;
- phy->close = emac_sgmii_dummy;
- phy->link_up = emac_sgmii_dummy;
- phy->link_down = emac_sgmii_dummy;
-
return 0;
}
@@ -355,14 +386,9 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
goto error_put_device;
}
- phy->initialize = (emac_sgmii_function)match->data;
+ phy->sgmii_ops->init = match->data;
}
- phy->open = emac_sgmii_open;
- phy->close = emac_sgmii_close;
- phy->link_up = emac_sgmii_link_up;
- phy->link_down = emac_sgmii_link_down;
-
/* Base address is the first address */
res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -386,7 +412,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
}
}
- ret = phy->initialize(adpt);
+ ret = emac_sgmii_init(adpt);
if (ret)
goto error;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
index e7c0c3b2baa4..31ba21eb61d2 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
@@ -16,36 +16,44 @@
struct emac_adapter;
struct platform_device;
-typedef int (*emac_sgmii_function)(struct emac_adapter *adpt);
+/** sgmii_ops - internal emac phy operations
+ * @init initialization function
+ * @open called when the driver is opened
+ * @close called when the driver is closed
+ * @link_change called when the link state changes
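+ * @reset called to reset the internal phy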
+ */
+struct sgmii_ops {
+ int (*init)(struct emac_adapter *adpt);
+ int (*open)(struct emac_adapter *adpt);
+ void (*close)(struct emac_adapter *adpt);
+ int (*link_change)(struct emac_adapter *adpt, bool link_state);
+ void (*reset)(struct emac_adapter *adpt);
+};
/** emac_sgmii - internal emac phy
* @base base address
* @digital per-lane digital block
* @irq the interrupt number
* @decode_error_count reference count of consecutive decode errors
- * @initialize initialization function
- * @open called when the driver is opened
- * @close called when the driver is closed
- * @link_up called when the link comes up
- * @link_down called when the link comes down
+ * @sgmii_ops sgmii ops
*/
struct emac_sgmii {
void __iomem *base;
void __iomem *digital;
unsigned int irq;
atomic_t decode_error_count;
- emac_sgmii_function initialize;
- emac_sgmii_function open;
- emac_sgmii_function close;
- emac_sgmii_function link_up;
- emac_sgmii_function link_down;
+ struct sgmii_ops *sgmii_ops;
};
int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt);
-void emac_sgmii_reset(struct emac_adapter *adpt);
int emac_sgmii_init_fsm9900(struct emac_adapter *adpt);
int emac_sgmii_init_qdf2432(struct emac_adapter *adpt);
int emac_sgmii_init_qdf2400(struct emac_adapter *adpt);
+int emac_sgmii_init(struct emac_adapter *adpt);
+int emac_sgmii_open(struct emac_adapter *adpt);
+void emac_sgmii_close(struct emac_adapter *adpt);
+int emac_sgmii_link_change(struct emac_adapter *adpt, bool link_state);
+void emac_sgmii_reset(struct emac_adapter *adpt);
#endif
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 13235baf4766..2a0cbc535a2e 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -253,7 +253,7 @@ static int emac_open(struct net_device *netdev)
return ret;
}
- ret = adpt->phy.open(adpt);
+ ret = emac_sgmii_open(adpt);
if (ret) {
emac_mac_rx_tx_rings_free_all(adpt);
free_irq(irq->irq, irq);
@@ -264,7 +264,7 @@ static int emac_open(struct net_device *netdev)
if (ret) {
emac_mac_rx_tx_rings_free_all(adpt);
free_irq(irq->irq, irq);
- adpt->phy.close(adpt);
+ emac_sgmii_close(adpt);
return ret;
}
@@ -278,7 +278,7 @@ static int emac_close(struct net_device *netdev)
mutex_lock(&adpt->reset_lock);
- adpt->phy.close(adpt);
+ emac_sgmii_close(adpt);
emac_mac_down(adpt);
emac_mac_rx_tx_rings_free_all(adpt);
@@ -761,11 +761,10 @@ static void emac_shutdown(struct platform_device *pdev)
{
struct net_device *netdev = dev_get_drvdata(&pdev->dev);
struct emac_adapter *adpt = netdev_priv(netdev);
- struct emac_sgmii *sgmii = &adpt->phy;
if (netdev->flags & IFF_UP) {
/* Closing the SGMII turns off its interrupts */
- sgmii->close(adpt);
+ emac_sgmii_close(adpt);
/* Resetting the MAC turns off all DMA and its interrupts */
emac_mac_reset(adpt);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index d33988570217..5f4e447c5dce 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -350,15 +350,16 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
real_dev = priv->real_dev;
- if (!rmnet_is_real_dev_registered(real_dev))
- return -ENODEV;
-
if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
goto nla_put_failure;
- port = rmnet_get_port_rtnl(real_dev);
+ if (rmnet_is_real_dev_registered(real_dev)) {
+ port = rmnet_get_port_rtnl(real_dev);
+ f.flags = port->data_format;
+ } else {
+ f.flags = 0;
+ }
- f.flags = port->data_format;
f.mask = ~0;
if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 0b5b5da80198..34ac45a774e7 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -54,11 +54,24 @@ struct rmnet_pcpu_stats {
struct u64_stats_sync syncp;
};
+struct rmnet_priv_stats {
+ u64 csum_ok;
+ u64 csum_valid_unset;
+ u64 csum_validation_failed;
+ u64 csum_err_bad_buffer;
+ u64 csum_err_invalid_ip_version;
+ u64 csum_err_invalid_transport;
+ u64 csum_fragmented_pkt;
+ u64 csum_skipped;
+ u64 csum_sw;
+};
+
struct rmnet_priv {
u8 mux_id;
struct net_device *real_dev;
struct rmnet_pcpu_stats __percpu *pcpu_stats;
struct gro_cells gro_cells;
+ struct rmnet_priv_stats stats;
};
struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 6fcd586e9804..7fd86d40a337 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -148,7 +148,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
- goto fail;
+ return -ENOMEM;
}
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
@@ -156,17 +156,13 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
if (!map_header)
- goto fail;
+ return -ENOMEM;
map_header->mux_id = mux_id;
skb->protocol = htons(ETH_P_MAP);
return 0;
-
-fail:
- kfree_skb(skb);
- return -ENOMEM;
}
static void
@@ -228,15 +224,18 @@ void rmnet_egress_handler(struct sk_buff *skb)
mux_id = priv->mux_id;
port = rmnet_get_port(skb->dev);
- if (!port) {
- kfree_skb(skb);
- return;
- }
+ if (!port)
+ goto drop;
if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
- return;
+ goto drop;
rmnet_vnd_tx_fixup(skb, orig_dev);
dev_queue_xmit(skb);
+ return;
+
+drop:
+ this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+ kfree_skb(skb);
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 78fdad0c6f76..56a93df962e6 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -69,17 +69,9 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
struct rmnet_map_control_command *cmd;
int xmit_status;
- if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
- if (skb->len < sizeof(struct rmnet_map_header) +
- RMNET_MAP_GET_LENGTH(skb) +
- sizeof(struct rmnet_map_dl_csum_trailer)) {
- kfree_skb(skb);
- return;
- }
-
- skb_trim(skb, skb->len -
- sizeof(struct rmnet_map_dl_csum_trailer));
- }
+ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+ skb_trim(skb,
+ skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
skb->protocol = htons(ETH_P_MAP);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index a6ea09416f8d..57a9c314a665 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -48,7 +48,8 @@ static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
- struct rmnet_map_dl_csum_trailer *csum_trailer)
+ struct rmnet_map_dl_csum_trailer *csum_trailer,
+ struct rmnet_priv *priv)
{
__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
u16 csum_value, csum_value_final;
@@ -58,19 +59,25 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
ip4h = (struct iphdr *)(skb->data);
if ((ntohs(ip4h->frag_off) & IP_MF) ||
- ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+ ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
+ priv->stats.csum_fragmented_pkt++;
return -EOPNOTSUPP;
+ }
txporthdr = skb->data + ip4h->ihl * 4;
csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
- if (!csum_field)
+ if (!csum_field) {
+ priv->stats.csum_err_invalid_transport++;
return -EPROTONOSUPPORT;
+ }
/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
- if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
+ if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
+ priv->stats.csum_skipped++;
return 0;
+ }
csum_value = ~ntohs(csum_trailer->csum_value);
hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
@@ -102,16 +109,20 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
}
}
- if (csum_value_final == ntohs((__force __be16)*csum_field))
+ if (csum_value_final == ntohs((__force __be16)*csum_field)) {
+ priv->stats.csum_ok++;
return 0;
- else
+ } else {
+ priv->stats.csum_validation_failed++;
return -EINVAL;
+ }
}
#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
- struct rmnet_map_dl_csum_trailer *csum_trailer)
+ struct rmnet_map_dl_csum_trailer *csum_trailer,
+ struct rmnet_priv *priv)
{
__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
u16 csum_value, csum_value_final;
@@ -125,8 +136,10 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
txporthdr = skb->data + sizeof(struct ipv6hdr);
csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
- if (!csum_field)
+ if (!csum_field) {
+ priv->stats.csum_err_invalid_transport++;
return -EPROTONOSUPPORT;
+ }
csum_value = ~ntohs(csum_trailer->csum_value);
ip6_hdr_csum = (__force __be16)
@@ -164,10 +177,13 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
}
}
- if (csum_value_final == ntohs((__force __be16)*csum_field))
+ if (csum_value_final == ntohs((__force __be16)*csum_field)) {
+ priv->stats.csum_ok++;
return 0;
- else
+ } else {
+ priv->stats.csum_validation_failed++;
return -EINVAL;
+ }
}
#endif
@@ -339,24 +355,34 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
*/
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
+ struct rmnet_priv *priv = netdev_priv(skb->dev);
struct rmnet_map_dl_csum_trailer *csum_trailer;
- if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ priv->stats.csum_sw++;
return -EOPNOTSUPP;
+ }
csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
- if (!csum_trailer->valid)
+ if (!csum_trailer->valid) {
+ priv->stats.csum_valid_unset++;
return -EINVAL;
+ }
- if (skb->protocol == htons(ETH_P_IP))
- return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
- else if (skb->protocol == htons(ETH_P_IPV6))
+ if (skb->protocol == htons(ETH_P_IP)) {
+ return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
- return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
+ return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
#else
+ priv->stats.csum_err_invalid_ip_version++;
return -EPROTONOSUPPORT;
#endif
+ } else {
+ priv->stats.csum_err_invalid_ip_version++;
+ return -EPROTONOSUPPORT;
+ }
return 0;
}
@@ -367,6 +393,7 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
{
+ struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_ul_csum_header *ul_header;
void *iphdr;
@@ -389,8 +416,11 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
return;
#else
+ priv->stats.csum_err_invalid_ip_version++;
goto sw_csum;
#endif
+ } else {
+ priv->stats.csum_err_invalid_ip_version++;
}
}
@@ -399,4 +429,6 @@ sw_csum:
ul_header->csum_insert_offset = 0;
ul_header->csum_enabled = 0;
ul_header->udp_ip4_ind = 0;
+
+ priv->stats.csum_sw++;
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 2ea16a088de8..cb02e1a015c1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -152,6 +152,56 @@ static const struct net_device_ops rmnet_vnd_ops = {
.ndo_get_stats64 = rmnet_get_stats64,
};
+static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "Checksum ok",
+ "Checksum valid bit not set",
+ "Checksum validation failed",
+ "Checksum error bad buffer",
+ "Checksum error bad ip version",
+ "Checksum error bad transport",
+ "Checksum skipped on ip fragment",
+ "Checksum skipped",
+ "Checksum computed in software",
+};
+
+static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &rmnet_gstrings_stats,
+ sizeof(rmnet_gstrings_stats));
+ break;
+ }
+}
+
+static int rmnet_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(rmnet_gstrings_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void rmnet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_priv_stats *st = &priv->stats;
+
+ if (!data)
+ return;
+
+ memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
+}
+
+static const struct ethtool_ops rmnet_ethtool_ops = {
+ .get_ethtool_stats = rmnet_get_ethtool_stats,
+ .get_strings = rmnet_get_strings,
+ .get_sset_count = rmnet_get_sset_count,
+};
+
/* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
* flags, ARP type, needed headroom, etc...
*/
@@ -170,6 +220,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
rmnet_dev->needs_free_netdev = true;
+ rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
}
/* Exposed API */
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index d24b47b8e0b2..d118da5a10a2 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2224,7 +2224,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
- disable_irq(irq);
+ disable_irq_nosync(irq);
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index fcd42d0387b9..75dfac0248f4 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -410,13 +410,8 @@ enum rtl8168_8101_registers {
CSIAR = 0x68,
#define CSIAR_FLAG 0x80000000
#define CSIAR_WRITE_CMD 0x80000000
-#define CSIAR_BYTE_ENABLE 0x0f
-#define CSIAR_BYTE_ENABLE_SHIFT 12
-#define CSIAR_ADDR_MASK 0x0fff
-#define CSIAR_FUNC_CARD 0x00000000
-#define CSIAR_FUNC_SDIO 0x00010000
-#define CSIAR_FUNC_NIC 0x00020000
-#define CSIAR_FUNC_NIC2 0x00010000
+#define CSIAR_BYTE_ENABLE 0x0000f000
+#define CSIAR_ADDR_MASK 0x00000fff
PMCH = 0x6f,
EPHYAR = 0x80,
#define EPHYAR_FLAG 0x80000000
@@ -599,6 +594,7 @@ enum rtl_register_content {
RxChkSum = (1 << 5),
PCIDAC = (1 << 4),
PCIMulRW = (1 << 3),
+#define INTT_MASK GENMASK(1, 0)
INTT_0 = 0x0000, // 8168
INTT_1 = 0x0001, // 8168
INTT_2 = 0x0002, // 8168
@@ -689,6 +685,7 @@ enum rtl_rx_desc_bit {
};
#define RsvdMask 0x3fffc000
+#define CPCMD_QUIRK_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK)
struct TxDesc {
__le32 opts1;
@@ -774,21 +771,11 @@ struct rtl8169_private {
int (*read)(struct rtl8169_private *, int);
} mdio_ops;
- struct pll_power_ops {
- void (*down)(struct rtl8169_private *);
- void (*up)(struct rtl8169_private *);
- } pll_power_ops;
-
struct jumbo_ops {
void (*enable)(struct rtl8169_private *);
void (*disable)(struct rtl8169_private *);
} jumbo_ops;
- struct csi_ops {
- void (*write)(struct rtl8169_private *, int, int);
- u32 (*read)(struct rtl8169_private *, int);
- } csi_ops;
-
int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
int (*get_link_ksettings)(struct net_device *,
struct ethtool_link_ksettings *);
@@ -1614,23 +1601,8 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
if (options & LinkUp)
wolopts |= WAKE_PHY;
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
wolopts |= WAKE_MAGIC;
break;
@@ -1691,23 +1663,8 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
tmp = ARRAY_SIZE(cfg) - 1;
if (wolopts & WAKE_MAGIC)
rtl_w0w1_eri(tp,
@@ -1935,12 +1892,14 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
return features;
}
-static void __rtl8169_set_features(struct net_device *dev,
- netdev_features_t features)
+static int rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
u32 rx_config;
+ rtl_lock_work(tp);
+
rx_config = RTL_R32(tp, RxConfig);
if (features & NETIF_F_RXALL)
rx_config |= (AcceptErr | AcceptRunt);
@@ -1959,28 +1918,14 @@ static void __rtl8169_set_features(struct net_device *dev,
else
tp->cp_cmd &= ~RxVlan;
- tp->cp_cmd |= RTL_R16(tp, CPlusCmd) & ~(RxVlan | RxChkSum);
-
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
RTL_R16(tp, CPlusCmd);
-}
-
-static int rtl8169_set_features(struct net_device *dev,
- netdev_features_t features)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
-
- rtl_lock_work(tp);
- if (features ^ dev->features)
- __rtl8169_set_features(dev, features);
rtl_unlock_work(tp);
return 0;
}
-
static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
{
return (skb_vlan_tag_present(skb)) ?
@@ -2354,7 +2299,7 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
if (IS_ERR(ci))
return PTR_ERR(ci);
- scale = &ci->scalev[RTL_R16(tp, CPlusCmd) & 3];
+ scale = &ci->scalev[tp->cp_cmd & INTT_MASK];
/* read IntrMitigate and adjust according to scale */
for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
@@ -2453,7 +2398,7 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
RTL_W16(tp, IntrMitigate, swab16(w));
- tp->cp_cmd = (tp->cp_cmd & ~3) | cp01;
+ tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
RTL_R16(tp, CPlusCmd);
@@ -2483,7 +2428,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
- struct net_device *dev, u8 default_version)
+ u8 default_version)
{
/*
* The driver currently handles the 8168Bf and the 8168Be identically
@@ -2588,8 +2533,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
tp->mac_version = p->mac_version;
if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- netif_notice(tp, probe, dev,
- "unknown MAC, using family default\n");
+ dev_notice(tp_to_dev(tp),
+ "unknown MAC, using family default\n");
tp->mac_version = default_version;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
tp->mac_version = tp->mii.supports_gmii ?
@@ -4638,18 +4583,7 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
ops->write = r8168dp_2_mdio_write;
ops->read = r8168dp_2_mdio_read;
break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
ops->write = r8168g_mdio_write;
ops->read = r8168g_mdio_read;
break;
@@ -4694,21 +4628,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_32:
case RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51:
RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
@@ -4728,79 +4648,13 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
return true;
}
-static void r810x_phy_power_down(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
-}
-
-static void r810x_phy_power_up(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
-}
-
-static void r810x_pll_power_down(struct rtl8169_private *tp)
-{
- if (rtl_wol_pll_power_down(tp))
- return;
-
- r810x_phy_power_down(tp);
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_07:
- case RTL_GIGA_MAC_VER_08:
- case RTL_GIGA_MAC_VER_09:
- case RTL_GIGA_MAC_VER_10:
- case RTL_GIGA_MAC_VER_13:
- case RTL_GIGA_MAC_VER_16:
- break;
- default:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
- break;
- }
-}
-
-static void r810x_pll_power_up(struct rtl8169_private *tp)
-{
- r810x_phy_power_up(tp);
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_07:
- case RTL_GIGA_MAC_VER_08:
- case RTL_GIGA_MAC_VER_09:
- case RTL_GIGA_MAC_VER_10:
- case RTL_GIGA_MAC_VER_13:
- case RTL_GIGA_MAC_VER_16:
- break;
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- break;
- default:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
- break;
- }
-}
-
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
rtl_writephy(tp, 0x1f, 0x0000);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_11:
case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_18:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
rtl_writephy(tp, 0x0e, 0x0000);
break;
@@ -4808,6 +4662,9 @@ static void r8168_phy_power_up(struct rtl8169_private *tp)
break;
}
rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
+
+ /* give MAC/PHY some time to resume */
+ msleep(20);
}
static void r8168_phy_power_down(struct rtl8169_private *tp)
@@ -4823,18 +4680,7 @@ static void r8168_phy_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_11:
case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_18:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
rtl_writephy(tp, 0x0e, 0x0200);
default:
@@ -4848,12 +4694,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
if (r8168_check_dash(tp))
return;
- if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
- tp->mac_version == RTL_GIGA_MAC_VER_24) &&
- (RTL_R16(tp, CPlusCmd) & ASF)) {
- return;
- }
-
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
tp->mac_version == RTL_GIGA_MAC_VER_33)
rtl_ephy_write(tp, 0x19, 0xff64);
@@ -4864,16 +4704,15 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
r8168_phy_power_down(tp);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39:
+ case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
case RTL_GIGA_MAC_VER_45:
case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
@@ -4891,18 +4730,17 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39:
+ case RTL_GIGA_MAC_VER_43:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
break;
case RTL_GIGA_MAC_VER_44:
case RTL_GIGA_MAC_VER_45:
case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
@@ -4919,127 +4757,41 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
r8168_phy_power_up(tp);
}
-static void rtl_generic_op(struct rtl8169_private *tp,
- void (*op)(struct rtl8169_private *))
-{
- if (op)
- op(tp);
-}
-
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
- rtl_generic_op(tp, tp->pll_power_ops.down);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15:
+ break;
+ default:
+ r8168_pll_power_down(tp);
+ }
}
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
- rtl_generic_op(tp, tp->pll_power_ops.up);
-}
-
-static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
-{
- struct pll_power_ops *ops = &tp->pll_power_ops;
-
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_07:
- case RTL_GIGA_MAC_VER_08:
- case RTL_GIGA_MAC_VER_09:
- case RTL_GIGA_MAC_VER_10:
- case RTL_GIGA_MAC_VER_16:
- case RTL_GIGA_MAC_VER_29:
- case RTL_GIGA_MAC_VER_30:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- ops->down = r810x_pll_power_down;
- ops->up = r810x_pll_power_up;
- break;
-
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_18:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
- ops->down = r8168_pll_power_down;
- ops->up = r8168_pll_power_up;
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15:
break;
-
default:
- ops->down = NULL;
- ops->up = NULL;
- break;
+ r8168_pll_power_up(tp);
}
}
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01:
- case RTL_GIGA_MAC_VER_02:
- case RTL_GIGA_MAC_VER_03:
- case RTL_GIGA_MAC_VER_04:
- case RTL_GIGA_MAC_VER_05:
- case RTL_GIGA_MAC_VER_06:
- case RTL_GIGA_MAC_VER_10:
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_13:
- case RTL_GIGA_MAC_VER_14:
- case RTL_GIGA_MAC_VER_15:
- case RTL_GIGA_MAC_VER_16:
- case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_18:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_35:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
@@ -5055,16 +4807,20 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- rtl_generic_op(tp, tp->jumbo_ops.enable);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ if (tp->jumbo_ops.enable) {
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ tp->jumbo_ops.enable(tp);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ }
}
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- rtl_generic_op(tp, tp->jumbo_ops.disable);
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ if (tp->jumbo_ops.disable) {
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ tp->jumbo_ops.disable(tp);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ }
}
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -5176,18 +4932,7 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
* No action needed for jumbo frames with 8169.
* No jumbo for 810x at all.
*/
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
default:
ops->disable = NULL;
ops->enable = NULL;
@@ -5273,32 +5018,21 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
rtl_rx_close(tp);
- if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28 ||
- tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
- tp->mac_version == RTL_GIGA_MAC_VER_35 ||
- tp->mac_version == RTL_GIGA_MAC_VER_36 ||
- tp->mac_version == RTL_GIGA_MAC_VER_37 ||
- tp->mac_version == RTL_GIGA_MAC_VER_38 ||
- tp->mac_version == RTL_GIGA_MAC_VER_40 ||
- tp->mac_version == RTL_GIGA_MAC_VER_41 ||
- tp->mac_version == RTL_GIGA_MAC_VER_42 ||
- tp->mac_version == RTL_GIGA_MAC_VER_43 ||
- tp->mac_version == RTL_GIGA_MAC_VER_44 ||
- tp->mac_version == RTL_GIGA_MAC_VER_45 ||
- tp->mac_version == RTL_GIGA_MAC_VER_46 ||
- tp->mac_version == RTL_GIGA_MAC_VER_47 ||
- tp->mac_version == RTL_GIGA_MAC_VER_48 ||
- tp->mac_version == RTL_GIGA_MAC_VER_49 ||
- tp->mac_version == RTL_GIGA_MAC_VER_50 ||
- tp->mac_version == RTL_GIGA_MAC_VER_51) {
+ break;
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
- } else {
+ break;
+ default:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
udelay(100);
+ break;
}
rtl_hw_reset(tp);
@@ -5311,10 +5045,10 @@ static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
(InterFrameGap << TxInterFrameGapShift));
}
-static void rtl_hw_start(struct rtl8169_private *tp)
+static void rtl_set_rx_max_size(struct rtl8169_private *tp)
{
- tp->hw_start(tp);
- rtl_irq_enable_all(tp);
+ /* Low hurts. Let's disable the filtering. */
+ RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1);
}
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
@@ -5330,21 +5064,6 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
-static u16 rtl_rw_cpluscmd(struct rtl8169_private *tp)
-{
- u16 cmd;
-
- cmd = RTL_R16(tp, CPlusCmd);
- RTL_W16(tp, CPlusCmd, cmd);
- return cmd;
-}
-
-static void rtl_set_rx_max_size(struct rtl8169_private *tp)
-{
- /* Low hurts. Let's disable the filtering. */
- RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1);
-}
-
static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
{
static const struct rtl_cfg2_info {
@@ -5422,33 +5141,34 @@ static void rtl_set_rx_mode(struct net_device *dev)
RTL_W32(tp, RxConfig, tmp);
}
-static void rtl_hw_start_8169(struct rtl8169_private *tp)
+static void rtl_hw_start(struct rtl8169_private *tp)
{
- if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
- RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) | PCIMulRW);
- pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
- }
-
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
- tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03 ||
- tp->mac_version == RTL_GIGA_MAC_VER_04)
- RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
- rtl_init_rxcfg(tp);
-
- RTL_W8(tp, EarlyTxThres, NoEarlyTx);
+ tp->hw_start(tp);
rtl_set_rx_max_size(tp);
+ rtl_set_rx_tx_desc_registers(tp);
+ rtl_set_rx_tx_config_registers(tp);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
- if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
- tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03 ||
- tp->mac_version == RTL_GIGA_MAC_VER_04)
- rtl_set_rx_tx_config_registers(tp);
+ /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
+ RTL_R8(tp, IntrMask);
+ RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
+ rtl_set_rx_mode(tp->dev);
+ /* no early-rx interrupts */
+ RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
+ rtl_irq_enable_all(tp);
+}
- tp->cp_cmd |= rtl_rw_cpluscmd(tp) | PCIMulRW;
+static void rtl_hw_start_8169(struct rtl8169_private *tp)
+{
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
+
+ RTL_W8(tp, EarlyTxThres, NoEarlyTx);
+
+ tp->cp_cmd |= PCIMulRW;
if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
tp->mac_version == RTL_GIGA_MAC_VER_03) {
@@ -5467,56 +5187,7 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
*/
RTL_W16(tp, IntrMitigate, 0x0000);
- rtl_set_rx_tx_desc_registers(tp);
-
- if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
- tp->mac_version != RTL_GIGA_MAC_VER_02 &&
- tp->mac_version != RTL_GIGA_MAC_VER_03 &&
- tp->mac_version != RTL_GIGA_MAC_VER_04) {
- RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
- rtl_set_rx_tx_config_registers(tp);
- }
-
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
-
- /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
- RTL_R8(tp, IntrMask);
-
RTL_W32(tp, RxMissed, 0);
-
- rtl_set_rx_mode(tp->dev);
-
- /* no early-rx interrupts */
- RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
-}
-
-static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
-{
- if (tp->csi_ops.write)
- tp->csi_ops.write(tp, addr, value);
-}
-
-static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
-{
- return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
-}
-
-static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
-{
- u32 csi;
-
- csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
- rtl_csi_write(tp, 0x070c, csi | bits);
-}
-
-static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
-{
- rtl_csi_access_enable(tp, 0x17000000);
-}
-
-static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
-{
- rtl_csi_access_enable(tp, 0x27000000);
}
DECLARE_RTL_COND(rtl_csiar_cond)
@@ -5524,101 +5195,55 @@ DECLARE_RTL_COND(rtl_csiar_cond)
return RTL_R32(tp, CSIAR) & CSIAR_FLAG;
}
-static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
+static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
{
+ u32 func = PCI_FUNC(tp->pci_dev->devfn);
+
RTL_W32(tp, CSIDR, value);
RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
- CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+ CSIAR_BYTE_ENABLE | func << 16);
rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
-static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
+static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
{
- RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) |
- CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+ u32 func = PCI_FUNC(tp->pci_dev->devfn);
+
+ RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
+ CSIAR_BYTE_ENABLE);
return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
RTL_R32(tp, CSIDR) : ~0;
}
-static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
+static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
{
- RTL_W32(tp, CSIDR, value);
- RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
- CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
- CSIAR_FUNC_NIC);
-
- rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
-}
+ struct pci_dev *pdev = tp->pci_dev;
+ u32 csi;
-static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
-{
- RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
- CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+ /* According to Realtek the value at config space address 0x070f
+ * controls the L0s/L1 entrance latency. We try standard ECAM access
+ * first and if it fails fall back to CSI.
+ */
+ if (pdev->cfg_size > 0x070f &&
+ pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
+ return;
- return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
- RTL_R32(tp, CSIDR) : ~0;
+ netdev_notice_once(tp->dev,
+ "No native access to PCI extended config space, falling back to CSI\n");
+ csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
+ rtl_csi_write(tp, 0x070c, csi | val << 24);
}
-static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value)
+static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
- RTL_W32(tp, CSIDR, value);
- RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
- CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
- CSIAR_FUNC_NIC2);
-
- rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
+ rtl_csi_access_enable(tp, 0x17);
}
-static u32 r8411_csi_read(struct rtl8169_private *tp, int addr)
+static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
- RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 |
- CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
-
- return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
- RTL_R32(tp, CSIDR) : ~0;
-}
-
-static void rtl_init_csi_ops(struct rtl8169_private *tp)
-{
- struct csi_ops *ops = &tp->csi_ops;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01:
- case RTL_GIGA_MAC_VER_02:
- case RTL_GIGA_MAC_VER_03:
- case RTL_GIGA_MAC_VER_04:
- case RTL_GIGA_MAC_VER_05:
- case RTL_GIGA_MAC_VER_06:
- case RTL_GIGA_MAC_VER_10:
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_13:
- case RTL_GIGA_MAC_VER_14:
- case RTL_GIGA_MAC_VER_15:
- case RTL_GIGA_MAC_VER_16:
- case RTL_GIGA_MAC_VER_17:
- ops->write = NULL;
- ops->read = NULL;
- break;
-
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_38:
- ops->write = r8402_csi_write;
- ops->read = r8402_csi_read;
- break;
-
- case RTL_GIGA_MAC_VER_44:
- ops->write = r8411_csi_write;
- ops->read = r8411_csi_read;
- break;
-
- default:
- ops->write = r8169_csi_write;
- ops->read = r8169_csi_read;
- break;
- }
+ rtl_csi_access_enable(tp, 0x27);
}
struct ephy_info {
@@ -5665,22 +5290,12 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
RTL_W8(tp, Config3, data);
}
-#define R8168_CPCMD_QUIRK_MASK (\
- EnableBist | \
- Mac_dbgo_oe | \
- Force_half_dup | \
- Force_rxflow_en | \
- Force_txflow_en | \
- Cxpl_dbg_sel | \
- ASF | \
- PktCntrDisable | \
- Mac_dbgo_sel)
-
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
- RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ tp->cp_cmd &= CPCMD_QUIRK_MASK;
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
if (tp->dev->mtu <= ETH_DATA_LEN) {
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B |
@@ -5708,7 +5323,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
rtl_disable_clock_request(tp);
- RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ tp->cp_cmd &= CPCMD_QUIRK_MASK;
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
@@ -5737,7 +5353,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ tp->cp_cmd &= CPCMD_QUIRK_MASK;
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
@@ -5754,7 +5371,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ tp->cp_cmd &= CPCMD_QUIRK_MASK;
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
@@ -5811,7 +5429,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ tp->cp_cmd &= CPCMD_QUIRK_MASK;
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
}
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
@@ -6274,14 +5893,10 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
static void rtl_hw_start_8168(struct rtl8169_private *tp)
{
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
-
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_set_rx_max_size(tp);
-
- tp->cp_cmd |= RTL_R16(tp, CPlusCmd) | PktCntrDisable | INTT_1;
-
+ tp->cp_cmd &= ~INTT_MASK;
+ tp->cp_cmd |= PktCntrDisable | INTT_1;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
RTL_W16(tp, IntrMitigate, 0x5151);
@@ -6292,12 +5907,6 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
tp->event_slow &= ~RxOverflow;
}
- rtl_set_rx_tx_desc_registers(tp);
-
- rtl_set_rx_tx_config_registers(tp);
-
- RTL_R8(tp, IntrMask);
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_11:
rtl_hw_start_8168bb(tp);
@@ -6401,27 +6010,8 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
tp->dev->name, tp->mac_version);
break;
}
-
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
-
- RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
-
- rtl_set_rx_mode(tp->dev);
-
- RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
}
-#define R810X_CPCMD_QUIRK_MASK (\
- EnableBist | \
- Mac_dbgo_oe | \
- Force_half_dup | \
- Force_rxflow_en | \
- Force_txflow_en | \
- Cxpl_dbg_sel | \
- ASF | \
- PktCntrDisable | \
- Mac_dbgo_sel)
-
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8102e_1[] = {
@@ -6555,19 +6145,11 @@ static void rtl_hw_start_8101(struct rtl8169_private *tp)
pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_NOSNOOP_EN);
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
-
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_set_rx_max_size(tp);
-
- tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
+ tp->cp_cmd &= CPCMD_QUIRK_MASK;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- rtl_set_rx_tx_desc_registers(tp);
-
- rtl_set_rx_tx_config_registers(tp);
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_07:
rtl_hw_start_8102e_1(tp);
@@ -6604,17 +6186,7 @@ static void rtl_hw_start_8101(struct rtl8169_private *tp)
break;
}
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
-
RTL_W16(tp, IntrMitigate, 0x0000);
-
- RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
-
- rtl_set_rx_mode(tp->dev);
-
- RTL_R8(tp, IntrMask);
-
- RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
}
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -6942,18 +6514,6 @@ static int msdn_giant_send_check(struct sk_buff *skb)
return ret;
}
-static inline __be16 get_protocol(struct sk_buff *skb)
-{
- __be16 protocol;
-
- if (skb->protocol == htons(ETH_P_8021Q))
- protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
-
- return protocol;
-}
-
static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
struct sk_buff *skb, u32 *opts)
{
@@ -6990,7 +6550,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
return false;
}
- switch (get_protocol(skb)) {
+ switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
opts[0] |= TD1_GTSENV4;
break;
@@ -7022,7 +6582,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
return false;
}
- switch (get_protocol(skb)) {
+ switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
opts[1] |= TD1_IPv4_CS;
ip_protocol = ip_hdr(skb)->protocol;
@@ -7637,8 +7197,6 @@ static int rtl_open(struct net_device *dev)
rtl8169_init_phy(dev, tp);
- __rtl8169_set_features(dev, dev->features);
-
rtl_pll_power_up(tp);
rtl_hw_start(tp);
@@ -8045,20 +7603,10 @@ static void rtl_hw_init_8168ep(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51:
rtl_hw_init_8168ep(tp);
break;
default:
@@ -8107,42 +7655,41 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {
- netif_err(tp, probe, dev, "enable failure\n");
+ dev_err(&pdev->dev, "enable failure\n");
return rc;
}
if (pcim_set_mwi(pdev) < 0)
- netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
+ dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n");
/* use first MMIO region */
region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1;
if (region < 0) {
- netif_err(tp, probe, dev, "no MMIO resource found\n");
+ dev_err(&pdev->dev, "no MMIO resource found\n");
return -ENODEV;
}
/* check for weird/broken PCI region reporting */
if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
- netif_err(tp, probe, dev,
- "Invalid PCI region size(s), aborting\n");
+ dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
return -ENODEV;
}
rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME);
if (rc < 0) {
- netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
+ dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
return rc;
}
tp->mmio_addr = pcim_iomap_table(pdev)[region];
if (!pci_is_pcie(pdev))
- netif_info(tp, probe, dev, "not PCI Express\n");
+ dev_info(&pdev->dev, "not PCI Express\n");
/* Identify chip attached to board */
- rtl8169_get_mac_version(tp, dev, cfg->default_ver);
+ rtl8169_get_mac_version(tp, cfg->default_ver);
- tp->cp_cmd = 0;
+ tp->cp_cmd = RTL_R16(tp, CPlusCmd);
if ((sizeof(dma_addr_t) > 4) &&
(use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
@@ -8157,7 +7704,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
- netif_err(tp, probe, dev, "DMA configuration failed\n");
+ dev_err(&pdev->dev, "DMA configuration failed\n");
return rc;
}
}
@@ -8175,9 +7722,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
rtl_init_mdio_ops(tp);
- rtl_init_pll_power_ops(tp);
rtl_init_jumbo_ops(tp);
- rtl_init_csi_ops(tp);
rtl8169_print_mac_version(tp);
@@ -8185,7 +7730,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = rtl_alloc_irq(tp);
if (rc < 0) {
- netif_err(tp, probe, dev, "Can't allocate interrupt\n");
+ dev_err(&pdev->dev, "Can't allocate interrupt\n");
return rc;
}
@@ -8213,29 +7758,18 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u64_stats_init(&tp->tx_stats.syncp);
/* Get MAC address */
- if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
- tp->mac_version == RTL_GIGA_MAC_VER_36 ||
- tp->mac_version == RTL_GIGA_MAC_VER_37 ||
- tp->mac_version == RTL_GIGA_MAC_VER_38 ||
- tp->mac_version == RTL_GIGA_MAC_VER_40 ||
- tp->mac_version == RTL_GIGA_MAC_VER_41 ||
- tp->mac_version == RTL_GIGA_MAC_VER_42 ||
- tp->mac_version == RTL_GIGA_MAC_VER_43 ||
- tp->mac_version == RTL_GIGA_MAC_VER_44 ||
- tp->mac_version == RTL_GIGA_MAC_VER_45 ||
- tp->mac_version == RTL_GIGA_MAC_VER_46 ||
- tp->mac_version == RTL_GIGA_MAC_VER_47 ||
- tp->mac_version == RTL_GIGA_MAC_VER_48 ||
- tp->mac_version == RTL_GIGA_MAC_VER_49 ||
- tp->mac_version == RTL_GIGA_MAC_VER_50 ||
- tp->mac_version == RTL_GIGA_MAC_VER_51) {
- u16 mac_addr[3];
-
+ switch (tp->mac_version) {
+ u8 mac_addr[ETH_ALEN] __aligned(4);
+ case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
*(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC);
- *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);
+ *(u16 *)&mac_addr[4] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);
- if (is_valid_ether_addr((u8 *)mac_addr))
- rtl_rar_set(tp, (u8 *)mac_addr);
+ if (is_valid_ether_addr(mac_addr))
+ rtl_rar_set(tp, mac_addr);
+ break;
+ default:
+ break;
}
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b6b90a6314e3..d9cadfb1bc4a 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -442,12 +442,22 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
int enum_index)
{
- iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+ u16 offset = mdp->reg_offset[enum_index];
+
+ if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+ return;
+
+ iowrite32(data, mdp->tsu_addr + offset);
}
static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
{
- return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+ u16 offset = mdp->reg_offset[enum_index];
+
+ if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+ return ~0U;
+
+ return ioread32(mdp->tsu_addr + offset);
}
static void sh_eth_select_mii(struct net_device *ndev)
@@ -456,6 +466,9 @@ static void sh_eth_select_mii(struct net_device *ndev)
u32 value;
switch (mdp->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID:
+ value = 0x3;
+ break;
case PHY_INTERFACE_MODE_GMII:
value = 0x2;
break;
@@ -693,7 +706,7 @@ static struct sh_eth_cpu_data rcar_gen1_data = {
EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP,
- .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
.fdr_value = 0x00000f0f,
@@ -725,7 +738,7 @@ static struct sh_eth_cpu_data rcar_gen2_data = {
EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP,
- .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
.fdr_value = 0x00000f0f,
@@ -740,6 +753,49 @@ static struct sh_eth_cpu_data rcar_gen2_data = {
.rmiimode = 1,
.magic = 1,
};
+
+/* R8A77980 */
+static struct sh_eth_cpu_data r8a77980_data = {
+ .soft_reset = sh_eth_soft_reset_gether,
+
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate_gether,
+
+ .register_type = SH_ETH_REG_GIGABIT,
+
+ .edtrr_trns = EDTRR_TRNS_GETHER,
+ .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
+ .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
+ ECSIPR_MPDIP,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_RMAFIP | EESIPR_RRFIP |
+ EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
+
+ .tx_check = EESR_FTC | EESR_CD | EESR_TRO,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+ EESR_RFE | EESR_RDE | EESR_RFRMER |
+ EESR_TFE | EESR_TDE | EESR_ECI,
+ .fdr_value = 0x0000070f,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .bculr = 1,
+ .hw_swap = 1,
+ .nbst = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
+ .no_trimd = 1,
+ .no_ade = 1,
+ .xdfar_rw = 1,
+ .hw_checksum = 1,
+ .select_mii = 1,
+ .magic = 1,
+ .cexcr = 1,
+};
#endif /* CONFIG_OF */
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -775,7 +831,7 @@ static struct sh_eth_cpu_data sh7724_data = {
EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP,
- .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
@@ -820,7 +876,7 @@ static struct sh_eth_cpu_data sh7757_data = {
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP,
- .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
@@ -1421,8 +1477,13 @@ static int sh_eth_dev_init(struct net_device *ndev)
sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
+ /* DMA transfer burst mode */
+ if (mdp->cd->nbst)
+ sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST);
+
+ /* Burst cycle count upper-limit */
if (mdp->cd->bculr)
- sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */
+ sh_eth_write(ndev, 0x800, BCULR);
sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
@@ -2610,12 +2671,6 @@ static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
}
/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
-static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
- int entry)
-{
- return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
-}
-
static u32 sh_eth_tsu_get_post_mask(int entry)
{
return 0x0f << (28 - ((entry % 8) * 4));
@@ -2630,27 +2685,25 @@ static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
int entry)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ int reg = TSU_POST1 + entry / 8;
u32 tmp;
- void *reg_offset;
- reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
- tmp = ioread32(reg_offset);
- iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
+ tmp = sh_eth_tsu_read(mdp, reg);
+ sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg);
}
static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
int entry)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ int reg = TSU_POST1 + entry / 8;
u32 post_mask, ref_mask, tmp;
- void *reg_offset;
- reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
post_mask = sh_eth_tsu_get_post_mask(entry);
ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
- tmp = ioread32(reg_offset);
- iowrite32(tmp & ~post_mask, reg_offset);
+ tmp = sh_eth_tsu_read(mdp, reg);
+ sh_eth_tsu_write(mdp, tmp & ~post_mask, reg);
/* If other port enables, the function returns "true" */
return tmp & ref_mask;
@@ -3023,15 +3076,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
pdev->name, pdev->id);
/* register MDIO bus */
- if (dev->of_node) {
- ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
- } else {
- if (pd->phy_irq > 0)
- mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
-
- ret = mdiobus_register(mdp->mii_bus);
- }
+ if (pd->phy_irq > 0)
+ mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
+ ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
if (ret)
goto out_free_bus;
@@ -3130,6 +3178,7 @@ static const struct of_device_id sh_eth_match_table[] = {
{ .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
{ .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
{ .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
+ { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
{ .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
{ .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a5b792ce2ae7..5dee19b61aee 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -163,7 +163,7 @@ enum {
};
/* Driver's parameters */
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_RENESAS)
#define SH_ETH_RX_ALIGN 32
#else
#define SH_ETH_RX_ALIGN 2
@@ -184,6 +184,7 @@ enum GECMR_BIT {
/* EDMR */
enum DMAC_M_BIT {
+ EDMR_NBST = 0x80,
EDMR_EL = 0x40, /* Little endian */
EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
EDMR_SRST_GETHER = 0x03,
@@ -242,7 +243,7 @@ enum EESR_BIT {
EESR_CND = 0x00000800,
EESR_DLC = 0x00000400,
EESR_CD = 0x00000200,
- EESR_RTO = 0x00000100,
+ EESR_TRO = 0x00000100,
EESR_RMAF = 0x00000080,
EESR_CEEF = 0x00000040,
EESR_CELF = 0x00000020,
@@ -262,7 +263,7 @@ enum EESR_BIT {
EESR_CERF) /* Recv frame CRC error */
#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
- EESR_RTO)
+ EESR_TRO)
#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
EESR_RDE | EESR_RFRMER | EESR_ADE | \
EESR_TFE | EESR_TDE)
@@ -498,20 +499,21 @@ struct sh_eth_cpu_data {
/* hardware features */
unsigned long irq_flags; /* IRQ configuration flags */
- unsigned no_psr:1; /* EtherC DO NOT have PSR */
- unsigned apr:1; /* EtherC have APR */
- unsigned mpr:1; /* EtherC have MPR */
- unsigned tpauser:1; /* EtherC have TPAUSER */
- unsigned bculr:1; /* EtherC have BCULR */
- unsigned tsu:1; /* EtherC have TSU */
- unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
- unsigned rpadir:1; /* E-DMAC have RPADIR */
- unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
- unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
+ unsigned no_psr:1; /* EtherC DOES NOT have PSR */
+ unsigned apr:1; /* EtherC has APR */
+ unsigned mpr:1; /* EtherC has MPR */
+ unsigned tpauser:1; /* EtherC has TPAUSER */
+ unsigned bculr:1; /* EtherC has BCULR */
+ unsigned tsu:1; /* EtherC has TSU */
+ unsigned hw_swap:1; /* E-DMAC has DE bit in EDMR */
+ unsigned nbst:1; /* E-DMAC has NBST bit in EDMR */
+ unsigned rpadir:1; /* E-DMAC has RPADIR */
+ unsigned no_trimd:1; /* E-DMAC DOES NOT have TRIMD */
+ unsigned no_ade:1; /* E-DMAC DOES NOT have ADE bit in EESR */
unsigned no_xdfar:1; /* E-DMAC DOES NOT have RDFAR/TDFAR */
unsigned xdfar_rw:1; /* E-DMAC has writeable RDFAR/TDFAR */
unsigned hw_checksum:1; /* E-DMAC has CSMR */
- unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
+ unsigned select_mii:1; /* EtherC has RMII_MII (MII select register) */
unsigned rmiimode:1; /* EtherC has RMIIMODE register */
unsigned rtrate:1; /* EtherC has RTRATE register */
unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 056cb6093630..e73e4febeedb 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2738,6 +2738,8 @@ static void rocker_switchdev_event_work(struct work_struct *work)
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
err = rocker_world_port_fdb_add(rocker_port, fdb_info);
if (err) {
netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err);
@@ -2747,6 +2749,8 @@ static void rocker_switchdev_event_work(struct work_struct *work)
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
err = rocker_world_port_fdb_del(rocker_port, fdb_info);
if (err)
netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 50daad0a1482..d90a7b1f4088 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3999,29 +3999,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx)
atomic_set(&efx->active_queues, 0);
}
-static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right)
-{
- if ((left->match_flags ^ right->match_flags) |
- ((left->flags ^ right->flags) &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
- return false;
-
- return memcmp(&left->outer_vid, &right->outer_vid,
- sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) == 0;
-}
-
-static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
-{
- BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
- return jhash2((const u32 *)&spec->outer_vid,
- (sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) / 4,
- 0);
- /* XXX should we randomise the initval? */
-}
-
/* Decide whether a filter should be exclusive or else should allow
* delivery to additional recipients. Currently we decide that
* filters for specific local unicast MAC and IP addresses are
@@ -4346,7 +4323,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
goto out_unlock;
match_pri = rc;
- hash = efx_ef10_filter_hash(spec);
+ hash = efx_filter_spec_hash(spec);
is_mc_recip = efx_filter_is_mc_recipient(spec);
if (is_mc_recip)
bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
@@ -4378,7 +4355,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
if (!saved_spec) {
if (ins_index < 0)
ins_index = i;
- } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ } else if (efx_filter_spec_equal(spec, saved_spec)) {
if (spec->priority < saved_spec->priority &&
spec->priority != EFX_FILTER_PRI_AUTO) {
rc = -EPERM;
@@ -4762,28 +4739,63 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
unsigned int filter_idx)
{
+ struct efx_filter_spec *spec, saved_spec;
struct efx_ef10_filter_table *table;
- struct efx_filter_spec *spec;
- bool ret;
+ struct efx_arfs_rule *rule = NULL;
+ bool ret = true, force = false;
+ u16 arfs_id;
down_read(&efx->filter_sem);
table = efx->filter_state;
down_write(&table->lock);
spec = efx_ef10_filter_entry_spec(table, filter_idx);
- if (!spec || spec->priority != EFX_FILTER_PRI_HINT) {
- ret = true;
+ if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
goto out_unlock;
- }
- if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
- flow_id, filter_idx)) {
+ spin_lock_bh(&efx->rps_hash_lock);
+ if (!efx->rps_hash_table) {
+ /* In the absence of the table, we always return 0 to ARFS. */
+ arfs_id = 0;
+ } else {
+ rule = efx_rps_hash_find(efx, spec);
+ if (!rule)
+ /* ARFS table doesn't know of this filter, so remove it */
+ goto expire;
+ arfs_id = rule->arfs_id;
+ ret = efx_rps_check_rule(rule, filter_idx, &force);
+ if (force)
+ goto expire;
+ if (!ret) {
+ spin_unlock_bh(&efx->rps_hash_lock);
+ goto out_unlock;
+ }
+ }
+ if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
ret = false;
- goto out_unlock;
+ else if (rule)
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+expire:
+ saved_spec = *spec; /* remove operation will kfree spec */
+ spin_unlock_bh(&efx->rps_hash_lock);
+ /* At this point (since we dropped the lock), another thread might queue
+ * up a fresh insertion request (but the actual insertion will be held
+ * up by our possession of the filter table lock). In that case, it
+ * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
+ * the rule is not removed by efx_rps_hash_del() below.
+ */
+ if (ret)
+ ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
+ filter_idx, true) == 0;
+ /* While we can't safely dereference rule (we dropped the lock), we can
+ * still test it for NULL.
+ */
+ if (ret && rule) {
+ /* Expiring, so remove entry from ARFS table */
+ spin_lock_bh(&efx->rps_hash_lock);
+ efx_rps_hash_del(efx, &saved_spec);
+ spin_unlock_bh(&efx->rps_hash_lock);
}
-
- ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
- filter_idx, true) == 0;
out_unlock:
up_write(&table->lock);
up_read(&efx->filter_sem);
@@ -5265,7 +5277,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
ids = vlan->uc;
}
- filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
+ filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
/* Insert/renew filters */
for (i = 0; i < addr_count; i++) {
@@ -5334,7 +5346,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
int rc;
u16 *id;
- filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
+ filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 692dd729ee2a..88c1eee677c2 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1551,6 +1551,38 @@ static int efx_probe_interrupts(struct efx_nic *efx)
return 0;
}
+#if defined(CONFIG_SMP)
+static void efx_set_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ unsigned int cpu;
+
+ efx_for_each_channel(channel, efx) {
+ cpu = cpumask_local_spread(channel->channel,
+ pcibus_to_node(efx->pci_dev->bus));
+ irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
+ }
+}
+
+static void efx_clear_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ irq_set_affinity_hint(channel->irq, NULL);
+}
+#else
+static void
+efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+
+static void
+efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+#endif /* CONFIG_SMP */
+
static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel, *end_channel;
@@ -3027,6 +3059,10 @@ static int efx_init_struct(struct efx_nic *efx,
mutex_init(&efx->mac_lock);
#ifdef CONFIG_RFS_ACCEL
mutex_init(&efx->rps_mutex);
+ spin_lock_init(&efx->rps_hash_lock);
+ /* Failure to allocate is not fatal, but may degrade ARFS performance */
+ efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
+ sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
@@ -3070,6 +3106,10 @@ static void efx_fini_struct(struct efx_nic *efx)
{
int i;
+#ifdef CONFIG_RFS_ACCEL
+ kfree(efx->rps_hash_table);
+#endif
+
for (i = 0; i < EFX_MAX_CHANNELS; i++)
kfree(efx->channel[i]);
@@ -3092,6 +3132,141 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if ((left->match_flags ^ right->match_flags) |
+ ((left->flags ^ right->flags) &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+ return false;
+
+ return memcmp(&left->outer_vid, &right->outer_vid,
+ sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+ return jhash2((const u32 *)&spec->outer_vid,
+ (sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ 0);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+ bool *force)
+{
+ if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
+ /* ARFS is currently updating this entry, leave it */
+ return false;
+ }
+ if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
+ /* ARFS tried and failed to update this, so it's probably out
+ * of date. Remove the filter and the ARFS rule entry.
+ */
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+ *force = true;
+ return true;
+ } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
+ /* ARFS has moved on, so old filter is not needed. Since we did
+ * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
+ * not be removed by efx_rps_hash_del() subsequently.
+ */
+ *force = true;
+ return true;
+ }
+ /* Remove it iff ARFS wants to. */
+ return true;
+}
+
+struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ u32 hash = efx_filter_spec_hash(spec);
+
+ WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
+ if (!efx->rps_hash_table)
+ return NULL;
+ return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
+}
+
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (!head)
+ return NULL;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec))
+ return rule;
+ }
+ return NULL;
+}
+
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (!head)
+ return NULL;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec)) {
+ *new = false;
+ return rule;
+ }
+ }
+ rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
+ *new = true;
+ if (rule) {
+ memcpy(&rule->spec, spec, sizeof(rule->spec));
+ hlist_add_head(&rule->node, head);
+ }
+ return rule;
+}
+
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
+{
+ struct efx_arfs_rule *rule;
+ struct hlist_head *head;
+ struct hlist_node *node;
+
+ head = efx_rps_hash_bucket(efx, spec);
+ if (WARN_ON(!head))
+ return;
+ hlist_for_each(node, head) {
+ rule = container_of(node, struct efx_arfs_rule, node);
+ if (efx_filter_spec_equal(spec, &rule->spec)) {
+ /* Someone already reused the entry. We know that if
+ * this check doesn't fire (i.e. filter_id == REMOVING)
+ * then the REMOVING mark was put there by our caller,
+ * because caller is holding a lock on filter table and
+ * only holders of that lock set REMOVING.
+ */
+ if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
+ return;
+ hlist_del(node);
+ kfree(rule);
+ return;
+ }
+ }
+ /* We didn't find it. */
+ WARN_ON(1);
+}
+#endif
+
/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
* (a) this is an infrequent control-plane operation and (b) n is small (max 64)
*/
@@ -3165,6 +3340,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
cancel_work_sync(&efx->reset_work);
efx_disable_interrupts(efx);
+ efx_clear_interrupt_affinity(efx);
efx_nic_fini_interrupt(efx);
efx_fini_port(efx);
efx->type->fini(efx);
@@ -3314,6 +3490,8 @@ static int efx_pci_probe_main(struct efx_nic *efx)
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
+
+ efx_set_interrupt_affinity(efx);
rc = efx_enable_interrupts(efx);
if (rc)
goto fail6;
@@ -3321,6 +3499,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
return 0;
fail6:
+ efx_clear_interrupt_affinity(efx);
efx_nic_fini_interrupt(efx);
fail5:
efx_fini_port(efx);
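
Illustrative sketch (not part of the patch): the efx_filter_spec_hash()/efx_filter_spec_equal() helpers moved into efx.c hash and compare only the tail of the spec, starting at the first match field (outer_vid), so bookkeeping fields at the head of the struct never affect bucket placement in the new ARFS table. A minimal userspace analogue of that pattern, with a stand-in struct and a trivial hash in place of jhash2(), might look like this:

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical stand-in for struct efx_filter_spec: head fields are
 * bookkeeping, everything from outer_vid onwards is match data.
 * Fields are ordered so the struct has no padding bytes. */
struct demo_spec {
	unsigned int priority;	/* not part of the key */
	unsigned int flags;	/* not part of the key */
	uint16_t outer_vid;	/* first match field */
	uint16_t rem_port;
	uint32_t rem_host;
};

/* Hash only the match tail, mirroring efx_filter_spec_hash(). */
static uint32_t demo_spec_hash(const struct demo_spec *spec)
{
	const unsigned char *p = (const unsigned char *)&spec->outer_vid;
	size_t len = sizeof(*spec) - offsetof(struct demo_spec, outer_vid);
	uint32_t h = 2166136261u;	/* FNV-1a, simple stand-in for jhash2 */

	for (size_t i = 0; i < len; i++) {
		h ^= p[i];
		h *= 16777619u;
	}
	return h;
}

/* Compare only the match tail, mirroring efx_filter_spec_equal(). */
static int demo_spec_equal(const struct demo_spec *a, const struct demo_spec *b)
{
	return memcmp(&a->outer_vid, &b->outer_vid,
		      sizeof(*a) - offsetof(struct demo_spec, outer_vid)) == 0;
}

int main(void)
{
	struct demo_spec a = { .priority = 1, .outer_vid = 5,
			       .rem_port = 80, .rem_host = 0x0a000001 };
	struct demo_spec b = a;

	b.priority = 7;		/* differs only in a non-key field */
	printf("hash a=%08x b=%08x equal=%d\n",
	       demo_spec_hash(&a), demo_spec_hash(&b), demo_spec_equal(&a, &b));
	return 0;
}

Keying the hash on the match fields alone is what lets a later ARFS request find the existing efx_arfs_rule for the same flow even though its priority, flags and filter IDs have changed in the meantime.
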
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index a3140e16fcef..3f759ebdcf10 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -186,6 +186,27 @@ static inline void efx_filter_rfs_expire(struct work_struct *data) {}
#endif
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right);
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+ bool *force);
+
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+ const struct efx_filter_spec *spec);
+
+/* @new is written to indicate if entry was newly added (true) or if an old
+ * entry was found and returned (false).
+ */
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new);
+
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
+#endif
+
/* RSS contexts */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 4a19c7efdf8d..c72adf8b52ea 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2905,18 +2905,45 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
{
struct efx_farch_filter_state *state = efx->filter_state;
struct efx_farch_filter_table *table;
- bool ret = false;
+ bool ret = false, force = false;
+ u16 arfs_id;
down_write(&state->lock);
+ spin_lock_bh(&efx->rps_hash_lock);
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
if (test_bit(index, table->used_bitmap) &&
- table->spec[index].priority == EFX_FILTER_PRI_HINT &&
- rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
- flow_id, index)) {
- efx_farch_filter_table_clear_entry(efx, table, index);
- ret = true;
+ table->spec[index].priority == EFX_FILTER_PRI_HINT) {
+ struct efx_arfs_rule *rule = NULL;
+ struct efx_filter_spec spec;
+
+ efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
+ if (!efx->rps_hash_table) {
+ /* In the absence of the table, we always returned 0 to
+ * ARFS, so use the same to query it.
+ */
+ arfs_id = 0;
+ } else {
+ rule = efx_rps_hash_find(efx, &spec);
+ if (!rule) {
+ /* ARFS table doesn't know of this filter, remove it */
+ force = true;
+ } else {
+ arfs_id = rule->arfs_id;
+ if (!efx_rps_check_rule(rule, index, &force))
+ goto out_unlock;
+ }
+ }
+ if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
+ flow_id, arfs_id)) {
+ if (rule)
+ rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+ efx_rps_hash_del(efx, &spec);
+ efx_farch_filter_table_clear_entry(efx, table, index);
+ ret = true;
+ }
}
-
+out_unlock:
+ spin_unlock_bh(&efx->rps_hash_lock);
up_write(&state->lock);
return ret;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 5e379a83c729..65568925c3ef 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -733,6 +733,56 @@ struct efx_rss_context {
u32 rx_indir_table[128];
};
+#ifdef CONFIG_RFS_ACCEL
+/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING
+ * is used to test if filter does or will exist.
+ */
+#define EFX_ARFS_FILTER_ID_PENDING -1
+#define EFX_ARFS_FILTER_ID_ERROR -2
+#define EFX_ARFS_FILTER_ID_REMOVING -3
+/**
+ * struct efx_arfs_rule - record of an ARFS filter and its IDs
+ * @node: linkage into hash table
+ * @spec: details of the filter (used as key for hash table). Use efx->type to
+ * determine which member to use.
+ * @rxq_index: channel to which the filter will steer traffic.
+ * @arfs_id: filter ID which was returned to ARFS
+ * @filter_id: index in software filter table. May be
+ * %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet,
+ * %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or
+ * %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter.
+ */
+struct efx_arfs_rule {
+ struct hlist_node node;
+ struct efx_filter_spec spec;
+ u16 rxq_index;
+ u16 arfs_id;
+ s32 filter_id;
+};
+
+/* Size chosen so that the table is one page (4kB) */
+#define EFX_ARFS_HASH_TABLE_SIZE 512
+
+/**
+ * struct efx_async_filter_insertion - Request to asynchronously insert a filter
+ * @net_dev: Reference to the netdevice
+ * @spec: The filter to insert
+ * @work: Workitem for this request
+ * @rxq_index: Identifies the channel for which this request was made
+ * @flow_id: Identifies the kernel-side flow for which this request was made
+ */
+struct efx_async_filter_insertion {
+ struct net_device *net_dev;
+ struct efx_filter_spec spec;
+ struct work_struct work;
+ u16 rxq_index;
+ u32 flow_id;
+};
+
+/* Maximum number of ARFS workitems that may be in flight on an efx_nic */
+#define EFX_RPS_MAX_IN_FLIGHT 8
+#endif /* CONFIG_RFS_ACCEL */
+
/**
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
@@ -850,6 +900,12 @@ struct efx_rss_context {
* @rps_expire_channel: Next channel to check for expiry
* @rps_expire_index: Next index to check for expiry in
* @rps_expire_channel's @rps_flow_id
+ * @rps_slot_map: bitmap of in-flight entries in @rps_slot
+ * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
+ * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and
+ * @rps_next_id).
+ * @rps_hash_table: Mapping between ARFS filters and their various IDs
+ * @rps_next_id: next arfs_id for an ARFS filter
* @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
@@ -1004,6 +1060,11 @@ struct efx_nic {
struct mutex rps_mutex;
unsigned int rps_expire_channel;
unsigned int rps_expire_index;
+ unsigned long rps_slot_map;
+ struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
+ spinlock_t rps_hash_lock;
+ struct hlist_head *rps_hash_table;
+ u32 rps_next_id;
#endif
atomic_t active_queues;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 95682831484e..d2e254f2f72b 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -827,31 +827,38 @@ MODULE_PARM_DESC(rx_refill_threshold,
#ifdef CONFIG_RFS_ACCEL
-/**
- * struct efx_async_filter_insertion - Request to asynchronously insert a filter
- * @net_dev: Reference to the netdevice
- * @spec: The filter to insert
- * @work: Workitem for this request
- * @rxq_index: Identifies the channel for which this request was made
- * @flow_id: Identifies the kernel-side flow for which this request was made
- */
-struct efx_async_filter_insertion {
- struct net_device *net_dev;
- struct efx_filter_spec spec;
- struct work_struct work;
- u16 rxq_index;
- u32 flow_id;
-};
-
static void efx_filter_rfs_work(struct work_struct *data)
{
struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
work);
struct efx_nic *efx = netdev_priv(req->net_dev);
struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
+ int slot_idx = req - efx->rps_slot;
+ struct efx_arfs_rule *rule;
+ u16 arfs_id = 0;
int rc;
- rc = efx->type->filter_insert(efx, &req->spec, false);
+ rc = efx->type->filter_insert(efx, &req->spec, true);
+ if (rc >= 0)
+ rc %= efx->type->max_rx_ip_filters;
+ if (efx->rps_hash_table) {
+ spin_lock_bh(&efx->rps_hash_lock);
+ rule = efx_rps_hash_find(efx, &req->spec);
+ /* The rule might have already gone, if someone else's request
+ * for the same spec was already worked and then expired before
+ * we got around to our work. In that case we have nothing
+ * tying us to an arfs_id, meaning that as soon as the filter
+ * is considered for expiry it will be removed.
+ */
+ if (rule) {
+ if (rc < 0)
+ rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
+ else
+ rule->filter_id = rc;
+ arfs_id = rule->arfs_id;
+ }
+ spin_unlock_bh(&efx->rps_hash_lock);
+ }
if (rc >= 0) {
/* Remember this so we can check whether to expire the filter
* later.
@@ -863,23 +870,23 @@ static void efx_filter_rfs_work(struct work_struct *data)
if (req->spec.ether_type == htons(ETH_P_IP))
netif_info(efx, rx_status, efx->net_dev,
- "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
(req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
req->spec.rem_host, ntohs(req->spec.rem_port),
req->spec.loc_host, ntohs(req->spec.loc_port),
- req->rxq_index, req->flow_id, rc);
+ req->rxq_index, req->flow_id, rc, arfs_id);
else
netif_info(efx, rx_status, efx->net_dev,
- "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+ "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
(req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
req->spec.rem_host, ntohs(req->spec.rem_port),
req->spec.loc_host, ntohs(req->spec.loc_port),
- req->rxq_index, req->flow_id, rc);
+ req->rxq_index, req->flow_id, rc, arfs_id);
}
/* Release references */
+ clear_bit(slot_idx, &efx->rps_slot_map);
dev_put(req->net_dev);
- kfree(req);
}
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -887,23 +894,39 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_async_filter_insertion *req;
+ struct efx_arfs_rule *rule;
struct flow_keys fk;
+ int slot_idx;
+ bool new;
+ int rc;
- if (flow_id == RPS_FLOW_ID_INVALID)
- return -EINVAL;
+ /* find a free slot */
+ for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
+ if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
+ break;
+ if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
+ return -EBUSY;
- if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
- return -EPROTONOSUPPORT;
+ if (flow_id == RPS_FLOW_ID_INVALID) {
+ rc = -EINVAL;
+ goto out_clear;
+ }
- if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
- return -EPROTONOSUPPORT;
- if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
- return -EPROTONOSUPPORT;
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
+ rc = -EPROTONOSUPPORT;
+ goto out_clear;
+ }
- req = kmalloc(sizeof(*req), GFP_ATOMIC);
- if (!req)
- return -ENOMEM;
+ if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
+ rc = -EPROTONOSUPPORT;
+ goto out_clear;
+ }
+ if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
+ rc = -EPROTONOSUPPORT;
+ goto out_clear;
+ }
+ req = efx->rps_slot + slot_idx;
efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
rxq_index);
@@ -927,12 +950,45 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
req->spec.rem_port = fk.ports.src;
req->spec.loc_port = fk.ports.dst;
+ if (efx->rps_hash_table) {
+ /* Add it to ARFS hash table */
+ spin_lock(&efx->rps_hash_lock);
+ rule = efx_rps_hash_add(efx, &req->spec, &new);
+ if (!rule) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ if (new)
+ rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
+ rc = rule->arfs_id;
+ /* Skip if existing or pending filter already does the right thing */
+ if (!new && rule->rxq_index == rxq_index &&
+ rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
+ goto out_unlock;
+ rule->rxq_index = rxq_index;
+ rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
+ spin_unlock(&efx->rps_hash_lock);
+ } else {
+ /* Without an ARFS hash table, we just use arfs_id 0 for all
+ * filters. This means if multiple flows hash to the same
+ * flow_id, all but the most recently touched will be eligible
+ * for expiry.
+ */
+ rc = 0;
+ }
+
+ /* Queue the request */
dev_hold(req->net_dev = net_dev);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
schedule_work(&req->work);
- return 0;
+ return rc;
+out_unlock:
+ spin_unlock(&efx->rps_hash_lock);
+out_clear:
+ clear_bit(slot_idx, &efx->rps_slot_map);
+ return rc;
}
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
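
Illustrative sketch (not part of the patch): efx_filter_rfs() now claims one of EFX_RPS_MAX_IN_FLIGHT preallocated request slots with test_and_set_bit() instead of kmalloc(GFP_ATOMIC), returns -EBUSY if all slots are busy, and efx_filter_rfs_work() releases the slot with clear_bit() once the work item is done. A simplified userspace analogue of that slot map, using C11 atomics in place of the kernel bitops, could be:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_IN_FLIGHT 8

struct request {
	int flow_id;	/* payload fields would live here */
};

static struct request slots[MAX_IN_FLIGHT];
static atomic_ulong slot_map;	/* bit n set => slots[n] is in use */

/* Claim a free slot, or return -1 if all are busy (the driver returns -EBUSY). */
static int slot_claim(void)
{
	for (int i = 0; i < MAX_IN_FLIGHT; i++) {
		unsigned long bit = 1UL << i;

		/* atomic fetch_or emulates test_and_set_bit(): if the bit was
		 * already set someone else owns the slot, so try the next one. */
		if (!(atomic_fetch_or(&slot_map, bit) & bit))
			return i;
	}
	return -1;
}

/* Release a slot once the deferred work has finished with it. */
static void slot_release(int i)
{
	atomic_fetch_and(&slot_map, ~(1UL << i));
}

int main(void)
{
	int a = slot_claim(), b = slot_claim();

	printf("claimed %d and %d\n", a, b);
	slot_release(a);
	slot_release(b);
	return 0;
}

Bounding the in-flight requests this way avoids atomic allocations on the RX fast path and naturally throttles ARFS when the work queue falls behind.
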
diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig
index 6bcfe27fc560..b80048ca82a0 100644
--- a/drivers/net/ethernet/socionext/Kconfig
+++ b/drivers/net/ethernet/socionext/Kconfig
@@ -14,6 +14,8 @@ if NET_VENDOR_SOCIONEXT
config SNI_AVE
tristate "Socionext AVE ethernet support"
depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ depends on HAS_IOMEM
+ select MFD_SYSCON
select PHYLIB
---help---
Driver for gigabit ethernet MACs, called AVE, in the
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 0b3b7a460641..f7ecceeb1e28 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -18,6 +19,7 @@
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
@@ -197,8 +199,16 @@
#define AVE_INTM_COUNT 20
#define AVE_FORCE_TXINTCNT 1
+/* SG */
+#define SG_ETPINMODE 0x540
+#define SG_ETPINMODE_EXTPHY BIT(1) /* for LD11 */
+#define SG_ETPINMODE_RMII(ins) BIT(ins)
+
#define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit)
+#define AVE_MAX_CLKS 4
+#define AVE_MAX_RSTS 2
+
enum desc_id {
AVE_DESCID_RX,
AVE_DESCID_TX,
@@ -225,10 +235,6 @@ struct ave_desc_info {
struct ave_desc *desc; /* skb info related descriptor */
};
-struct ave_soc_data {
- bool is_desc_64bit;
-};
-
struct ave_stats {
struct u64_stats_sync syncp;
u64 packets;
@@ -245,11 +251,16 @@ struct ave_private {
int phy_id;
unsigned int desc_size;
u32 msg_enable;
- struct clk *clk;
- struct reset_control *rst;
+ int nclks;
+ struct clk *clk[AVE_MAX_CLKS];
+ int nrsts;
+ struct reset_control *rst[AVE_MAX_RSTS];
phy_interface_t phy_mode;
struct phy_device *phydev;
struct mii_bus *mdio;
+ struct regmap *regmap;
+ unsigned int pinmode_mask;
+ unsigned int pinmode_val;
/* stats */
struct ave_stats stats_rx;
@@ -272,6 +283,14 @@ struct ave_private {
const struct ave_soc_data *data;
};
+struct ave_soc_data {
+ bool is_desc_64bit;
+ const char *clock_names[AVE_MAX_CLKS];
+ const char *reset_names[AVE_MAX_RSTS];
+ int (*get_pinmode)(struct ave_private *priv,
+ phy_interface_t phy_mode, u32 arg);
+};
+
static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
int offset)
{
@@ -1153,20 +1172,30 @@ static int ave_init(struct net_device *ndev)
struct device_node *np = dev->of_node;
struct device_node *mdio_np;
struct phy_device *phydev;
- int ret;
+ int nc, nr, ret;
/* enable clk because of hw access until ndo_open */
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(dev, "can't enable clock\n");
- return ret;
+ for (nc = 0; nc < priv->nclks; nc++) {
+ ret = clk_prepare_enable(priv->clk[nc]);
+ if (ret) {
+ dev_err(dev, "can't enable clock\n");
+ goto out_clk_disable;
+ }
}
- ret = reset_control_deassert(priv->rst);
- if (ret) {
- dev_err(dev, "can't deassert reset\n");
- goto out_clk_disable;
+
+ for (nr = 0; nr < priv->nrsts; nr++) {
+ ret = reset_control_deassert(priv->rst[nr]);
+ if (ret) {
+ dev_err(dev, "can't deassert reset\n");
+ goto out_reset_assert;
+ }
}
+ ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
+ priv->pinmode_mask, priv->pinmode_val);
+ if (ret)
+ return ret;
+
ave_global_reset(ndev);
mdio_np = of_get_child_by_name(np, "mdio");
@@ -1207,9 +1236,11 @@ static int ave_init(struct net_device *ndev)
out_mdio_unregister:
mdiobus_unregister(priv->mdio);
out_reset_assert:
- reset_control_assert(priv->rst);
+ while (--nr >= 0)
+ reset_control_assert(priv->rst[nr]);
out_clk_disable:
- clk_disable_unprepare(priv->clk);
+ while (--nc >= 0)
+ clk_disable_unprepare(priv->clk[nc]);
return ret;
}
@@ -1217,13 +1248,16 @@ out_clk_disable:
static void ave_uninit(struct net_device *ndev)
{
struct ave_private *priv = netdev_priv(ndev);
+ int i;
phy_disconnect(priv->phydev);
mdiobus_unregister(priv->mdio);
/* disable clk because of hw access after ndo_stop */
- reset_control_assert(priv->rst);
- clk_disable_unprepare(priv->clk);
+ for (i = 0; i < priv->nrsts; i++)
+ reset_control_assert(priv->rst[i]);
+ for (i = 0; i < priv->nclks; i++)
+ clk_disable_unprepare(priv->clk[i]);
}
static int ave_open(struct net_device *ndev)
@@ -1520,6 +1554,7 @@ static int ave_probe(struct platform_device *pdev)
const struct ave_soc_data *data;
struct device *dev = &pdev->dev;
char buf[ETHTOOL_FWVERS_LEN];
+ struct of_phandle_args args;
phy_interface_t phy_mode;
struct ave_private *priv;
struct net_device *ndev;
@@ -1527,8 +1562,9 @@ static int ave_probe(struct platform_device *pdev)
struct resource *res;
const void *mac_addr;
void __iomem *base;
+ const char *name;
+ int i, irq, ret;
u64 dma_mask;
- int irq, ret;
u32 ave_id;
data = of_device_get_match_data(dev);
@@ -1541,12 +1577,6 @@ static int ave_probe(struct platform_device *pdev)
dev_err(dev, "phy-mode not found\n");
return -EINVAL;
}
- if ((!phy_interface_mode_is_rgmii(phy_mode)) &&
- phy_mode != PHY_INTERFACE_MODE_RMII &&
- phy_mode != PHY_INTERFACE_MODE_MII) {
- dev_err(dev, "phy-mode is invalid\n");
- return -EINVAL;
- }
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -1614,15 +1644,47 @@ static int ave_probe(struct platform_device *pdev)
u64_stats_init(&priv->stats_tx.syncp);
u64_stats_init(&priv->stats_rx.syncp);
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- goto out_free_netdev;
+ for (i = 0; i < AVE_MAX_CLKS; i++) {
+ name = priv->data->clock_names[i];
+ if (!name)
+ break;
+ priv->clk[i] = devm_clk_get(dev, name);
+ if (IS_ERR(priv->clk[i])) {
+ ret = PTR_ERR(priv->clk[i]);
+ goto out_free_netdev;
+ }
+ priv->nclks++;
+ }
+
+ for (i = 0; i < AVE_MAX_RSTS; i++) {
+ name = priv->data->reset_names[i];
+ if (!name)
+ break;
+ priv->rst[i] = devm_reset_control_get_shared(dev, name);
+ if (IS_ERR(priv->rst[i])) {
+ ret = PTR_ERR(priv->rst[i]);
+ goto out_free_netdev;
+ }
+ priv->nrsts++;
}
- priv->rst = devm_reset_control_get_optional_shared(dev, NULL);
- if (IS_ERR(priv->rst)) {
- ret = PTR_ERR(priv->rst);
+ ret = of_parse_phandle_with_fixed_args(np,
+ "socionext,syscon-phy-mode",
+ 1, 0, &args);
+ if (ret) {
+ netdev_err(ndev, "can't get syscon-phy-mode property\n");
+ goto out_free_netdev;
+ }
+ priv->regmap = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(priv->regmap)) {
+ netdev_err(ndev, "can't map syscon-phy-mode\n");
+ ret = PTR_ERR(priv->regmap);
+ goto out_free_netdev;
+ }
+ ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]);
+ if (ret) {
+ netdev_err(ndev, "invalid phy-mode setting\n");
goto out_free_netdev;
}
@@ -1685,24 +1747,148 @@ static int ave_remove(struct platform_device *pdev)
return 0;
}
+static int ave_pro4_get_pinmode(struct ave_private *priv,
+ phy_interface_t phy_mode, u32 arg)
+{
+ if (arg > 0)
+ return -EINVAL;
+
+ priv->pinmode_mask = SG_ETPINMODE_RMII(0);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ priv->pinmode_val = SG_ETPINMODE_RMII(0);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RGMII:
+ priv->pinmode_val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ave_ld11_get_pinmode(struct ave_private *priv,
+ phy_interface_t phy_mode, u32 arg)
+{
+ if (arg > 0)
+ return -EINVAL;
+
+ priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ priv->pinmode_val = 0;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ave_ld20_get_pinmode(struct ave_private *priv,
+ phy_interface_t phy_mode, u32 arg)
+{
+ if (arg > 0)
+ return -EINVAL;
+
+ priv->pinmode_mask = SG_ETPINMODE_RMII(0);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ priv->pinmode_val = SG_ETPINMODE_RMII(0);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ priv->pinmode_val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ave_pxs3_get_pinmode(struct ave_private *priv,
+ phy_interface_t phy_mode, u32 arg)
+{
+ if (arg > 1)
+ return -EINVAL;
+
+ priv->pinmode_mask = SG_ETPINMODE_RMII(arg);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ priv->pinmode_val = SG_ETPINMODE_RMII(arg);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ priv->pinmode_val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct ave_soc_data ave_pro4_data = {
.is_desc_64bit = false,
+ .clock_names = {
+ "gio", "ether", "ether-gb", "ether-phy",
+ },
+ .reset_names = {
+ "gio", "ether",
+ },
+ .get_pinmode = ave_pro4_get_pinmode,
};
static const struct ave_soc_data ave_pxs2_data = {
.is_desc_64bit = false,
+ .clock_names = {
+ "ether",
+ },
+ .reset_names = {
+ "ether",
+ },
+ .get_pinmode = ave_pro4_get_pinmode,
};
static const struct ave_soc_data ave_ld11_data = {
.is_desc_64bit = false,
+ .clock_names = {
+ "ether",
+ },
+ .reset_names = {
+ "ether",
+ },
+ .get_pinmode = ave_ld11_get_pinmode,
};
static const struct ave_soc_data ave_ld20_data = {
.is_desc_64bit = true,
+ .clock_names = {
+ "ether",
+ },
+ .reset_names = {
+ "ether",
+ },
+ .get_pinmode = ave_ld20_get_pinmode,
};
static const struct ave_soc_data ave_pxs3_data = {
.is_desc_64bit = false,
+ .clock_names = {
+ "ether",
+ },
+ .reset_names = {
+ "ether",
+ },
+ .get_pinmode = ave_pxs3_get_pinmode,
};
static const struct of_device_id of_ave_match[] = {
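
Illustrative sketch (not part of the patch): ave_init() now walks the per-SoC clock_names[]/reset_names[] arrays from ave_soc_data, and on failure unwinds only the entries it already enabled (the while (--nc >= 0) loops above). A minimal userspace analogue of that acquire-with-rollback loop, with dummy enable/disable functions standing in for clk_prepare_enable()/clk_disable_unprepare(), might be:

#include <stdio.h>

#define MAX_CLKS 4

/* Stand-ins for the kernel clock API; fail on the clock named "bad". */
static int clk_enable_stub(const char *name)
{
	printf("enable  %s\n", name);
	return name[0] == 'b' ? -1 : 0;
}

static void clk_disable_stub(const char *name)
{
	printf("disable %s\n", name);
}

static int enable_all(const char *names[], int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = clk_enable_stub(names[i]);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	/* Only undo the clocks that were successfully enabled. */
	while (--i >= 0)
		clk_disable_stub(names[i]);
	return ret;
}

int main(void)
{
	const char *names[MAX_CLKS] = { "gio", "ether", "bad", "ether-phy" };

	return enable_all(names, MAX_CLKS) ? 1 : 0;
}
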
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 972e4ef6d414..68e9e2640c62 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -4,7 +4,8 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
- dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o $(stmmac-y)
+ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
+ stmmac_tc.o $(stmmac-y)
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 59673c6d2e52..a679cb729d1d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -39,6 +39,7 @@
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
#define DWMAC_CORE_4_00 0x40
+#define DWMAC_CORE_4_10 0x41
#define DWMAC_CORE_5_00 0x50
#define DWMAC_CORE_5_10 0x51
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
@@ -352,6 +353,10 @@ struct dma_features {
unsigned int rx_fifo_size;
/* Automotive Safety Package */
unsigned int asp;
+ /* RX Parser */
+ unsigned int frpsel;
+ unsigned int frpbs;
+ unsigned int frpes;
};
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
@@ -411,6 +416,7 @@ struct mac_device_info {
const struct stmmac_dma_ops *dma;
const struct stmmac_mode_ops *mode;
const struct stmmac_hwtimestamp *ptp;
+ const struct stmmac_tc_ops *tc;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* vpointer to device CSRs */
@@ -428,12 +434,9 @@ struct stmmac_rx_routing {
u32 reg_shift;
};
-struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries,
- int *synopsys_id);
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id);
-struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries, int *synopsys_id);
+int dwmac100_setup(struct stmmac_priv *priv);
+int dwmac1000_setup(struct stmmac_priv *priv);
+int dwmac4_setup(struct stmmac_priv *priv);
void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
@@ -453,24 +456,4 @@ extern const struct stmmac_mode_ops ring_mode_ops;
extern const struct stmmac_mode_ops chain_mode_ops;
extern const struct stmmac_desc_ops dwmac4_desc_ops;
-/**
- * stmmac_get_synopsys_id - return the SYINID.
- * @priv: driver private structure
- * Description: this simple function is to decode and return the SYINID
- * starting from the HW core register.
- */
-static inline u32 stmmac_get_synopsys_id(u32 hwid)
-{
- /* Check Synopsys Id (not available on old chips) */
- if (likely(hwid)) {
- u32 uid = ((hwid & 0x0000ff00) >> 8);
- u32 synid = (hwid & 0x000000ff);
-
- pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
- uid, synid);
-
- return synid;
- }
- return 0;
-}
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 7cb794094a70..4ff231df7322 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
@@ -29,6 +30,10 @@
#define PRG_ETH0_RGMII_MODE BIT(0)
+#define PRG_ETH0_EXT_PHY_MODE_MASK GENMASK(2, 0)
+#define PRG_ETH0_EXT_RGMII_MODE 1
+#define PRG_ETH0_EXT_RMII_MODE 4
+
/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
@@ -47,12 +52,20 @@
#define MUX_CLK_NUM_PARENTS 2
+struct meson8b_dwmac;
+
+struct meson8b_dwmac_data {
+ int (*set_phy_mode)(struct meson8b_dwmac *dwmac);
+};
+
struct meson8b_dwmac {
- struct device *dev;
- void __iomem *regs;
- phy_interface_t phy_mode;
- struct clk *rgmii_tx_clk;
- u32 tx_delay_ns;
+ struct device *dev;
+ void __iomem *regs;
+
+ const struct meson8b_dwmac_data *data;
+ phy_interface_t phy_mode;
+ struct clk *rgmii_tx_clk;
+ u32 tx_delay_ns;
};
struct meson8b_dwmac_clk_configs {
@@ -171,6 +184,59 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
return 0;
}
+static int meson8b_set_phy_mode(struct meson8b_dwmac *dwmac)
+{
+ switch (dwmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* enable RGMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_RGMII_MODE,
+ PRG_ETH0_RGMII_MODE);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ /* disable RGMII mode -> enables RMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_RGMII_MODE, 0);
+ break;
+ default:
+ dev_err(dwmac->dev, "fail to set phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac)
+{
+ switch (dwmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* enable RGMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_EXT_PHY_MODE_MASK,
+ PRG_ETH0_EXT_RGMII_MODE);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ /* disable RGMII mode -> enables RMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_EXT_PHY_MODE_MASK,
+ PRG_ETH0_EXT_RMII_MODE);
+ break;
+ default:
+ dev_err(dwmac->dev, "fail to set phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
{
int ret;
@@ -188,10 +254,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- /* enable RGMII mode */
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
- PRG_ETH0_RGMII_MODE);
-
/* only relevant for RMII mode -> disable in RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK, 0);
@@ -224,10 +286,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
break;
case PHY_INTERFACE_MODE_RMII:
- /* disable RGMII mode -> enables RMII mode */
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
- 0);
-
/* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK,
@@ -274,6 +332,11 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ dwmac->data = (const struct meson8b_dwmac_data *)
+ of_device_get_match_data(&pdev->dev);
+ if (!dwmac->data)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dwmac->regs)) {
@@ -298,6 +361,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
if (ret)
goto err_remove_config_dt;
+ ret = dwmac->data->set_phy_mode(dwmac);
+ if (ret)
+ goto err_remove_config_dt;
+
ret = meson8b_init_prg_eth(dwmac);
if (ret)
goto err_remove_config_dt;
@@ -316,10 +383,31 @@ err_remove_config_dt:
return ret;
}
+static const struct meson8b_dwmac_data meson8b_dwmac_data = {
+ .set_phy_mode = meson8b_set_phy_mode,
+};
+
+static const struct meson8b_dwmac_data meson_axg_dwmac_data = {
+ .set_phy_mode = meson_axg_set_phy_mode,
+};
+
static const struct of_device_id meson8b_dwmac_match[] = {
- { .compatible = "amlogic,meson8b-dwmac" },
- { .compatible = "amlogic,meson8m2-dwmac" },
- { .compatible = "amlogic,meson-gxbb-dwmac" },
+ {
+ .compatible = "amlogic,meson8b-dwmac",
+ .data = &meson8b_dwmac_data,
+ },
+ {
+ .compatible = "amlogic,meson8m2-dwmac",
+ .data = &meson8b_dwmac_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-dwmac",
+ .data = &meson8b_dwmac_data,
+ },
+ {
+ .compatible = "amlogic,meson-axg-dwmac",
+ .data = &meson_axg_dwmac_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, meson8b_dwmac_match);
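
Illustrative sketch (not part of the patch): the meson8b glue now keys a per-SoC ops structure off the compatible string via of_device_get_match_data() and calls data->set_phy_mode() instead of open-coding the register layout in meson8b_init_prg_eth(). Reduced to a userspace table lookup with hypothetical stand-in callbacks, the dispatch pattern looks roughly like this:

#include <stdio.h>
#include <string.h>

struct dwmac_data {
	int (*set_phy_mode)(void);	/* per-SoC PHY mode programming */
};

static int meson8b_set_phy_mode_stub(void)
{
	printf("program PRG_ETH0_RGMII_MODE bit\n");
	return 0;
}

static int meson_axg_set_phy_mode_stub(void)
{
	printf("program PRG_ETH0_EXT_PHY_MODE_MASK field\n");
	return 0;
}

static const struct dwmac_data meson8b_data = { .set_phy_mode = meson8b_set_phy_mode_stub };
static const struct dwmac_data meson_axg_data = { .set_phy_mode = meson_axg_set_phy_mode_stub };

/* Stand-in for the of_device_id match table + of_device_get_match_data(). */
static const struct {
	const char *compatible;
	const struct dwmac_data *data;
} match_table[] = {
	{ "amlogic,meson-gxbb-dwmac", &meson8b_data },
	{ "amlogic,meson-axg-dwmac",  &meson_axg_data },
};

static const struct dwmac_data *get_match_data(const char *compatible)
{
	for (unsigned int i = 0; i < sizeof(match_table) / sizeof(match_table[0]); i++)
		if (!strcmp(match_table[i].compatible, compatible))
			return match_table[i].data;
	return NULL;
}

int main(void)
{
	const struct dwmac_data *data = get_match_data("amlogic,meson-axg-dwmac");

	return data ? data->set_phy_mode() : -1;	/* probe fails without match data */
}
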
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 13133b30b575..f08625a02cea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1104,30 +1104,20 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
} else {
if (bsp_priv->clk_enabled) {
if (phy_iface == PHY_INTERFACE_MODE_RMII) {
- if (!IS_ERR(bsp_priv->mac_clk_rx))
- clk_disable_unprepare(
- bsp_priv->mac_clk_rx);
+ clk_disable_unprepare(bsp_priv->mac_clk_rx);
- if (!IS_ERR(bsp_priv->clk_mac_ref))
- clk_disable_unprepare(
- bsp_priv->clk_mac_ref);
+ clk_disable_unprepare(bsp_priv->clk_mac_ref);
- if (!IS_ERR(bsp_priv->clk_mac_refout))
- clk_disable_unprepare(
- bsp_priv->clk_mac_refout);
+ clk_disable_unprepare(bsp_priv->clk_mac_refout);
}
- if (!IS_ERR(bsp_priv->clk_phy))
- clk_disable_unprepare(bsp_priv->clk_phy);
+ clk_disable_unprepare(bsp_priv->clk_phy);
- if (!IS_ERR(bsp_priv->aclk_mac))
- clk_disable_unprepare(bsp_priv->aclk_mac);
+ clk_disable_unprepare(bsp_priv->aclk_mac);
- if (!IS_ERR(bsp_priv->pclk_mac))
- clk_disable_unprepare(bsp_priv->pclk_mac);
+ clk_disable_unprepare(bsp_priv->pclk_mac);
- if (!IS_ERR(bsp_priv->mac_clk_tx))
- clk_disable_unprepare(bsp_priv->mac_clk_tx);
+ clk_disable_unprepare(bsp_priv->mac_clk_tx);
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_disable_unprepare(bsp_priv->clk_mac);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index a3fa65b1ca8e..2e6e2a96b4f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -42,17 +42,27 @@
* This value is used to properly disable the EMAC
* and serves as a good starting value in case the
* boot process (U-Boot) left some state behind.
+ * @syscon_field: reg_field for the syscon's gmac register
* @soc_has_internal_phy: Does the MAC embed an internal PHY
* @support_mii: Does the MAC handle MII
* @support_rmii: Does the MAC handle RMII
* @support_rgmii: Does the MAC handle RGMII
+ *
+ * @rx_delay_max: Maximum raw value for RX delay chain
+ * @tx_delay_max: Maximum raw value for TX delay chain
+ * These two also indicate the bitmask for
+ * the RX and TX delay chain registers. A
+ * value of zero indicates this is not supported.
*/
struct emac_variant {
u32 default_syscon_value;
+ const struct reg_field *syscon_field;
bool soc_has_internal_phy;
bool support_mii;
bool support_rmii;
bool support_rgmii;
+ u8 rx_delay_max;
+ u8 tx_delay_max;
};
/* struct sunxi_priv_data - hold all sunxi private data
@@ -71,38 +81,70 @@ struct sunxi_priv_data {
struct regulator *regulator;
struct reset_control *rst_ephy;
const struct emac_variant *variant;
- struct regmap *regmap;
+ struct regmap_field *regmap_field;
bool internal_phy_powered;
void *mux_handle;
};
+/* EMAC clock register @ 0x30 in the "system control" address range */
+static const struct reg_field sun8i_syscon_reg_field = {
+ .reg = 0x30,
+ .lsb = 0,
+ .msb = 31,
+};
+
+/* EMAC clock register @ 0x164 in the CCU address range */
+static const struct reg_field sun8i_ccu_reg_field = {
+ .reg = 0x164,
+ .lsb = 0,
+ .msb = 31,
+};
+
static const struct emac_variant emac_variant_h3 = {
.default_syscon_value = 0x58000,
+ .syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = true,
.support_mii = true,
.support_rmii = true,
- .support_rgmii = true
+ .support_rgmii = true,
+ .rx_delay_max = 31,
+ .tx_delay_max = 7,
};
static const struct emac_variant emac_variant_v3s = {
.default_syscon_value = 0x38000,
+ .syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = true,
.support_mii = true
};
static const struct emac_variant emac_variant_a83t = {
.default_syscon_value = 0,
+ .syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = false,
.support_mii = true,
- .support_rgmii = true
+ .support_rgmii = true,
+ .rx_delay_max = 31,
+ .tx_delay_max = 7,
+};
+
+static const struct emac_variant emac_variant_r40 = {
+ .default_syscon_value = 0,
+ .syscon_field = &sun8i_ccu_reg_field,
+ .support_mii = true,
+ .support_rgmii = true,
+ .rx_delay_max = 7,
};
static const struct emac_variant emac_variant_a64 = {
.default_syscon_value = 0,
+ .syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = false,
.support_mii = true,
.support_rmii = true,
- .support_rgmii = true
+ .support_rgmii = true,
+ .rx_delay_max = 31,
+ .tx_delay_max = 7,
};
#define EMAC_BASIC_CTL0 0x00
@@ -206,9 +248,7 @@ static const struct emac_variant emac_variant_a64 = {
#define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */
/* Generic system control EMAC_CLK bits */
-#define SYSCON_ETXDC_MASK GENMASK(2, 0)
#define SYSCON_ETXDC_SHIFT 10
-#define SYSCON_ERXDC_MASK GENMASK(4, 0)
#define SYSCON_ERXDC_SHIFT 5
/* EMAC PHY Interface Type */
#define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */
@@ -216,7 +256,6 @@ static const struct emac_variant emac_variant_a64 = {
#define SYSCON_ETCS_MII 0x0
#define SYSCON_ETCS_EXT_GMII 0x1
#define SYSCON_ETCS_INT_GMII 0x2
-#define SYSCON_EMAC_REG 0x30
/* sun8i_dwmac_dma_reset() - reset the EMAC
* Called from stmmac via stmmac_dma_ops->reset
@@ -237,17 +276,28 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
* Called from stmmac via stmmac_dma_ops->init
*/
static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds)
+ struct stmmac_dma_cfg *dma_cfg, int atds)
{
- /* Write TX and RX descriptors address */
- writel(dma_rx, ioaddr + EMAC_RX_DESC_LIST);
- writel(dma_tx, ioaddr + EMAC_TX_DESC_LIST);
-
writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
}
+static void sun8i_dwmac_dma_init_rx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
+{
+ /* Write RX descriptors address */
+ writel(dma_rx_phy, ioaddr + EMAC_RX_DESC_LIST);
+}
+
+static void sun8i_dwmac_dma_init_tx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ /* Write TX descriptors address */
+ writel(dma_tx_phy, ioaddr + EMAC_TX_DESC_LIST);
+}
+
/* sun8i_dwmac_dump_regs() - Dump EMAC address space
* Called from stmmac_dma_ops->dump_regs
* Used for ethtool
@@ -398,13 +448,36 @@ static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
return ret;
}
-static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode, int rxfifosz)
+static void sun8i_dwmac_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL1);
+ if (mode == SF_DMA_MODE) {
+ v |= EMAC_RX_MD;
+ } else {
+ v &= ~EMAC_RX_MD;
+ v &= ~EMAC_RX_TH_MASK;
+ if (mode < 32)
+ v |= EMAC_RX_TH_32;
+ else if (mode < 64)
+ v |= EMAC_RX_TH_64;
+ else if (mode < 96)
+ v |= EMAC_RX_TH_96;
+ else if (mode < 128)
+ v |= EMAC_RX_TH_128;
+ }
+ writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
+static void sun8i_dwmac_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
{
u32 v;
v = readl(ioaddr + EMAC_TX_CTL1);
- if (txmode == SF_DMA_MODE) {
+ if (mode == SF_DMA_MODE) {
v |= EMAC_TX_MD;
/* Undocumented bit (called TX_NEXT_FRM in BSP), the original
* comment is
@@ -415,40 +488,26 @@ static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode,
} else {
v &= ~EMAC_TX_MD;
v &= ~EMAC_TX_TH_MASK;
- if (txmode < 64)
+ if (mode < 64)
v |= EMAC_TX_TH_64;
- else if (txmode < 128)
+ else if (mode < 128)
v |= EMAC_TX_TH_128;
- else if (txmode < 192)
+ else if (mode < 192)
v |= EMAC_TX_TH_192;
- else if (txmode < 256)
+ else if (mode < 256)
v |= EMAC_TX_TH_256;
}
writel(v, ioaddr + EMAC_TX_CTL1);
-
- v = readl(ioaddr + EMAC_RX_CTL1);
- if (rxmode == SF_DMA_MODE) {
- v |= EMAC_RX_MD;
- } else {
- v &= ~EMAC_RX_MD;
- v &= ~EMAC_RX_TH_MASK;
- if (rxmode < 32)
- v |= EMAC_RX_TH_32;
- else if (rxmode < 64)
- v |= EMAC_RX_TH_64;
- else if (rxmode < 96)
- v |= EMAC_RX_TH_96;
- else if (rxmode < 128)
- v |= EMAC_RX_TH_128;
- }
- writel(v, ioaddr + EMAC_RX_CTL1);
}
static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
.reset = sun8i_dwmac_dma_reset,
.init = sun8i_dwmac_dma_init,
+ .init_rx_chan = sun8i_dwmac_dma_init_rx,
+ .init_tx_chan = sun8i_dwmac_dma_init_tx,
.dump_regs = sun8i_dwmac_dump_regs,
- .dma_mode = sun8i_dwmac_dma_operation_mode,
+ .dma_rx_mode = sun8i_dwmac_dma_operation_mode_rx,
+ .dma_tx_mode = sun8i_dwmac_dma_operation_mode_tx,
.enable_dma_transmission = sun8i_dwmac_enable_dma_transmission,
.enable_dma_irq = sun8i_dwmac_enable_dma_irq,
.disable_dma_irq = sun8i_dwmac_disable_dma_irq,
@@ -745,7 +804,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
bool need_power_ephy = false;
if (current_child ^ desired_child) {
- regmap_read(gmac->regmap, SYSCON_EMAC_REG, &reg);
+ regmap_field_read(gmac->regmap_field, &reg);
switch (desired_child) {
case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
dev_info(priv->device, "Switch mux to internal PHY");
@@ -763,7 +822,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
desired_child);
return -EINVAL;
}
- regmap_write(gmac->regmap, SYSCON_EMAC_REG, val);
+ regmap_field_write(gmac->regmap_field, val);
if (need_power_ephy) {
ret = sun8i_dwmac_power_internal_phy(priv);
if (ret)
@@ -801,7 +860,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
int ret;
u32 reg, val;
- regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val);
+ regmap_field_read(gmac->regmap_field, &val);
reg = gmac->variant->default_syscon_value;
if (reg != val)
dev_warn(priv->device,
@@ -835,8 +894,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
}
val /= 100;
dev_dbg(priv->device, "set tx-delay to %x\n", val);
- if (val <= SYSCON_ETXDC_MASK) {
- reg &= ~(SYSCON_ETXDC_MASK << SYSCON_ETXDC_SHIFT);
+ if (val <= gmac->variant->tx_delay_max) {
+ reg &= ~(gmac->variant->tx_delay_max <<
+ SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
dev_err(priv->device, "Invalid TX clock delay: %d\n",
@@ -852,8 +912,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
}
val /= 100;
dev_dbg(priv->device, "set rx-delay to %x\n", val);
- if (val <= SYSCON_ERXDC_MASK) {
- reg &= ~(SYSCON_ERXDC_MASK << SYSCON_ERXDC_SHIFT);
+ if (val <= gmac->variant->rx_delay_max) {
+ reg &= ~(gmac->variant->rx_delay_max <<
+ SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
dev_err(priv->device, "Invalid RX clock delay: %d\n",
@@ -883,7 +944,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
return -EINVAL;
}
- regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
+ regmap_field_write(gmac->regmap_field, reg);
return 0;
}
@@ -892,7 +953,7 @@ static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
{
u32 reg = gmac->variant->default_syscon_value;
- regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
+ regmap_field_write(gmac->regmap_field, reg);
}
static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
@@ -971,6 +1032,34 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
return mac;
}
+static struct regmap *sun8i_dwmac_get_syscon_from_dev(struct device_node *node)
+{
+ struct device_node *syscon_node;
+ struct platform_device *syscon_pdev;
+ struct regmap *regmap = NULL;
+
+ syscon_node = of_parse_phandle(node, "syscon", 0);
+ if (!syscon_node)
+ return ERR_PTR(-ENODEV);
+
+ syscon_pdev = of_find_device_by_node(syscon_node);
+ if (!syscon_pdev) {
+ /* platform device might not be probed yet */
+ regmap = ERR_PTR(-EPROBE_DEFER);
+ goto out_put_node;
+ }
+
+ /* If no regmap is found then the other device driver is at fault */
+ regmap = dev_get_regmap(&syscon_pdev->dev, NULL);
+ if (!regmap)
+ regmap = ERR_PTR(-EINVAL);
+
+ platform_device_put(syscon_pdev);
+out_put_node:
+ of_node_put(syscon_node);
+ return regmap;
+}
+
static int sun8i_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -980,6 +1069,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
int ret;
struct stmmac_priv *priv;
struct net_device *ndev;
+ struct regmap *regmap;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -1014,14 +1104,41 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
gmac->regulator = NULL;
}
- gmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "syscon");
- if (IS_ERR(gmac->regmap)) {
- ret = PTR_ERR(gmac->regmap);
+ /* The "GMAC clock control" register might be located in the
+ * CCU address range (on the R40), or the system control address
+ * range (on most other sun8i and later SoCs).
+ *
+ * The former controls most if not all clocks in the SoC. The
+ * latter has an SoC identification register, and on some SoCs,
+ * controls to map device specific SRAM to either the intended
+ * peripheral, or the CPU address space.
+ *
+ * In either case, there should be a coordinated and restricted
+ * method of accessing the register needed here. This is done by
+ * having the device export a custom regmap, instead of a generic
+ * syscon, which grants all access to all registers.
+ *
+ * To support old device trees, we fall back to using the syscon
+ * interface if possible.
+ */
+ regmap = sun8i_dwmac_get_syscon_from_dev(pdev->dev.of_node);
+ if (IS_ERR(regmap))
+ regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "syscon");
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
dev_err(&pdev->dev, "Unable to map syscon: %d\n", ret);
return ret;
}
+ gmac->regmap_field = devm_regmap_field_alloc(dev, regmap,
+ *gmac->variant->syscon_field);
+ if (IS_ERR(gmac->regmap_field)) {
+ ret = PTR_ERR(gmac->regmap_field);
+ dev_err(dev, "Unable to map syscon register: %d\n", ret);
+ return ret;
+ }
+
plat_dat->interface = of_get_phy_mode(dev->of_node);
/* platform data specifying hardware features and callbacks.
@@ -1078,6 +1195,8 @@ static const struct of_device_id sun8i_dwmac_match[] = {
.data = &emac_variant_v3s },
{ .compatible = "allwinner,sun8i-a83t-emac",
.data = &emac_variant_a83t },
+ { .compatible = "allwinner,sun8i-r40-gmac",
+ .data = &emac_variant_r40 },
{ .compatible = "allwinner,sun50i-a64-emac",
.data = &emac_variant_a64 },
{ }
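
Aside: the tx-delay/rx-delay hunks above replace the fixed SYSCON_ETXDC_MASK /
SYSCON_ERXDC_MASK with the per-variant tx_delay_max / rx_delay_max while keeping
the same clear-then-set register update. A minimal, self-contained userspace C
sketch of that pattern follows; the shift, field width and delay value are
illustrative placeholders, not the driver's actual register layout.

#include <stdio.h>
#include <stdint.h>

#define ETXDC_SHIFT	10	/* placeholder shift for the TX delay field */

static uint32_t set_tx_delay(uint32_t reg, uint32_t val, uint32_t delay_max)
{
	if (val > delay_max)
		return reg;			/* out of range: leave reg untouched */
	reg &= ~(delay_max << ETXDC_SHIFT);	/* clear the whole delay field */
	reg |= val << ETXDC_SHIFT;		/* program the new delay */
	return reg;
}

int main(void)
{
	uint32_t reg = 0xffffffff;

	/* e.g. a 300 ps delay in 100 ps steps, with a 3-bit field (max 0x7) */
	reg = set_tx_delay(reg, 300 / 100, 0x7);
	printf("reg = 0x%08x\n", reg);		/* prints 0xffffefff */
	return 0;
}
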
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index c02d36629c52..184ca13c8f79 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -29,7 +29,6 @@
#define GMAC_MII_DATA 0x00000014 /* MII Data */
#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
-#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC_DEBUG 0x00000024 /* GMAC debug register */
#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index ef10baf14186..0877bde6e860 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -27,6 +27,7 @@
#include <linux/ethtool.h>
#include <net/dsa.h>
#include <asm/io.h>
+#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac1000.h"
@@ -498,7 +499,7 @@ static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
x->mac_gmii_rx_proto_engine++;
}
-static const struct stmmac_ops dwmac1000_ops = {
+const struct stmmac_ops dwmac1000_ops = {
.core_init = dwmac1000_core_init,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac1000_rx_ipc_enable,
@@ -519,28 +520,21 @@ static const struct stmmac_ops dwmac1000_ops = {
.pcs_get_adv_lp = dwmac1000_get_adv_lp,
};
-struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries,
- int *synopsys_id)
+int dwmac1000_setup(struct stmmac_priv *priv)
{
- struct mac_device_info *mac;
- u32 hwid = readl(ioaddr + GMAC_VERSION);
+ struct mac_device_info *mac = priv->hw;
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
- if (!mac)
- return NULL;
+ dev_info(priv->device, "\tDWMAC1000\n");
- mac->pcsr = ioaddr;
- mac->multicast_filter_bins = mcbins;
- mac->unicast_filter_entries = perfect_uc_entries;
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
mac->mcast_bits_log2 = 0;
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
- mac->mac = &dwmac1000_ops;
- mac->dma = &dwmac1000_dma_ops;
-
mac->link.duplex = GMAC_CONTROL_DM;
mac->link.speed10 = GMAC_CONTROL_PS;
mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
@@ -555,8 +549,5 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
mac->mii.clk_csr_shift = 2;
mac->mii.clk_csr_mask = GENMASK(5, 2);
- /* Get and dump the chip ID */
- *synopsys_id = stmmac_get_synopsys_id(hwid);
-
- return mac;
+ return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 7ecf549c7f1c..aacc4aa80e3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -81,8 +81,7 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
}
static void dwmac1000_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds)
+ struct stmmac_dma_cfg *dma_cfg, int atds)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
@@ -119,12 +118,22 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
/* Mask interrupts by writing to CSR7 */
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
- /* RX/TX descriptor base address lists must be written into
- * DMA CSR3 and CSR4, respectively
- */
- writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
- writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+static void dwmac1000_dma_init_rx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
+{
+ /* RX descriptor base address list must be written into DMA CSR3 */
+ writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+}
+
+static void dwmac1000_dma_init_tx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ /* TX descriptor base address list must be written into DMA CSR4 */
+ writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
}
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -148,12 +157,40 @@ static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
return csr6;
}
-static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode, int rxfifosz)
+static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (mode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable RX store and forward mode\n");
+ csr6 |= DMA_CONTROL_RSF;
+ } else {
+ pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
+ csr6 &= ~DMA_CONTROL_RSF;
+ csr6 &= DMA_CONTROL_TC_RX_MASK;
+ if (mode <= 32)
+ csr6 |= DMA_CONTROL_RTC_32;
+ else if (mode <= 64)
+ csr6 |= DMA_CONTROL_RTC_64;
+ else if (mode <= 96)
+ csr6 |= DMA_CONTROL_RTC_96;
+ else
+ csr6 |= DMA_CONTROL_RTC_128;
+ }
+
+ /* Configure flow control based on rx fifo size */
+ csr6 = dwmac1000_configure_fc(csr6, fifosz);
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
- if (txmode == SF_DMA_MODE) {
+ if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n");
/* Transmit COE type 2 cannot be done in cut-through mode. */
csr6 |= DMA_CONTROL_TSF;
@@ -162,42 +199,22 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
*/
csr6 |= DMA_CONTROL_OSF;
} else {
- pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
+ pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
csr6 &= ~DMA_CONTROL_TSF;
csr6 &= DMA_CONTROL_TC_TX_MASK;
/* Set the transmit threshold */
- if (txmode <= 32)
+ if (mode <= 32)
csr6 |= DMA_CONTROL_TTC_32;
- else if (txmode <= 64)
+ else if (mode <= 64)
csr6 |= DMA_CONTROL_TTC_64;
- else if (txmode <= 128)
+ else if (mode <= 128)
csr6 |= DMA_CONTROL_TTC_128;
- else if (txmode <= 192)
+ else if (mode <= 192)
csr6 |= DMA_CONTROL_TTC_192;
else
csr6 |= DMA_CONTROL_TTC_256;
}
- if (rxmode == SF_DMA_MODE) {
- pr_debug("GMAC: enable RX store and forward mode\n");
- csr6 |= DMA_CONTROL_RSF;
- } else {
- pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
- csr6 &= ~DMA_CONTROL_RSF;
- csr6 &= DMA_CONTROL_TC_RX_MASK;
- if (rxmode <= 32)
- csr6 |= DMA_CONTROL_RTC_32;
- else if (rxmode <= 64)
- csr6 |= DMA_CONTROL_RTC_64;
- else if (rxmode <= 96)
- csr6 |= DMA_CONTROL_RTC_96;
- else
- csr6 |= DMA_CONTROL_RTC_128;
- }
-
- /* Configure flow control based on rx fifo size */
- csr6 = dwmac1000_configure_fc(csr6, rxfifosz);
-
writel(csr6, ioaddr + DMA_CONTROL);
}
@@ -256,9 +273,12 @@ static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
const struct stmmac_dma_ops dwmac1000_dma_ops = {
.reset = dwmac_dma_reset,
.init = dwmac1000_dma_init,
+ .init_rx_chan = dwmac1000_dma_init_rx,
+ .init_tx_chan = dwmac1000_dma_init_tx,
.axi = dwmac1000_dma_axi,
.dump_regs = dwmac1000_dump_dma_regs,
- .dma_mode = dwmac1000_dma_operation_mode,
+ .dma_rx_mode = dwmac1000_dma_operation_mode_rx,
+ .dma_tx_mode = dwmac1000_dma_operation_mode_tx,
.enable_dma_transmission = dwmac_enable_dma_transmission,
.enable_dma_irq = dwmac_enable_dma_irq,
.disable_dma_irq = dwmac_disable_dma_irq,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 91b23f9db31a..b735143987e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -27,6 +27,7 @@
#include <linux/crc32.h>
#include <net/dsa.h>
#include <asm/io.h>
+#include "stmmac.h"
#include "dwmac100.h"
static void dwmac100_core_init(struct mac_device_info *hw,
@@ -159,7 +160,7 @@ static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode)
return;
}
-static const struct stmmac_ops dwmac100_ops = {
+const struct stmmac_ops dwmac100_ops = {
.core_init = dwmac100_core_init,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac100_rx_ipc_enable,
@@ -172,20 +173,13 @@ static const struct stmmac_ops dwmac100_ops = {
.get_umac_addr = dwmac100_get_umac_addr,
};
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
+int dwmac100_setup(struct stmmac_priv *priv)
{
- struct mac_device_info *mac;
+ struct mac_device_info *mac = priv->hw;
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
- if (!mac)
- return NULL;
-
- pr_info("\tDWMAC100\n");
-
- mac->pcsr = ioaddr;
- mac->mac = &dwmac100_ops;
- mac->dma = &dwmac100_dma_ops;
+ dev_info(priv->device, "\tDWMAC100\n");
+ mac->pcsr = priv->ioaddr;
mac->link.duplex = MAC_CONTROL_F;
mac->link.speed10 = 0;
mac->link.speed100 = 0;
@@ -200,8 +194,5 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
mac->mii.clk_csr_shift = 2;
mac->mii.clk_csr_mask = GENMASK(5, 2);
- /* Synopsys Id is not available on old chips */
- *synopsys_id = 0;
-
- return mac;
+ return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 6502b9aa3bf5..21dee25ee570 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -29,8 +29,7 @@
#include "dwmac_dma.h"
static void dwmac100_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds)
+ struct stmmac_dma_cfg *dma_cfg, int atds)
{
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
@@ -38,12 +37,22 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
/* Mask interrupts by writing to CSR7 */
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
- /* RX/TX descriptor base addr lists must be written into
- * DMA CSR3 and CSR4, respectively
- */
- writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
- writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+static void dwmac100_dma_init_rx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_rx_phy, u32 chan)
+{
+ /* RX descriptor base addr lists must be written into DMA CSR3 */
+ writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+}
+
+static void dwmac100_dma_init_tx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 dma_tx_phy, u32 chan)
+{
+ /* TX descriptor base addr lists must be written into DMA CSR4 */
+ writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
}
/* Store and Forward capability is not used at all.
@@ -51,14 +60,14 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
* The transmit threshold can be programmed by setting the TTC bits in the DMA
* control register.
*/
-static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode, int rxfifosz)
+static void dwmac100_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
- if (txmode <= 32)
+ if (mode <= 32)
csr6 |= DMA_CONTROL_TTC_32;
- else if (txmode <= 64)
+ else if (mode <= 64)
csr6 |= DMA_CONTROL_TTC_64;
else
csr6 |= DMA_CONTROL_TTC_128;
@@ -112,8 +121,10 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
const struct stmmac_dma_ops dwmac100_dma_ops = {
.reset = dwmac_dma_reset,
.init = dwmac100_dma_init,
+ .init_rx_chan = dwmac100_dma_init_rx,
+ .init_tx_chan = dwmac100_dma_init_tx,
.dump_regs = dwmac100_dump_dma_regs,
- .dma_mode = dwmac100_dma_operation_mode,
+ .dma_tx_mode = dwmac100_dma_operation_mode_tx,
.dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
.enable_dma_transmission = dwmac_enable_dma_transmission,
.enable_dma_irq = dwmac_enable_dma_irq,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index c7bff596c665..6330a55953df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -34,7 +34,6 @@
#define GMAC_PCS_BASE 0x000000e0
#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
#define GMAC_PMT 0x000000c0
-#define GMAC_VERSION 0x00000110
#define GMAC_DEBUG 0x00000114
#define GMAC_HW_FEATURE0 0x0000011c
#define GMAC_HW_FEATURE1 0x00000120
@@ -195,6 +194,9 @@ enum power_event {
/* MAC HW features3 bitmap */
#define GMAC_HW_FEAT_ASP GENMASK(29, 28)
+#define GMAC_HW_FEAT_FRPES GENMASK(14, 13)
+#define GMAC_HW_FEAT_FRPBS GENMASK(12, 11)
+#define GMAC_HW_FEAT_FRPSEL BIT(10)
/* MAC HW ADDR regs */
#define GMAC_HI_DCS GENMASK(18, 16)
@@ -203,6 +205,7 @@ enum power_event {
/* MTL registers */
#define MTL_OPERATION_MODE 0x00000c00
+#define MTL_FRPE BIT(15)
#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5)
#define MTL_OPERATION_SCHALG_WRR (0x0 << 5)
#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5)
@@ -347,7 +350,7 @@ enum power_event {
#define MTL_RX_OVERFLOW_INT BIT(16)
/* Default operating mode of the MAC */
-#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
+#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \
GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
/* To dump the core regs excluding the Address Registers */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index a3af92ebbca8..a7121a7d9391 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -18,6 +18,7 @@
#include <linux/ethtool.h>
#include <linux/io.h>
#include <net/dsa.h>
+#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"
@@ -31,13 +32,6 @@ static void dwmac4_core_init(struct mac_device_info *hw,
value |= GMAC_CORE_INIT;
- /* Clear ACS bit because Ethernet switch tagging formats such as
- * Broadcom tags can look like invalid LLC/SNAP packets and cause the
- * hardware to truncate packets on reception.
- */
- if (netdev_uses_dsa(dev))
- value &= ~GMAC_CONFIG_ACS;
-
if (mtu > 1500)
value |= GMAC_CONFIG_2K;
if (mtu > 2000)
@@ -707,7 +701,7 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
x->mac_gmii_rx_proto_engine++;
}
-static const struct stmmac_ops dwmac4_ops = {
+const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
@@ -738,7 +732,7 @@ static const struct stmmac_ops dwmac4_ops = {
.set_filter = dwmac4_set_filter,
};
-static const struct stmmac_ops dwmac410_ops = {
+const struct stmmac_ops dwmac410_ops = {
.core_init = dwmac4_core_init,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
@@ -769,7 +763,7 @@ static const struct stmmac_ops dwmac410_ops = {
.set_filter = dwmac4_set_filter,
};
-static const struct stmmac_ops dwmac510_ops = {
+const struct stmmac_ops dwmac510_ops = {
.core_init = dwmac4_core_init,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
@@ -801,21 +795,19 @@ static const struct stmmac_ops dwmac510_ops = {
.safety_feat_config = dwmac5_safety_feat_config,
.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
.safety_feat_dump = dwmac5_safety_feat_dump,
+ .rxp_config = dwmac5_rxp_config,
};
-struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries, int *synopsys_id)
+int dwmac4_setup(struct stmmac_priv *priv)
{
- struct mac_device_info *mac;
- u32 hwid = readl(ioaddr + GMAC_VERSION);
+ struct mac_device_info *mac = priv->hw;
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
- if (!mac)
- return NULL;
+ dev_info(priv->device, "\tDWMAC4/5\n");
- mac->pcsr = ioaddr;
- mac->multicast_filter_bins = mcbins;
- mac->unicast_filter_entries = perfect_uc_entries;
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
mac->mcast_bits_log2 = 0;
if (mac->multicast_filter_bins)
@@ -835,20 +827,5 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
mac->mii.clk_csr_shift = 8;
mac->mii.clk_csr_mask = GENMASK(11, 8);
- /* Get and dump the chip ID */
- *synopsys_id = stmmac_get_synopsys_id(hwid);
-
- if (*synopsys_id > DWMAC_CORE_4_00)
- mac->dma = &dwmac410_dma_ops;
- else
- mac->dma = &dwmac4_dma_ops;
-
- if (*synopsys_id >= DWMAC_CORE_5_10)
- mac->mac = &dwmac510_ops;
- else if (*synopsys_id >= DWMAC_CORE_4_00)
- mac->mac = &dwmac410_ops;
- else
- mac->mac = &dwmac4_ops;
-
- return mac;
+ return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 65ed896c13cb..20299f6f65fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -189,9 +189,12 @@ static void dwmac4_set_tx_owner(struct dma_desc *p)
p->des3 |= cpu_to_le32(TDES3_OWN);
}
-static void dwmac4_set_rx_owner(struct dma_desc *p)
+static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 |= cpu_to_le32(RDES3_OWN);
+ p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+
+ if (!disable_rx_ic)
+ p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
}
static int dwmac4_get_tx_ls(struct dma_desc *p)
@@ -292,10 +295,7 @@ exit:
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
- p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
-
- if (!disable_rx_ic)
- p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
+ dwmac4_set_rx_owner(p, disable_rx_ic);
}
static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
@@ -424,6 +424,25 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
}
+static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+ *addr = le32_to_cpu(p->des0);
+}
+
+static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des0 = cpu_to_le32(addr);
+ p->des1 = 0;
+}
+
+static void dwmac4_clear(struct dma_desc *p)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
const struct stmmac_desc_ops dwmac4_desc_ops = {
.tx_status = dwmac4_wrback_get_tx_status,
.rx_status = dwmac4_wrback_get_rx_status,
@@ -445,6 +464,9 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.init_tx_desc = dwmac4_rd_init_tx_desc,
.display_ring = dwmac4_display_ring,
.set_mss = dwmac4_set_mss_ctxt,
+ .get_addr = dwmac4_get_addr,
+ .set_addr = dwmac4_set_addr,
+ .clear = dwmac4_clear,
};
const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index d37d457306d1..bf8e5a16f11c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -94,6 +94,10 @@ static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+
+ /* Enable OSP to get best performance */
+ value |= DMA_CONTROL_OSP;
+
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
@@ -116,8 +120,7 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr,
}
static void dwmac4_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds)
+ struct stmmac_dma_cfg *dma_cfg, int atds)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
@@ -379,6 +382,9 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
/* 5.10 Features */
dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
+ dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
+ dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
+ dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
}
/* Enable/disable TSO feature and set MSS */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 8474bf961dd0..c63c1fe3f26b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -184,7 +184,6 @@
#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
int dwmac4_dma_reset(void __iomem *ioaddr);
-void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 2978550bb7f6..b2becb80a697 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -7,6 +7,7 @@
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
+#include "stmmac.h"
struct dwmac5_error_desc {
bool valid;
@@ -299,3 +300,197 @@ int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
*desc = dwmac5_all_errors[module].desc[offset].desc;
return 0;
}
+
+static int dwmac5_rxp_disable(void __iomem *ioaddr)
+{
+ u32 val;
+ int ret;
+
+ val = readl(ioaddr + MTL_OPERATION_MODE);
+ val &= ~MTL_FRPE;
+ writel(val, ioaddr + MTL_OPERATION_MODE);
+
+ ret = readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val,
+ val & RXPI, 1, 10000);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static void dwmac5_rxp_enable(void __iomem *ioaddr)
+{
+ u32 val;
+
+ val = readl(ioaddr + MTL_OPERATION_MODE);
+ val |= MTL_FRPE;
+ writel(val, ioaddr + MTL_OPERATION_MODE);
+}
+
+static int dwmac5_rxp_update_single_entry(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entry,
+ int pos)
+{
+ int ret, i;
+
+ for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
+ int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
+ u32 val;
+
+ /* Wait for ready */
+ ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
+ val, !(val & STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+
+ /* Write data */
+ val = *((u32 *)&entry->val + i);
+ writel(val, ioaddr + MTL_RXP_IACC_DATA);
+
+ /* Write pos */
+ val = real_pos & ADDR;
+ writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);
+
+ /* Write OP */
+ val |= WRRDN;
+ writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);
+
+ /* Start Write */
+ val |= STARTBUSY;
+ writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);
+
+ /* Wait for done */
+ ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
+ val, !(val & STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct stmmac_tc_entry *
+dwmac5_rxp_get_next_entry(struct stmmac_tc_entry *entries, unsigned int count,
+ u32 curr_prio)
+{
+ struct stmmac_tc_entry *entry;
+ u32 min_prio = ~0x0;
+ int i, min_prio_idx;
+ bool found = false;
+
+ for (i = count - 1; i >= 0; i--) {
+ entry = &entries[i];
+
+ /* Do not update unused entries */
+ if (!entry->in_use)
+ continue;
+ /* Do not update already updated entries (i.e. fragments) */
+ if (entry->in_hw)
+ continue;
+ /* Let last entry be updated last */
+ if (entry->is_last)
+ continue;
+ /* Do not return fragments */
+ if (entry->is_frag)
+ continue;
+ /* Check if we already checked this prio */
+ if (entry->prio < curr_prio)
+ continue;
+ /* Check if this is the minimum prio */
+ if (entry->prio < min_prio) {
+ min_prio = entry->prio;
+ min_prio_idx = i;
+ found = true;
+ }
+ }
+
+ if (found)
+ return &entries[min_prio_idx];
+ return NULL;
+}
+
+int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
+ unsigned int count)
+{
+ struct stmmac_tc_entry *entry, *frag;
+ int i, ret, nve = 0;
+ u32 curr_prio = 0;
+ u32 old_val, val;
+
+ /* Force disable RX */
+ old_val = readl(ioaddr + GMAC_CONFIG);
+ val = old_val & ~GMAC_CONFIG_RE;
+ writel(val, ioaddr + GMAC_CONFIG);
+
+ /* Disable RX Parser */
+ ret = dwmac5_rxp_disable(ioaddr);
+ if (ret)
+ goto re_enable;
+
+ /* Set all entries as NOT in HW */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ entry->in_hw = false;
+ }
+
+	/* Update entries in reverse order */
+ while (1) {
+ entry = dwmac5_rxp_get_next_entry(entries, count, curr_prio);
+ if (!entry)
+ break;
+
+ curr_prio = entry->prio;
+ frag = entry->frag_ptr;
+
+ /* Set special fragment requirements */
+ if (frag) {
+ entry->val.af = 0;
+ entry->val.rf = 0;
+ entry->val.nc = 1;
+ entry->val.ok_index = nve + 2;
+ }
+
+ ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ entry->in_hw = true;
+
+ if (frag && !frag->in_hw) {
+ ret = dwmac5_rxp_update_single_entry(ioaddr, frag, nve);
+ if (ret)
+ goto re_enable;
+ frag->table_pos = nve++;
+ frag->in_hw = true;
+ }
+ }
+
+ if (!nve)
+ goto re_enable;
+
+	/* Update the final all-pass entry */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ if (!entry->is_last)
+ continue;
+
+ ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ }
+
+ /* Assume n. of parsable entries == n. of valid entries */
+ val = (nve << 16) & NPE;
+ val |= nve & NVE;
+ writel(val, ioaddr + MTL_RXP_CONTROL_STATUS);
+
+ /* Enable RX Parser */
+ dwmac5_rxp_enable(ioaddr);
+
+re_enable:
+ /* Re-enable RX */
+ writel(old_val, ioaddr + GMAC_CONFIG);
+ return ret;
+}
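
Aside: dwmac5_rxp_update_single_entry() above programs each 32-bit word of a
parser entry through an indirect data/control register pair. The sequence --
wait for !STARTBUSY, write the data word, write the table position, select the
write opcode, kick STARTBUSY, wait again -- can be sketched in self-contained
userspace C with simulated registers; the define names mirror the ones above,
but the "hardware" behaviour here is faked for the sake of a runnable example.

#include <stdio.h>
#include <stdint.h>

#define STARTBUSY	(1u << 31)
#define WRRDN		(1u << 16)
#define ADDR		0xffffu

static uint32_t fake_ctrl;	/* stands in for MTL_RXP_IACC_CTRL_STATUS */
static uint32_t fake_data;	/* stands in for MTL_RXP_IACC_DATA */

/* Fake hardware: an access started with STARTBUSY completes at once. */
static void reg_write(uint32_t *reg, uint32_t val)
{
	*reg = val & ~STARTBUSY;
}

static int wait_not_busy(void)
{
	/* the real code polls with a timeout instead */
	return (fake_ctrl & STARTBUSY) ? -1 : 0;
}

static int write_entry_word(uint32_t data, unsigned int pos)
{
	uint32_t val;

	if (wait_not_busy())
		return -1;
	reg_write(&fake_data, data);	/* 1. data word */
	val = pos & ADDR;
	reg_write(&fake_ctrl, val);	/* 2. table position */
	val |= WRRDN;
	reg_write(&fake_ctrl, val);	/* 3. select "write" */
	val |= STARTBUSY;
	reg_write(&fake_ctrl, val);	/* 4. start the access */
	return wait_not_busy();		/* 5. wait for completion */
}

int main(void)
{
	int ret = write_entry_word(0xdeadbeef, 4);

	printf("write %s, data register now 0x%08x\n",
	       ret ? "failed" : "ok", fake_data);
	return 0;
}
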
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index bd4c466d303e..cc810aff7100 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -11,6 +11,17 @@
#define PRTYEN BIT(1)
#define TMOUTEN BIT(0)
+#define MTL_RXP_CONTROL_STATUS 0x00000ca0
+#define RXPI BIT(31)
+#define NPE GENMASK(23, 16)
+#define NVE GENMASK(7, 0)
+#define MTL_RXP_IACC_CTRL_STATUS 0x00000cb0
+#define STARTBUSY BIT(31)
+#define RXPEIEC GENMASK(22, 21)
+#define RXPEIEE BIT(20)
+#define WRRDN BIT(16)
+#define ADDR GENMASK(15, 0)
+#define MTL_RXP_IACC_DATA 0x00000cb4
#define MTL_ECC_CONTROL 0x00000cc0
#define TSOEE BIT(4)
#define MRXPEE BIT(3)
@@ -48,5 +59,7 @@ int dwmac5_safety_feat_irq_status(struct net_device *ndev,
struct stmmac_safety_stats *stats);
int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
int index, unsigned long *count, const char **desc);
+int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
+ unsigned int count);
#endif /* __DWMAC5_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 3bfb3f584be2..77914c89d749 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -292,7 +292,7 @@ static void enh_desc_set_tx_owner(struct dma_desc *p)
p->des0 |= cpu_to_le32(ETDES0_OWN);
}
-static void enh_desc_set_rx_owner(struct dma_desc *p)
+static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
p->des0 |= cpu_to_le32(RDES0_OWN);
}
@@ -437,6 +437,21 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
pr_info("\n");
}
+static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+ *addr = le32_to_cpu(p->des2);
+}
+
+static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(addr);
+}
+
+static void enh_desc_clear(struct dma_desc *p)
+{
+ p->des2 = 0;
+}
+
const struct stmmac_desc_ops enh_desc_ops = {
.tx_status = enh_desc_get_tx_status,
.rx_status = enh_desc_get_rx_status,
@@ -457,4 +472,7 @@ const struct stmmac_desc_ops enh_desc_ops = {
.get_timestamp = enh_desc_get_timestamp,
.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
.display_ring = enh_desc_display_ring,
+ .get_addr = enh_desc_get_addr,
+ .set_addr = enh_desc_set_addr,
+ .clear = enh_desc_clear,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
new file mode 100644
index 000000000000..14770fc8865e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac HW Interface Handling
+ */
+
+#include "common.h"
+#include "stmmac.h"
+#include "stmmac_ptp.h"
+
+static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
+{
+ u32 reg = readl(priv->ioaddr + id_reg);
+
+ if (!reg) {
+ dev_info(priv->device, "Version ID not available\n");
+ return 0x0;
+ }
+
+ dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n",
+ (unsigned int)(reg & GENMASK(15, 8)) >> 8,
+ (unsigned int)(reg & GENMASK(7, 0)));
+ return reg & GENMASK(7, 0);
+}
+
+static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ if (priv->chain_mode) {
+ dev_info(priv->device, "Chain mode enabled\n");
+ priv->mode = STMMAC_CHAIN_MODE;
+ mac->mode = &chain_mode_ops;
+ } else {
+ dev_info(priv->device, "Ring mode enabled\n");
+ priv->mode = STMMAC_RING_MODE;
+ mac->mode = &ring_mode_ops;
+ }
+}
+
+static int stmmac_dwmac1_quirks(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ if (priv->plat->enh_desc) {
+ dev_info(priv->device, "Enhanced/Alternate descriptors\n");
+
+ /* GMAC older than 3.50 has no extended descriptors */
+ if (priv->synopsys_id >= DWMAC_CORE_3_50) {
+ dev_info(priv->device, "Enabled extended descriptors\n");
+ priv->extend_desc = 1;
+ } else {
+ dev_warn(priv->device, "Extended descriptors not supported\n");
+ }
+
+ mac->desc = &enh_desc_ops;
+ } else {
+ dev_info(priv->device, "Normal descriptors\n");
+ mac->desc = &ndesc_ops;
+ }
+
+ stmmac_dwmac_mode_quirk(priv);
+ return 0;
+}
+
+static int stmmac_dwmac4_quirks(struct stmmac_priv *priv)
+{
+ stmmac_dwmac_mode_quirk(priv);
+ return 0;
+}
+
+static const struct stmmac_hwif_entry {
+ bool gmac;
+ bool gmac4;
+ u32 min_id;
+ const struct stmmac_regs_off regs;
+ const void *desc;
+ const void *dma;
+ const void *mac;
+ const void *hwtimestamp;
+ const void *mode;
+ const void *tc;
+ int (*setup)(struct stmmac_priv *priv);
+ int (*quirks)(struct stmmac_priv *priv);
+} stmmac_hw[] = {
+ /* NOTE: New HW versions shall go to the end of this table */
+ {
+ .gmac = false,
+ .gmac4 = false,
+ .min_id = 0,
+ .regs = {
+ .ptp_off = PTP_GMAC3_X_OFFSET,
+ .mmc_off = MMC_GMAC3_X_OFFSET,
+ },
+ .desc = NULL,
+ .dma = &dwmac100_dma_ops,
+ .mac = &dwmac100_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = NULL,
+ .setup = dwmac100_setup,
+ .quirks = stmmac_dwmac1_quirks,
+ }, {
+ .gmac = true,
+ .gmac4 = false,
+ .min_id = 0,
+ .regs = {
+ .ptp_off = PTP_GMAC3_X_OFFSET,
+ .mmc_off = MMC_GMAC3_X_OFFSET,
+ },
+ .desc = NULL,
+ .dma = &dwmac1000_dma_ops,
+ .mac = &dwmac1000_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = NULL,
+ .setup = dwmac1000_setup,
+ .quirks = stmmac_dwmac1_quirks,
+ }, {
+ .gmac = false,
+ .gmac4 = true,
+ .min_id = 0,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
+ .desc = &dwmac4_desc_ops,
+ .dma = &dwmac4_dma_ops,
+ .mac = &dwmac4_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = NULL,
+ .setup = dwmac4_setup,
+ .quirks = stmmac_dwmac4_quirks,
+ }, {
+ .gmac = false,
+ .gmac4 = true,
+ .min_id = DWMAC_CORE_4_00,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
+ .desc = &dwmac4_desc_ops,
+ .dma = &dwmac4_dma_ops,
+ .mac = &dwmac410_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = &dwmac4_ring_mode_ops,
+ .tc = NULL,
+ .setup = dwmac4_setup,
+ .quirks = NULL,
+ }, {
+ .gmac = false,
+ .gmac4 = true,
+ .min_id = DWMAC_CORE_4_10,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
+ .desc = &dwmac4_desc_ops,
+ .dma = &dwmac410_dma_ops,
+ .mac = &dwmac410_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = &dwmac4_ring_mode_ops,
+ .tc = NULL,
+ .setup = dwmac4_setup,
+ .quirks = NULL,
+ }, {
+ .gmac = false,
+ .gmac4 = true,
+ .min_id = DWMAC_CORE_5_10,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
+ .desc = &dwmac4_desc_ops,
+ .dma = &dwmac410_dma_ops,
+ .mac = &dwmac510_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = &dwmac4_ring_mode_ops,
+ .tc = &dwmac510_tc_ops,
+ .setup = dwmac4_setup,
+ .quirks = NULL,
+ }
+};
+
+int stmmac_hwif_init(struct stmmac_priv *priv)
+{
+ bool needs_gmac4 = priv->plat->has_gmac4;
+ bool needs_gmac = priv->plat->has_gmac;
+ const struct stmmac_hwif_entry *entry;
+ struct mac_device_info *mac;
+ bool needs_setup = true;
+ int i, ret;
+ u32 id;
+
+ if (needs_gmac) {
+ id = stmmac_get_id(priv, GMAC_VERSION);
+ } else if (needs_gmac4) {
+ id = stmmac_get_id(priv, GMAC4_VERSION);
+ } else {
+ id = 0;
+ }
+
+ /* Save ID for later use */
+ priv->synopsys_id = id;
+
+	/* Let's assume some safe values first */
+ priv->ptpaddr = priv->ioaddr +
+ (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET);
+ priv->mmcaddr = priv->ioaddr +
+ (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET);
+
+ /* Check for HW specific setup first */
+ if (priv->plat->setup) {
+ mac = priv->plat->setup(priv);
+ needs_setup = false;
+ } else {
+ mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+ }
+
+ if (!mac)
+ return -ENOMEM;
+
+ /* Fallback to generic HW */
+ for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) {
+ entry = &stmmac_hw[i];
+
+ if (needs_gmac ^ entry->gmac)
+ continue;
+ if (needs_gmac4 ^ entry->gmac4)
+ continue;
+ /* Use synopsys_id var because some setups can override this */
+ if (priv->synopsys_id < entry->min_id)
+ continue;
+
+ /* Only use generic HW helpers if needed */
+ mac->desc = mac->desc ? : entry->desc;
+ mac->dma = mac->dma ? : entry->dma;
+ mac->mac = mac->mac ? : entry->mac;
+ mac->ptp = mac->ptp ? : entry->hwtimestamp;
+ mac->mode = mac->mode ? : entry->mode;
+ mac->tc = mac->tc ? : entry->tc;
+
+ priv->hw = mac;
+ priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
+ priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
+
+ /* Entry found */
+ if (needs_setup) {
+ ret = entry->setup(priv);
+ if (ret)
+ return ret;
+ }
+
+ /* Run quirks, if needed */
+ if (entry->quirks) {
+ ret = entry->quirks(priv);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ dev_err(priv->device, "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n",
+ id, needs_gmac, needs_gmac4);
+ return -EINVAL;
+}
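
Aside: stmmac_hwif_init() above walks the stmmac_hw[] table backwards, so that
among the entries whose gmac/gmac4 flags match, the newest one whose min_id does
not exceed the detected Synopsys ID is picked. A self-contained userspace C
sketch of that selection follows; the names and ID values are illustrative
stand-ins, not the driver's constants.

#include <stdio.h>
#include <stdbool.h>

struct hwif_entry {
	bool gmac;
	bool gmac4;
	unsigned int min_id;
	const char *name;
};

/* Oldest first, like the real table; newer cores go at the end. */
static const struct hwif_entry table[] = {
	{ false, false, 0x00, "dwmac100"  },
	{ true,  false, 0x00, "dwmac1000" },
	{ false, true,  0x00, "dwmac4"    },
	{ false, true,  0x40, "dwmac4.00" },
	{ false, true,  0x41, "dwmac4.10" },
	{ false, true,  0x51, "dwmac5.10" },
};

static const struct hwif_entry *pick(bool gmac, bool gmac4, unsigned int id)
{
	int i;

	for (i = (int)(sizeof(table) / sizeof(table[0])) - 1; i >= 0; i--) {
		if (gmac != table[i].gmac || gmac4 != table[i].gmac4)
			continue;
		if (id < table[i].min_id)
			continue;
		return &table[i];	/* newest matching entry wins */
	}
	return NULL;
}

int main(void)
{
	const struct hwif_entry *e = pick(false, true, 0x42);

	printf("selected: %s\n", e ? e->name : "none");	/* "dwmac4.10" */
	return 0;
}
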
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index f81ded4a9946..f499a7fad6f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -5,10 +5,12 @@
#ifndef __STMMAC_HWIF_H__
#define __STMMAC_HWIF_H__
+#include <linux/netdevice.h>
+
#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
int __result = -EINVAL; \
- if ((__priv)->hw->__module->__cname) { \
+ if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \
(__priv)->hw->__module->__cname((__arg0), ##__args); \
__result = 0; \
} \
@@ -17,7 +19,7 @@
#define stmmac_do_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
int __result = -EINVAL; \
- if ((__priv)->hw->__module->__cname) \
+ if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \
__result = (__priv)->hw->__module->__cname((__arg0), ##__args); \
__result; \
})
@@ -57,7 +59,7 @@ struct stmmac_desc_ops {
/* Get the buffer size from the descriptor */
int (*get_tx_len)(struct dma_desc *p);
/* Handle extra events on specific interrupts hw dependent */
- void (*set_rx_owner)(struct dma_desc *p);
+ void (*set_rx_owner)(struct dma_desc *p, int disable_rx_ic);
/* Get the receive frame size */
int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type);
/* Return the reception status looking at the RDES1 */
@@ -77,6 +79,12 @@ struct stmmac_desc_ops {
void (*display_ring)(void *head, unsigned int size, bool rx);
/* set MSS via context descriptor */
void (*set_mss)(struct dma_desc *p, unsigned int mss);
+ /* get descriptor skbuff address */
+ void (*get_addr)(struct dma_desc *p, unsigned int *addr);
+ /* set descriptor skbuff address */
+ void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
+ /* clear descriptor */
+ void (*clear)(struct dma_desc *p);
};
#define stmmac_init_rx_desc(__priv, __args...) \
@@ -121,6 +129,12 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, display_ring, __args)
#define stmmac_set_mss(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_mss, __args)
+#define stmmac_get_desc_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, get_addr, __args)
+#define stmmac_set_desc_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_addr, __args)
+#define stmmac_clear_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, clear, __args)
struct stmmac_dma_cfg;
struct dma_features;
@@ -130,7 +144,7 @@ struct stmmac_dma_ops {
/* DMA core initialization */
int (*reset)(void __iomem *ioaddr);
void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
- u32 dma_tx, u32 dma_rx, int atds);
+ int atds);
void (*init_chan)(void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan);
void (*init_rx_chan)(void __iomem *ioaddr,
@@ -143,10 +157,6 @@ struct stmmac_dma_ops {
void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
/* Dump DMA registers */
void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space);
- /* Set tx/rx threshold in the csr6 register
- * An invalid value enables the store-and-forward mode */
- void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
- int rxfifosz);
void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
int fifosz, u8 qmode);
void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel,
@@ -189,8 +199,6 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, axi, __args)
#define stmmac_dump_dma_regs(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, dump_regs, __args)
-#define stmmac_dma_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, dma_mode, __args)
#define stmmac_dma_rx_mode(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, dma_rx_mode, __args)
#define stmmac_dma_tx_mode(__priv, __args...) \
@@ -232,6 +240,7 @@ struct mac_device_info;
struct net_device;
struct rgmii_adv;
struct stmmac_safety_stats;
+struct stmmac_tc_entry;
/* Helpers to program the MAC core */
struct stmmac_ops {
@@ -301,6 +310,9 @@ struct stmmac_ops {
struct stmmac_safety_stats *stats);
int (*safety_feat_dump)(struct stmmac_safety_stats *stats,
int index, unsigned long *count, const char **desc);
+ /* Flexible RX Parser */
+ int (*rxp_config)(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
+ unsigned int count);
};
#define stmmac_core_init(__priv, __args...) \
@@ -365,6 +377,8 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, safety_feat_irq_status, __args)
#define stmmac_safety_feat_dump(__priv, __args...) \
stmmac_do_callback(__priv, mac, safety_feat_dump, __args)
+#define stmmac_rxp_config(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, rxp_config, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -418,4 +432,39 @@ struct stmmac_mode_ops {
#define stmmac_clean_desc3(__priv, __args...) \
stmmac_do_void_callback(__priv, mode, clean_desc3, __args)
+struct stmmac_priv;
+struct tc_cls_u32_offload;
+
+struct stmmac_tc_ops {
+ int (*init)(struct stmmac_priv *priv);
+ int (*setup_cls_u32)(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls);
+};
+
+#define stmmac_tc_init(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, init, __args)
+#define stmmac_tc_setup_cls_u32(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_cls_u32, __args)
+
+struct stmmac_regs_off {
+ u32 ptp_off;
+ u32 mmc_off;
+};
+
+extern const struct stmmac_ops dwmac100_ops;
+extern const struct stmmac_dma_ops dwmac100_dma_ops;
+extern const struct stmmac_ops dwmac1000_ops;
+extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+extern const struct stmmac_ops dwmac4_ops;
+extern const struct stmmac_dma_ops dwmac4_dma_ops;
+extern const struct stmmac_ops dwmac410_ops;
+extern const struct stmmac_dma_ops dwmac410_dma_ops;
+extern const struct stmmac_ops dwmac510_ops;
+extern const struct stmmac_tc_ops dwmac510_tc_ops;
+
+#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
+#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
+
+int stmmac_hwif_init(struct stmmac_priv *priv);
+
#endif /* __STMMAC_HWIF_H__ */
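
Aside: the stmmac_do_void_callback()/stmmac_do_callback() hunks above now also
check the ops-table pointer itself, since hwif.c may leave whole modules (mode,
tc, ...) NULL for older cores. A minimal userspace C sketch of that guarded
dispatch pattern follows; the struct and function names are illustrative only
(the statement-expression macro mirrors the GNU C style used in the header).

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

struct tc_ops {
	int (*init)(void *priv);
};

struct hw {
	const struct tc_ops *tc;	/* may be NULL on older cores */
};

#define do_callback(hw, module, cname, arg)			\
({								\
	int __result = -EINVAL;				\
	if ((hw)->module && (hw)->module->cname)		\
		__result = (hw)->module->cname(arg);		\
	__result;						\
})

static int my_tc_init(void *priv)
{
	(void)priv;
	return 0;
}

int main(void)
{
	const struct tc_ops ops = { .init = my_tc_init };
	struct hw with_tc = { .tc = &ops };
	struct hw without_tc = { .tc = NULL };

	printf("with tc:    %d\n", do_callback(&with_tc, tc, init, NULL));
	printf("without tc: %d\n", do_callback(&without_tc, tc, init, NULL));
	return 0;
}
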
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 7b1d901bf5bc..de65bb29feba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -168,7 +168,7 @@ static void ndesc_set_tx_owner(struct dma_desc *p)
p->des0 |= cpu_to_le32(TDES0_OWN);
}
-static void ndesc_set_rx_owner(struct dma_desc *p)
+static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
p->des0 |= cpu_to_le32(RDES0_OWN);
}
@@ -297,6 +297,21 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx)
pr_info("\n");
}
+static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+ *addr = le32_to_cpu(p->des2);
+}
+
+static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(addr);
+}
+
+static void ndesc_clear(struct dma_desc *p)
+{
+ p->des2 = 0;
+}
+
const struct stmmac_desc_ops ndesc_ops = {
.tx_status = ndesc_get_tx_status,
.rx_status = ndesc_get_rx_status,
@@ -316,4 +331,7 @@ const struct stmmac_desc_ops ndesc_ops = {
.get_timestamp = ndesc_get_timestamp,
.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
.display_ring = ndesc_display_ring,
+ .get_addr = ndesc_get_addr,
+ .set_addr = ndesc_set_addr,
+ .clear = ndesc_clear,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index da50451f8999..4d425b1a0c59 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -76,11 +76,36 @@ struct stmmac_rx_queue {
struct napi_struct napi ____cacheline_aligned_in_smp;
};
+struct stmmac_tc_entry {
+ bool in_use;
+ bool in_hw;
+ bool is_last;
+ bool is_frag;
+ void *frag_ptr;
+ unsigned int table_pos;
+ u32 handle;
+ u32 prio;
+ struct {
+ u32 match_data;
+ u32 match_en;
+ u8 af:1;
+ u8 rf:1;
+ u8 im:1;
+ u8 nc:1;
+ u8 res1:4;
+ u8 frame_offset;
+ u8 ok_index;
+ u8 dma_ch_no;
+ u32 res2;
+ } __packed val;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
+ bool tx_timer_armed;
int tx_coalesce;
int hwts_tx_en;
@@ -130,6 +155,7 @@ struct stmmac_priv {
int eee_active;
int tx_lpi_timer;
unsigned int mode;
+ unsigned int chain_mode;
int extend_desc;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
@@ -150,6 +176,11 @@ struct stmmac_priv {
unsigned long state;
struct workqueue_struct *wq;
struct work_struct service_task;
+
+ /* TC Handling */
+ unsigned int tc_entries_max;
+ unsigned int tc_off_max;
+ struct stmmac_tc_entry *tc_entries;
};
enum stmmac_state {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 90363a8b15db..c32de53a00d3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -45,6 +45,7 @@
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
+#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
@@ -769,7 +770,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
netdev_info(priv->dev,
"IEEE 1588-2008 Advanced Timestamp supported\n");
- priv->hw->ptp = &stmmac_ptp;
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
@@ -1156,10 +1156,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
return -EINVAL;
}
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
- p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
- else
- p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
+ stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
if (priv->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
@@ -1344,14 +1341,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
else
p = tx_q->dma_tx + i;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- p->des0 = 0;
- p->des1 = 0;
- p->des2 = 0;
- p->des3 = 0;
- } else {
- p->des2 = 0;
- }
+ stmmac_clear_desc(priv, p);
tx_q->tx_skbuff_dma[i].buf = 0;
tx_q->tx_skbuff_dma[i].map_as_page = false;
@@ -1797,22 +1787,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
/* configure all channels */
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- for (chan = 0; chan < rx_channels_count; chan++) {
- qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
- stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
- rxfifosz, qmode);
- }
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+ rxfifosz, qmode);
+ }
- for (chan = 0; chan < tx_channels_count; chan++) {
- qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
- stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
- txfifosz, qmode);
- }
- } else {
- stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+ txfifosz, qmode);
}
}
@@ -1981,23 +1967,14 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
rxfifosz /= rx_channels_count;
txfifosz /= tx_channels_count;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz,
- rxqmode);
- stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz,
- txqmode);
- } else {
- stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
- }
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}
static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
- int ret = false;
+ int ret;
- /* Safety features are only available in cores >= 5.10 */
- if (priv->synopsys_id < DWMAC_CORE_5_10)
- return ret;
ret = stmmac_safety_feat_irq_status(priv, priv->dev,
priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
if (ret && (ret != -EINVAL)) {
@@ -2023,7 +2000,11 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
tx_channel_count : rx_channel_count;
u32 chan;
bool poll_scheduled = false;
- int status[channels_to_check];
+ int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
+
+ /* Make sure we never check beyond our status buffer. */
+ if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
+ channels_to_check = ARRAY_SIZE(status);
/* Each DMA channel can be used for rx and tx simultaneously, yet
* napi_struct is embedded in struct stmmac_rx_queue rather than in a
@@ -2104,14 +2085,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
- priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
- } else {
- priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
- priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
- }
-
dwmac_mmc_intr_all_mask(priv->mmcaddr);
if (priv->dma_cap.rmon) {
@@ -2122,32 +2095,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
}
/**
- * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
- * @priv: driver private structure
- * Description: select the Enhanced/Alternate or Normal descriptors.
- * In case of Enhanced/Alternate, it checks if the extended descriptors are
- * supported by the HW capability register.
- */
-static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
-{
- if (priv->plat->enh_desc) {
- dev_info(priv->device, "Enhanced/Alternate descriptors\n");
-
- /* GMAC older than 3.50 has no extended descriptors */
- if (priv->synopsys_id >= DWMAC_CORE_3_50) {
- dev_info(priv->device, "Enabled extended descriptors\n");
- priv->extend_desc = 1;
- } else
- dev_warn(priv->device, "Extended descriptors not supported\n");
-
- priv->hw->desc = &enh_desc_ops;
- } else {
- dev_info(priv->device, "Normal descriptors\n");
- priv->hw->desc = &ndesc_ops;
- }
-}
-
-/**
* stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
* @priv: driver private structure
* Description:
@@ -2191,10 +2138,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
u32 rx_channels_count = priv->plat->rx_queues_to_use;
u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
struct stmmac_rx_queue *rx_q;
struct stmmac_tx_queue *tx_q;
- u32 dummy_dma_rx_phy = 0;
- u32 dummy_dma_tx_phy = 0;
u32 chan = 0;
int atds = 0;
int ret = 0;
@@ -2213,48 +2159,39 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
return ret;
}
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- /* DMA Configuration */
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
- dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
-
- /* DMA RX Channel Configuration */
- for (chan = 0; chan < rx_channels_count; chan++) {
- rx_q = &priv->rx_queue[chan];
-
- stmmac_init_rx_chan(priv, priv->ioaddr,
- priv->plat->dma_cfg, rx_q->dma_rx_phy,
- chan);
-
- rx_q->rx_tail_addr = rx_q->dma_rx_phy +
- (DMA_RX_SIZE * sizeof(struct dma_desc));
- stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
- rx_q->rx_tail_addr, chan);
- }
-
- /* DMA TX Channel Configuration */
- for (chan = 0; chan < tx_channels_count; chan++) {
- tx_q = &priv->tx_queue[chan];
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ rx_q = &priv->rx_queue[chan];
- stmmac_init_chan(priv, priv->ioaddr,
- priv->plat->dma_cfg, chan);
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
- stmmac_init_tx_chan(priv, priv->ioaddr,
- priv->plat->dma_cfg, tx_q->dma_tx_phy,
- chan);
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (DMA_RX_SIZE * sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+ }
- tx_q->tx_tail_addr = tx_q->dma_tx_phy +
- (DMA_TX_SIZE * sizeof(struct dma_desc));
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
- tx_q->tx_tail_addr, chan);
- }
- } else {
- rx_q = &priv->rx_queue[chan];
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_channels_count; chan++) {
tx_q = &priv->tx_queue[chan];
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
- tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy +
+ (DMA_TX_SIZE * sizeof(struct dma_desc));
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
}
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++)
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+ /* DMA Configuration */
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
if (priv->plat->axi)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
@@ -2537,12 +2474,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_core_init(priv, priv->hw, dev);
/* Initialize MTL*/
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
- stmmac_mtl_configuration(priv);
+ stmmac_mtl_configuration(priv);
/* Initialize Safety Features */
- if (priv->synopsys_id >= DWMAC_CORE_5_10)
- stmmac_safety_feat_configuration(priv);
+ stmmac_safety_feat_configuration(priv);
ret = stmmac_rx_ipc(priv, priv->hw);
if (!ret) {
@@ -3096,10 +3031,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (enh_desc)
is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
- if (unlikely(is_jumbo) && likely(priv->synopsys_id <
- DWMAC_CORE_4_00)) {
+ if (unlikely(is_jumbo)) {
entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
- if (unlikely(entry < 0))
+ if (unlikely(entry < 0) && (entry != -EINVAL))
goto dma_map_err;
}
@@ -3122,10 +3056,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
goto dma_map_err; /* should reuse desc w/o issues */
tx_q->tx_skbuff_dma[entry].buf = des;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- desc->des0 = cpu_to_le32(des);
- else
- desc->des2 = cpu_to_le32(des);
+
+ stmmac_set_desc_addr(priv, desc, des);
tx_q->tx_skbuff_dma[entry].map_as_page = true;
tx_q->tx_skbuff_dma[entry].len = len;
@@ -3180,13 +3112,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* element in case of no SG.
*/
priv->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
+ !priv->tx_timer_armed) {
mod_timer(&priv->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ priv->tx_timer_armed = true;
} else {
priv->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ priv->tx_timer_armed = false;
}
skb_tx_timestamp(skb);
@@ -3204,10 +3139,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
goto dma_map_err;
tx_q->tx_skbuff_dma[first_entry].buf = des;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- first->des0 = cpu_to_le32(des);
- else
- first->des2 = cpu_to_le32(des);
+
+ stmmac_set_desc_addr(priv, first, des);
tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
@@ -3233,11 +3166,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- if (priv->synopsys_id < DWMAC_CORE_4_00)
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
- else
- stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
- queue);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK;
@@ -3321,13 +3251,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
}
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
- p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
- p->des1 = 0;
- } else {
- p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
- }
-
+ stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
stmmac_refill_desc3(priv, rx_q, p);
if (rx_q->rx_zeroc_thresh > 0)
@@ -3338,10 +3262,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
}
dma_wmb();
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0);
- else
- stmmac_set_rx_owner(priv, p);
+ stmmac_set_rx_owner(priv, p, priv->use_riwt);
dma_wmb();
@@ -3429,11 +3350,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
int frame_len;
unsigned int des;
- if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- des = le32_to_cpu(p->des0);
- else
- des = le32_to_cpu(p->des2);
-
+ stmmac_get_desc_addr(priv, p, &des);
frame_len = stmmac_get_rx_frame_len(priv, p, coe);
/* If frame length is greater than skb buffer size
@@ -3450,8 +3367,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
* Type frames (LLC/LLC-SNAP)
+ *
+ * llc_snap is never checked in GMAC >= 4, so this ACS
+ * feature is always disabled and packets need to be
+ * stripped manually.
*/
- if (unlikely(status != llc_snap))
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+ unlikely(status != llc_snap))
frame_len -= ETH_FCS_LEN;
if (netif_msg_rx_status(priv)) {
@@ -3722,6 +3644,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
/* To handle GMAC own interrupts */
if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
+ int mtl_status;
if (unlikely(status)) {
/* For LPI we need to save the tx status */
@@ -3731,20 +3654,18 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->tx_path_in_lpi_mode = false;
}
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- for (queue = 0; queue < queues_count; queue++) {
- struct stmmac_rx_queue *rx_q =
- &priv->rx_queue[queue];
+ for (queue = 0; queue < queues_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- status |= stmmac_host_mtl_irq_status(priv,
- priv->hw, queue);
+ mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
+ queue);
+ if (mtl_status != -EINVAL)
+ status |= mtl_status;
- if (status & CORE_IRQ_MTL_RX_OVERFLOW)
- stmmac_set_rx_tail_ptr(priv,
- priv->ioaddr,
- rx_q->rx_tail_addr,
- queue);
- }
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr,
+ queue);
}
/* PCS link status */
@@ -3808,6 +3729,58 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return ret;
}
+static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct stmmac_priv *priv = cb_priv;
+ int ret = -EOPNOTSUPP;
+
+ stmmac_disable_all_queues(priv);
+
+ switch (type) {
+ case TC_SETUP_CLSU32:
+ if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
+ ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
+ break;
+ default:
+ break;
+ }
+
+ stmmac_enable_all_queues(priv);
+ return ret;
+}
+
+static int stmmac_setup_tc_block(struct stmmac_priv *priv,
+ struct tc_block_offload *f)
+{
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
+ priv, priv);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return stmmac_setup_tc_block(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -4046,6 +4019,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
+ .ndo_setup_tc = stmmac_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
@@ -4093,49 +4067,17 @@ static void stmmac_service_task(struct work_struct *work)
*/
static int stmmac_hw_init(struct stmmac_priv *priv)
{
- struct mac_device_info *mac;
-
- /* Identify the MAC HW device */
- if (priv->plat->setup) {
- mac = priv->plat->setup(priv);
- } else if (priv->plat->has_gmac) {
- priv->dev->priv_flags |= IFF_UNICAST_FLT;
- mac = dwmac1000_setup(priv->ioaddr,
- priv->plat->multicast_filter_bins,
- priv->plat->unicast_filter_entries,
- &priv->synopsys_id);
- } else if (priv->plat->has_gmac4) {
- priv->dev->priv_flags |= IFF_UNICAST_FLT;
- mac = dwmac4_setup(priv->ioaddr,
- priv->plat->multicast_filter_bins,
- priv->plat->unicast_filter_entries,
- &priv->synopsys_id);
- } else {
- mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
- }
- if (!mac)
- return -ENOMEM;
-
- priv->hw = mac;
+ int ret;
/* dwmac-sun8i only work in chain mode */
if (priv->plat->has_sun8i)
chain_mode = 1;
+ priv->chain_mode = chain_mode;
- /* To use the chained or ring mode */
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- priv->hw->mode = &dwmac4_ring_mode_ops;
- } else {
- if (chain_mode) {
- priv->hw->mode = &chain_mode_ops;
- dev_info(priv->device, "Chain mode enabled\n");
- priv->mode = STMMAC_CHAIN_MODE;
- } else {
- priv->hw->mode = &ring_mode_ops;
- dev_info(priv->device, "Ring mode enabled\n");
- priv->mode = STMMAC_RING_MODE;
- }
- }
+ /* Initialize HW Interface */
+ ret = stmmac_hwif_init(priv);
+ if (ret)
+ return ret;
/* Get the HW capability (new GMAC newer than 3.50a) */
priv->hw_cap_support = stmmac_get_hw_features(priv);
@@ -4169,12 +4111,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
dev_info(priv->device, "No HW DMA feature register supported\n");
}
- /* To use alternate (extended), normal or GMAC4 descriptor structures */
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
- priv->hw->desc = &dwmac4_desc_ops;
- else
- stmmac_selec_desc_mode(priv);
-
if (priv->plat->rx_coe) {
priv->hw->rx_csum = priv->plat->rx_coe;
dev_info(priv->device, "RX Checksum Offload Engine supported\n");
@@ -4283,6 +4219,11 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
+ ret = stmmac_tc_init(priv, priv);
+ if (!ret) {
+ ndev->hw_features |= NETIF_F_HW_TC;
+ }
+
if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
priv->tso = true;
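Most of the "priv->synopsys_id >= DWMAC_CORE_x" branches removed in this file are replaced by a per-core callback table that stmmac_hwif_init() selects once at probe time; every later call site then goes through the chosen ops. A minimal sketch of that dispatch pattern (type and field names here are illustrative only, not the real hwif.h definitions):

    struct hw_ops {
        void (*dma_init)(void __iomem *ioaddr, struct stmmac_dma_cfg *cfg, int atds);
        void (*set_desc_addr)(struct dma_desc *p, dma_addr_t addr);
    };

    struct hw_entry {
        u32 min_id;                       /* lowest core revision served */
        const struct hw_ops *ops;
    };

    /* Placeholder ops tables; the real driver fills one per core family */
    static const struct hw_ops gmac4_ops = { };
    static const struct hw_ops gmac_legacy_ops = { };

    static const struct hw_entry hw_table[] = {
        { .min_id = DWMAC_CORE_4_00, .ops = &gmac4_ops },
        { .min_id = 0,               .ops = &gmac_legacy_ops },
    };

    static int example_hwif_init(struct stmmac_priv *priv)
    {
        int i;

        /* Pick the first (newest) entry this core revision satisfies */
        for (i = 0; i < ARRAY_SIZE(hw_table); i++) {
            if (priv->synopsys_id >= hw_table[i].min_id) {
                priv->example_ops = hw_table[i].ops;   /* assumed field */
                return 0;
            }
        }
        return -EINVAL;
    }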
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index f5f37bfa1d58..5df1a608e566 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -233,10 +233,7 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->phy_mask = mdio_bus_data->phy_mask;
new_bus->parent = priv->device;
- if (mdio_node)
- err = of_mdiobus_register(new_bus, mdio_node);
- else
- err = mdiobus_register(new_bus);
+ err = of_mdiobus_register(new_bus, mdio_node);
if (err != 0) {
dev_err(dev, "Cannot register the MDIO bus\n");
goto bus_register_fail;
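Dropping the if/else works because of_mdiobus_register() accepts a NULL device_node and internally falls back to a plain mdiobus_register() in that case (davinci_mdio.c below relies on the same behaviour). In effect the single call is equivalent to the old branch, sketched here:

    #include <linux/of_mdio.h>
    #include <linux/phy.h>

    static int register_mdio_bus(struct mii_bus *bus, struct device_node *np)
    {
        if (np)
            return of_mdiobus_register(bus, np);   /* DT path */
        return mdiobus_register(bus);              /* non-DT fallback */
    }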
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
new file mode 100644
index 000000000000..881c94b73e2f
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac TC Handling (HW only)
+ */
+
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include "common.h"
+#include "dwmac4.h"
+#include "dwmac5.h"
+#include "stmmac.h"
+
+static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
+{
+ memset(entry, 0, sizeof(*entry));
+ entry->in_use = true;
+ entry->is_last = true;
+ entry->is_frag = false;
+ entry->prio = ~0x0;
+ entry->handle = 0;
+ entry->val.match_data = 0x0;
+ entry->val.match_en = 0x0;
+ entry->val.af = 1;
+ entry->val.dma_ch_no = 0x0;
+}
+
+static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls,
+ bool free)
+{
+ struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
+ u32 loc = cls->knode.handle;
+ int i;
+
+ for (i = 0; i < priv->tc_entries_max; i++) {
+ entry = &priv->tc_entries[i];
+ if (!entry->in_use && !first && free)
+ first = entry;
+ if (entry->handle == loc && !free)
+ dup = entry;
+ }
+
+ if (dup)
+ return dup;
+ if (first) {
+ first->handle = loc;
+ first->in_use = true;
+
+ /* Reset HW values */
+ memset(&first->val, 0, sizeof(first->val));
+ }
+
+ return first;
+}
+
+static int tc_fill_actions(struct stmmac_tc_entry *entry,
+ struct stmmac_tc_entry *frag,
+ struct tc_cls_u32_offload *cls)
+{
+ struct stmmac_tc_entry *action_entry = entry;
+ const struct tc_action *act;
+ struct tcf_exts *exts;
+ LIST_HEAD(actions);
+
+ exts = cls->knode.exts;
+ if (!tcf_exts_has_actions(exts))
+ return -EINVAL;
+ if (frag)
+ action_entry = frag;
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(act, &actions, list) {
+ /* Accept */
+ if (is_tcf_gact_ok(act)) {
+ action_entry->val.af = 1;
+ break;
+ }
+ /* Drop */
+ if (is_tcf_gact_shot(act)) {
+ action_entry->val.rf = 1;
+ break;
+ }
+
+ /* Unsupported */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tc_fill_entry(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ struct stmmac_tc_entry *entry, *frag = NULL;
+ struct tc_u32_sel *sel = cls->knode.sel;
+ u32 off, data, mask, real_off, rem;
+ u32 prio = cls->common.prio;
+ int ret;
+
+ /* Only 1 match per entry */
+ if (sel->nkeys <= 0 || sel->nkeys > 1)
+ return -EINVAL;
+
+ off = sel->keys[0].off << sel->offshift;
+ data = sel->keys[0].val;
+ mask = sel->keys[0].mask;
+
+ switch (ntohs(cls->common.protocol)) {
+ case ETH_P_ALL:
+ break;
+ case ETH_P_IP:
+ off += ETH_HLEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (off > priv->tc_off_max)
+ return -EINVAL;
+
+ real_off = off / 4;
+ rem = off % 4;
+
+ entry = tc_find_entry(priv, cls, true);
+ if (!entry)
+ return -EINVAL;
+
+ if (rem) {
+ frag = tc_find_entry(priv, cls, true);
+ if (!frag) {
+ ret = -EINVAL;
+ goto err_unuse;
+ }
+
+ entry->frag_ptr = frag;
+ entry->val.match_en = (mask << (rem * 8)) &
+ GENMASK(31, rem * 8);
+ entry->val.match_data = (data << (rem * 8)) &
+ GENMASK(31, rem * 8);
+ entry->val.frame_offset = real_off;
+ entry->prio = prio;
+
+ frag->val.match_en = (mask >> (rem * 8)) &
+ GENMASK(rem * 8 - 1, 0);
+ frag->val.match_data = (data >> (rem * 8)) &
+ GENMASK(rem * 8 - 1, 0);
+ frag->val.frame_offset = real_off + 1;
+ frag->prio = prio;
+ frag->is_frag = true;
+ } else {
+ entry->frag_ptr = NULL;
+ entry->val.match_en = mask;
+ entry->val.match_data = data;
+ entry->val.frame_offset = real_off;
+ entry->prio = prio;
+ }
+
+ ret = tc_fill_actions(entry, frag, cls);
+ if (ret)
+ goto err_unuse;
+
+ return 0;
+
+err_unuse:
+ if (frag)
+ frag->in_use = false;
+ entry->in_use = false;
+ return ret;
+}
+
+static void tc_unfill_entry(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ struct stmmac_tc_entry *entry;
+
+ entry = tc_find_entry(priv, cls, false);
+ if (!entry)
+ return;
+
+ entry->in_use = false;
+ if (entry->frag_ptr) {
+ entry = entry->frag_ptr;
+ entry->is_frag = false;
+ entry->in_use = false;
+ }
+}
+
+static int tc_config_knode(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ int ret;
+
+ ret = tc_fill_entry(priv, cls);
+ if (ret)
+ return ret;
+
+ ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
+ priv->tc_entries_max);
+ if (ret)
+ goto err_unfill;
+
+ return 0;
+
+err_unfill:
+ tc_unfill_entry(priv, cls);
+ return ret;
+}
+
+static int tc_delete_knode(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ int ret;
+
+ /* Set entry and fragments as not used */
+ tc_unfill_entry(priv, cls);
+
+ ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
+ priv->tc_entries_max);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tc_setup_cls_u32(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ switch (cls->command) {
+ case TC_CLSU32_REPLACE_KNODE:
+ tc_unfill_entry(priv, cls);
+ /* Fall through */
+ case TC_CLSU32_NEW_KNODE:
+ return tc_config_knode(priv, cls);
+ case TC_CLSU32_DELETE_KNODE:
+ return tc_delete_knode(priv, cls);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tc_init(struct stmmac_priv *priv)
+{
+ struct dma_features *dma_cap = &priv->dma_cap;
+ unsigned int count;
+
+ if (!dma_cap->frpsel)
+ return -EINVAL;
+
+ switch (dma_cap->frpbs) {
+ case 0x0:
+ priv->tc_off_max = 64;
+ break;
+ case 0x1:
+ priv->tc_off_max = 128;
+ break;
+ case 0x2:
+ priv->tc_off_max = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dma_cap->frpes) {
+ case 0x0:
+ count = 64;
+ break;
+ case 0x1:
+ count = 128;
+ break;
+ case 0x2:
+ count = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Reserve one last filter which lets all pass */
+ priv->tc_entries_max = count;
+ priv->tc_entries = devm_kzalloc(priv->device,
+ sizeof(*priv->tc_entries) * count, GFP_KERNEL);
+ if (!priv->tc_entries)
+ return -ENOMEM;
+
+ tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);
+
+ dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
+ priv->tc_entries_max, priv->tc_off_max);
+ return 0;
+}
+
+const struct stmmac_tc_ops dwmac510_tc_ops = {
+ .init = tc_init,
+ .setup_cls_u32 = tc_setup_cls_u32,
+};
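In the split-entry case handled by tc_fill_entry(), GENMASK(h, l) is simply the 32-bit word with bits l..h set, so for a key that lands one byte into a frame word (rem = 1) the two halves of the match use:

    GENMASK(31, rem * 8)    = GENMASK(31, 8) = 0xffffff00   /* main entry word */
    GENMASK(rem * 8 - 1, 0) = GENMASK(7, 0)  = 0x000000ff   /* frag entry word */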
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index f081de4f38d7..88c12474a0c3 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3443,7 +3443,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
len = (val & RCR_ENTRY_L2_LEN) >>
RCR_ENTRY_L2_LEN_SHIFT;
- len -= ETH_FCS_LEN;
+ append_size = len + ETH_HLEN + ETH_FCS_LEN;
addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
@@ -3453,7 +3453,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
RCR_ENTRY_PKTBUFSZ_SHIFT];
off = addr & ~PAGE_MASK;
- append_size = rcr_size;
if (num_rcr == 1) {
int ptype;
@@ -3466,7 +3465,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
else
skb_checksum_none_assert(skb);
} else if (!(val & RCR_ENTRY_MULTI))
- append_size = len - skb->len;
+ append_size = append_size - skb->len;
niu_rx_skb_append(skb, page, off, append_size, rcr_size);
if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 48a541eb0af2..9263d638bd6d 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_TI
config TI_DAVINCI_EMAC
tristate "TI DaVinci EMAC Support"
- depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST
select TI_DAVINCI_MDIO
select TI_DAVINCI_CPDMA
select PHYLIB
@@ -30,7 +30,7 @@ config TI_DAVINCI_EMAC
config TI_DAVINCI_MDIO
tristate "TI DaVinci MDIO Support"
- depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
select PHYLIB
---help---
This driver supports TI's DaVinci MDIO module.
@@ -40,7 +40,7 @@ config TI_DAVINCI_MDIO
config TI_DAVINCI_CPDMA
tristate "TI DaVinci CPDMA Support"
- depends on ARCH_DAVINCI || ARCH_OMAP2PLUS
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
---help---
This driver supports TI's DaVinci CPDMA dma engine.
@@ -60,7 +60,7 @@ config TI_CPSW_ALE
config TI_CPSW
tristate "TI CPSW Switch Support"
- depends on ARCH_DAVINCI || ARCH_OMAP2PLUS
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
select TI_CPSW_PHY_SEL
@@ -75,7 +75,7 @@ config TI_CPSW
config TI_CPTS
bool "TI Common Platform Time Sync (CPTS) Support"
- depends on TI_CPSW || TI_KEYSTONE_NETCP
+ depends on TI_CPSW || TI_KEYSTONE_NETCP || COMPILE_TEST
depends on POSIX_TIMERS
---help---
This driver supports the Common Platform Time Sync unit of
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 18013645e76c..0c1adad7415d 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -177,12 +177,18 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
}
dev = bus_find_device(&platform_bus_type, NULL, node, match);
- of_node_put(node);
+ if (!dev) {
+ dev_err(dev, "unable to find platform device for %pOF\n", node);
+ goto out;
+ }
+
priv = dev_get_drvdata(dev);
priv->cpsw_phy_sel(priv, phy_mode, slave);
put_device(dev);
+out:
+ of_node_put(node);
}
EXPORT_SYMBOL_GPL(cpsw_phy_sel);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 30371274409d..643cd2d9dfb6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -36,6 +36,7 @@
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
+#include <linux/sys_soc.h>
#include <linux/pinctrl/consumer.h>
@@ -129,7 +130,7 @@ do { \
#define RX_PRIORITY_MAPPING 0x76543210
#define TX_PRIORITY_MAPPING 0x33221100
-#define CPDMA_TX_PRIORITY_MAP 0x01234567
+#define CPDMA_TX_PRIORITY_MAP 0x76543210
#define CPSW_VLAN_AWARE BIT(1)
#define CPSW_RX_VLAN_ENCAP BIT(2)
@@ -957,7 +958,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
u32 ch_map;
int num_tx, cur_budget, ch;
@@ -984,7 +985,21 @@ static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
if (num_tx < budget) {
napi_complete(napi_tx);
writel(0xff, &cpsw->wr_regs->tx_en);
- if (cpsw->quirk_irq && cpsw->tx_irq_disabled) {
+ }
+
+ return num_tx;
+}
+
+static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+ int num_tx;
+
+ num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &cpsw->wr_regs->tx_en);
+ if (cpsw->tx_irq_disabled) {
cpsw->tx_irq_disabled = false;
enable_irq(cpsw->irqs_table[1]);
}
@@ -993,7 +1008,7 @@ static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
return num_tx;
}
-static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
+static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
u32 ch_map;
int num_rx, cur_budget, ch;
@@ -1020,7 +1035,21 @@ static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
if (num_rx < budget) {
napi_complete_done(napi_rx, num_rx);
writel(0xff, &cpsw->wr_regs->rx_en);
- if (cpsw->quirk_irq && cpsw->rx_irq_disabled) {
+ }
+
+ return num_rx;
+}
+
+static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+ int num_rx;
+
+ num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
+ if (num_rx < budget) {
+ napi_complete_done(napi_rx, num_rx);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ if (cpsw->rx_irq_disabled) {
cpsw->rx_irq_disabled = false;
enable_irq(cpsw->irqs_table[0]);
}
@@ -1252,8 +1281,8 @@ static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
for (i = 0; i < ch_stats_len; i++) {
line = i % CPSW_STATS_CH_LEN;
snprintf(*p, ETH_GSTRING_LEN,
- "%s DMA chan %d: %s", rx_dir ? "Rx" : "Tx",
- i / CPSW_STATS_CH_LEN,
+ "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
+ (long)(i / CPSW_STATS_CH_LEN),
cpsw_gstrings_ch_stats[line].stat_string);
*p += ETH_GSTRING_LEN;
}
@@ -1340,6 +1369,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN |
ALE_SECURE, slave->port_vlan);
+ cpsw_ale_control_set(cpsw->ale, slave_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}
static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2362,9 +2393,9 @@ static void cpsw_get_channels(struct net_device *ndev,
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
+ ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
ch->max_combined = 0;
- ch->max_rx = CPSW_MAX_QUEUES;
- ch->max_tx = CPSW_MAX_QUEUES;
ch->max_other = 0;
ch->other_count = 0;
ch->rx_count = cpsw->rx_ch_num;
@@ -2375,6 +2406,11 @@ static void cpsw_get_channels(struct net_device *ndev,
static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
struct ethtool_channels *ch)
{
+ if (cpsw->quirk_irq) {
+ dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
+ return -EOPNOTSUPP;
+ }
+
if (ch->combined_count)
return -EINVAL;
@@ -2915,44 +2951,20 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
return ret;
}
-#define CPSW_QUIRK_IRQ BIT(0)
-
-static const struct platform_device_id cpsw_devtype[] = {
- {
- /* keep it for existing comaptibles */
- .name = "cpsw",
- .driver_data = CPSW_QUIRK_IRQ,
- }, {
- .name = "am335x-cpsw",
- .driver_data = CPSW_QUIRK_IRQ,
- }, {
- .name = "am4372-cpsw",
- .driver_data = 0,
- }, {
- .name = "dra7-cpsw",
- .driver_data = 0,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, cpsw_devtype);
-
-enum ti_cpsw_type {
- CPSW = 0,
- AM335X_CPSW,
- AM4372_CPSW,
- DRA7_CPSW,
-};
-
static const struct of_device_id cpsw_of_mtable[] = {
- { .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], },
- { .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], },
- { .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], },
- { .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], },
+ { .compatible = "ti,cpsw"},
+ { .compatible = "ti,am335x-cpsw"},
+ { .compatible = "ti,am4372-cpsw"},
+ { .compatible = "ti,dra7-cpsw"},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+static const struct soc_device_attribute cpsw_soc_devices[] = {
+ { .family = "AM33xx", .revision = "ES1.0"},
+ { /* sentinel */ }
+};
+
static int cpsw_probe(struct platform_device *pdev)
{
struct clk *clk;
@@ -2964,9 +2976,9 @@ static int cpsw_probe(struct platform_device *pdev)
void __iomem *ss_regs;
void __iomem *cpts_regs;
struct resource *res, *ss_res;
- const struct of_device_id *of_id;
struct gpio_descs *mode;
u32 slave_offset, sliver_offset, slave_size;
+ const struct soc_device_attribute *soc;
struct cpsw_common *cpsw;
int ret = 0, i;
int irq;
@@ -3139,6 +3151,10 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_dt_ret;
}
+ soc = soc_device_match(cpsw_soc_devices);
+ if (soc)
+ cpsw->quirk_irq = 1;
+
cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
if (IS_ERR(cpsw->txv[0].ch)) {
dev_err(priv->dev, "error initializing tx dma channel\n");
@@ -3178,19 +3194,16 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_dma_ret;
}
- of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
- if (of_id) {
- pdev->id_entry = of_id->data;
- if (pdev->id_entry->driver_data)
- cpsw->quirk_irq = true;
- }
-
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
- netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
- netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
+ netif_napi_add(ndev, &cpsw->napi_rx,
+ cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
+ CPSW_POLL_WEIGHT);
+ netif_tx_napi_add(ndev, &cpsw->napi_tx,
+ cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
+ CPSW_POLL_WEIGHT);
cpsw_split_res(ndev);
/* register the network device */
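The IRQ quirk is now keyed off the SoC identity rather than the old platform_device_id table: soc_device_match() returns the first entry of a NULL-terminated attribute array that matches the running SoC (glob patterns are allowed), or NULL if none does. A minimal sketch of the same pattern:

    #include <linux/sys_soc.h>

    static const struct soc_device_attribute quirky_socs[] = {
        { .family = "AM33xx", .revision = "ES1.0" },
        { /* sentinel */ }
    };

    static bool needs_irq_quirk(void)
    {
        /* Non-NULL means the running SoC matched one of the entries above */
        return soc_device_match(quirky_socs) != NULL;
    }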
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index e7b76f6b4f67..6f63c8729afc 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -294,7 +294,8 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp)
delay = CPTS_SKB_TX_WORK_TIMEOUT;
spin_unlock_irqrestore(&cpts->lock, flags);
- pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+ pr_debug("cpts overflow check at %lld.%09ld\n",
+ (long long)ts.tv_sec, ts.tv_nsec);
return (long)delay;
}
@@ -564,7 +565,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->refclk = devm_clk_get(dev, "cpts");
if (IS_ERR(cpts->refclk)) {
dev_err(dev, "Failed to get cpts refclk\n");
- return ERR_PTR(PTR_ERR(cpts->refclk));
+ return ERR_CAST(cpts->refclk);
}
clk_prepare(cpts->refclk);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 31ae04117f0a..cdbddf16dd29 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -191,7 +191,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
return;
WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
- "cpdma_desc_pool size %d != avail %d",
+ "cpdma_desc_pool size %zd != avail %zd",
gen_pool_size(pool->gen_pool),
gen_pool_avail(pool->gen_pool));
if (pool->cpumap)
@@ -1080,7 +1080,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
writel_relaxed(buffer, &desc->hw_buffer);
writel_relaxed(len, &desc->hw_len);
writel_relaxed(mode | len, &desc->hw_mode);
- writel_relaxed(token, &desc->sw_token);
+ writel_relaxed((uintptr_t)token, &desc->sw_token);
writel_relaxed(buffer, &desc->sw_buffer);
writel_relaxed(len, &desc->sw_len);
desc_read(desc, sw_len);
@@ -1121,15 +1121,15 @@ static void __cpdma_chan_free(struct cpdma_chan *chan,
struct cpdma_desc_pool *pool = ctlr->pool;
dma_addr_t buff_dma;
int origlen;
- void *token;
+ uintptr_t token;
- token = (void *)desc_read(desc, sw_token);
+ token = desc_read(desc, sw_token);
buff_dma = desc_read(desc, sw_buffer);
origlen = desc_read(desc, sw_len);
dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
cpdma_desc_free(pool, desc, 1);
- (*chan->handler)(token, outlen, status);
+ (*chan->handler)((void *)token, outlen, status);
}
static int __cpdma_chan_process(struct cpdma_chan *chan)
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index abceea802ea1..be0fec17d95d 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1930,8 +1930,8 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (netif_msg_probe(priv)) {
dev_notice(&pdev->dev, "DaVinci EMAC Probe found device "
- "(regs: %p, irq: %d)\n",
- (void *)priv->emac_base_phys, ndev->irq);
+ "(regs: %pa, irq: %d)\n",
+ &priv->emac_base_phys, ndev->irq);
}
pm_runtime_put(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 3c33f4504d8e..8ac72831af05 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -34,6 +34,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
#include <linux/of.h>
@@ -227,14 +228,14 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
static inline int wait_for_idle(struct davinci_mdio_data *data)
{
struct davinci_mdio_regs __iomem *regs = data->regs;
- unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+ u32 val, ret;
- while (time_after(timeout, jiffies)) {
- if (__raw_readl(&regs->control) & CONTROL_IDLE)
- return 0;
- }
- dev_err(data->dev, "timed out waiting for idle\n");
- return -ETIMEDOUT;
+ ret = readl_poll_timeout(&regs->control, val, val & CONTROL_IDLE,
+ 0, MDIO_TIMEOUT * 1000);
+ if (ret)
+ dev_err(data->dev, "timed out waiting for idle\n");
+
+ return ret;
}
static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
@@ -428,12 +429,10 @@ static int davinci_mdio_probe(struct platform_device *pdev)
* defined to support backward compatibility with DTs which assume that
* Davinci MDIO will always scan the bus for PHYs detection.
*/
- if (dev->of_node && of_get_child_count(dev->of_node)) {
+ if (dev->of_node && of_get_child_count(dev->of_node))
data->skip_scan = true;
- ret = of_mdiobus_register(data->bus, dev->of_node);
- } else {
- ret = mdiobus_register(data->bus);
- }
+
+ ret = of_mdiobus_register(data->bus, dev->of_node);
if (ret)
goto bail_out;
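readl_poll_timeout() from <linux/iopoll.h> takes (addr, val, cond, sleep_us, timeout_us), repeatedly reads the register into val until cond holds, and returns 0 on success or -ETIMEDOUT otherwise; MDIO_TIMEOUT is in milliseconds, hence the * 1000. A simplified equivalent of the sleep_us == 0 case used here:

    /* Roughly what readl_poll_timeout(&regs->control, val,
     * val & CONTROL_IDLE, 0, timeout_us) does (simplified sketch).
     */
    static int poll_idle(void __iomem *addr, u32 mask, u64 timeout_us)
    {
        ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
        u32 val;

        for (;;) {
            val = readl(addr);
            if (val & mask)
                return 0;
            if (ktime_compare(ktime_get(), timeout) > 0)
                break;
            cpu_relax();            /* sleep_us == 0: busy-poll */
        }

        /* One final read in case we raced with the bit going idle */
        return (readl(addr) & mask) ? 0 : -ETIMEDOUT;
    }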
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index b919e89a9b93..750eaa53bf0c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -36,6 +36,8 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
#define GENEVE_VER 0
#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
+#define GENEVE_IPV4_HLEN (ETH_HLEN + sizeof(struct iphdr) + GENEVE_BASE_HLEN)
+#define GENEVE_IPV6_HLEN (ETH_HLEN + sizeof(struct ipv6hdr) + GENEVE_BASE_HLEN)
/* per-network namespace private data for this module */
struct geneve_net {
@@ -826,8 +828,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
return PTR_ERR(rt);
if (skb_dst(skb)) {
- int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
- GENEVE_BASE_HLEN - info->options_len - 14;
+ int mtu = dst_mtu(&rt->dst) - GENEVE_IPV4_HLEN -
+ info->options_len;
skb_dst_update_pmtu(skb, mtu);
}
@@ -872,8 +874,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
return PTR_ERR(dst);
if (skb_dst(skb)) {
- int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
- GENEVE_BASE_HLEN - info->options_len - 14;
+ int mtu = dst_mtu(dst) - GENEVE_IPV6_HLEN - info->options_len;
skb_dst_update_pmtu(skb, mtu);
}
@@ -941,11 +942,10 @@ tx_error:
static int geneve_change_mtu(struct net_device *dev, int new_mtu)
{
- /* Only possible if called internally, ndo_change_mtu path's new_mtu
- * is guaranteed to be between dev->min_mtu and dev->max_mtu.
- */
if (new_mtu > dev->max_mtu)
new_mtu = dev->max_mtu;
+ else if (new_mtu < dev->min_mtu)
+ new_mtu = dev->min_mtu;
dev->mtu = new_mtu;
return 0;
@@ -1261,7 +1261,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
}
if (data[IFLA_GENEVE_REMOTE6]) {
- #if IS_ENABLED(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
if (changelink && (ip_tunnel_info_af(info) == AF_INET)) {
attrtype = IFLA_GENEVE_REMOTE6;
goto change_notsup;
@@ -1387,6 +1387,48 @@ change_notsup:
return -EOPNOTSUPP;
}
+static void geneve_link_config(struct net_device *dev,
+ struct ip_tunnel_info *info, struct nlattr *tb[])
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ int ldev_mtu = 0;
+
+ if (tb[IFLA_MTU]) {
+ geneve_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+ return;
+ }
+
+ switch (ip_tunnel_info_af(info)) {
+ case AF_INET: {
+ struct flowi4 fl4 = { .daddr = info->key.u.ipv4.dst };
+ struct rtable *rt = ip_route_output_key(geneve->net, &fl4);
+
+ if (!IS_ERR(rt) && rt->dst.dev) {
+ ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV4_HLEN;
+ ip_rt_put(rt);
+ }
+ break;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6: {
+ struct rt6_info *rt = rt6_lookup(geneve->net,
+ &info->key.u.ipv6.dst, NULL, 0,
+ NULL, 0);
+
+ if (rt && rt->dst.dev)
+ ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
+ ip6_rt_put(rt);
+ break;
+ }
+#endif
+ }
+
+ if (ldev_mtu <= 0)
+ return;
+
+ geneve_change_mtu(dev, ldev_mtu - info->options_len);
+}
+
static int geneve_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -1402,8 +1444,14 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
if (err)
return err;
- return geneve_configure(net, dev, extack, &info, metadata,
- use_udp6_rx_checksums);
+ err = geneve_configure(net, dev, extack, &info, metadata,
+ use_udp6_rx_checksums);
+ if (err)
+ return err;
+
+ geneve_link_config(dev, &info, tb);
+
+ return 0;
}
/* Quiesces the geneve device data path for both TX and RX.
@@ -1477,8 +1525,10 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
if (err)
return err;
- if (!geneve_dst_addr_equal(&geneve->info, &info))
+ if (!geneve_dst_addr_equal(&geneve->info, &info)) {
dst_cache_reset(&info.dst_cache);
+ geneve_link_config(dev, &info, tb);
+ }
geneve_quiesce(geneve, &gs4, &gs6);
geneve->info = info;
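For reference, with no IP options the new header-length macros work out to:

    GENEVE_BASE_HLEN = sizeof(struct udphdr)  + sizeof(struct genevehdr)   =  8 +  8      = 16
    GENEVE_IPV4_HLEN = ETH_HLEN + sizeof(struct iphdr)  + GENEVE_BASE_HLEN = 14 + 20 + 16 = 50
    GENEVE_IPV6_HLEN = ETH_HLEN + sizeof(struct ipv6hdr) + GENEVE_BASE_HLEN = 14 + 40 + 16 = 70

so for a 1500-byte IPv4 underlay geneve_link_config() ends up with dev->mtu = 1500 - 50 - options_len, the same value the old open-coded "- 14" arithmetic produced.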
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index c180b480f8ef..13e4c1eff353 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -217,7 +217,7 @@ static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc,
c = *s++;
else if (len > 1)
c = crc >> 8;
- else if (len > 0)
+ else
c = crc & 0xff;
len--;
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 1ab97d99b9ba..f41116488079 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -867,7 +867,7 @@ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
dev->name);
goto drop;
case E_FRM_ERR:
- printk(KERN_WARNING "%s: Framming Error\n",
+ printk(KERN_WARNING "%s: Framing Error\n",
dev->name);
goto drop;
case E_FLG_SYN_ERR:
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
index 936968d23559..0765d5f61714 100644
--- a/drivers/net/hyperv/Kconfig
+++ b/drivers/net/hyperv/Kconfig
@@ -1,5 +1,6 @@
config HYPERV_NET
tristate "Microsoft Hyper-V virtual network driver"
depends on HYPERV
+ select UCS2_STRING
help
Select this option to enable the Hyper-V virtual network driver.
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 6ebe39a3dde6..1be34d2e3563 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -110,7 +110,7 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
u16 hashkey_size;
/* The offset of the secret key from the beginning of this structure */
- u32 kashkey_offset;
+ u32 hashkey_offset;
u32 processor_masks_offset;
u32 num_processor_masks;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index e7308958b7a9..d2ee66c259a7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -652,16 +652,14 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
sync_change_bit(index, net_device->send_section_map);
}
-static void netvsc_send_tx_complete(struct netvsc_device *net_device,
- struct vmbus_channel *incoming_channel,
- struct hv_device *device,
+static void netvsc_send_tx_complete(struct net_device *ndev,
+ struct netvsc_device *net_device,
+ struct vmbus_channel *channel,
const struct vmpacket_descriptor *desc,
int budget)
{
struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
- struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *ndev_ctx = netdev_priv(ndev);
- struct vmbus_channel *channel = device->channel;
u16 q_idx = 0;
int queue_sends;
@@ -675,7 +673,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
q_idx = packet->q_idx;
- channel = incoming_channel;
tx_stats = &net_device->chan_table[q_idx].tx_stats;
@@ -705,14 +702,13 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
}
}
-static void netvsc_send_completion(struct netvsc_device *net_device,
+static void netvsc_send_completion(struct net_device *ndev,
+ struct netvsc_device *net_device,
struct vmbus_channel *incoming_channel,
- struct hv_device *device,
const struct vmpacket_descriptor *desc,
int budget)
{
- struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
- struct net_device *ndev = hv_get_drvdata(device);
+ const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
switch (nvsp_packet->hdr.msg_type) {
case NVSP_MSG_TYPE_INIT_COMPLETE:
@@ -726,8 +722,8 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
break;
case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
- netvsc_send_tx_complete(net_device, incoming_channel,
- device, desc, budget);
+ netvsc_send_tx_complete(ndev, net_device, incoming_channel,
+ desc, budget);
break;
default:
@@ -1092,12 +1088,11 @@ static void enq_receive_complete(struct net_device *ndev,
static int netvsc_receive(struct net_device *ndev,
struct netvsc_device *net_device,
- struct net_device_context *net_device_ctx,
- struct hv_device *device,
struct vmbus_channel *channel,
const struct vmpacket_descriptor *desc,
- struct nvsp_message *nvsp)
+ const struct nvsp_message *nvsp)
{
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
const struct vmtransfer_page_packet_header *vmxferpage_packet
= container_of(desc, const struct vmtransfer_page_packet_header, d);
u16 q_idx = channel->offermsg.offer.sub_channel_index;
@@ -1158,13 +1153,12 @@ static int netvsc_receive(struct net_device *ndev,
return count;
}
-static void netvsc_send_table(struct hv_device *hdev,
- struct nvsp_message *nvmsg)
+static void netvsc_send_table(struct net_device *ndev,
+ const struct nvsp_message *nvmsg)
{
- struct net_device *ndev = hv_get_drvdata(hdev);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- int i;
u32 count, *tab;
+ int i;
count = nvmsg->msg.v5_msg.send_table.count;
if (count != VRSS_SEND_TAB_SIZE) {
@@ -1179,24 +1173,25 @@ static void netvsc_send_table(struct hv_device *hdev,
net_device_ctx->tx_table[i] = tab[i];
}
-static void netvsc_send_vf(struct net_device_context *net_device_ctx,
- struct nvsp_message *nvmsg)
+static void netvsc_send_vf(struct net_device *ndev,
+ const struct nvsp_message *nvmsg)
{
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}
-static inline void netvsc_receive_inband(struct hv_device *hdev,
- struct net_device_context *net_device_ctx,
- struct nvsp_message *nvmsg)
+static void netvsc_receive_inband(struct net_device *ndev,
+ const struct nvsp_message *nvmsg)
{
switch (nvmsg->hdr.msg_type) {
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
- netvsc_send_table(hdev, nvmsg);
+ netvsc_send_table(ndev, nvmsg);
break;
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
- netvsc_send_vf(net_device_ctx, nvmsg);
+ netvsc_send_vf(ndev, nvmsg);
break;
}
}
@@ -1208,24 +1203,23 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
const struct vmpacket_descriptor *desc,
int budget)
{
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct nvsp_message *nvmsg = hv_pkt_data(desc);
+ const struct nvsp_message *nvmsg = hv_pkt_data(desc);
trace_nvsp_recv(ndev, channel, nvmsg);
switch (desc->type) {
case VM_PKT_COMP:
- netvsc_send_completion(net_device, channel, device,
+ netvsc_send_completion(ndev, net_device, channel,
desc, budget);
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- return netvsc_receive(ndev, net_device, net_device_ctx,
- device, channel, desc, nvmsg);
+ return netvsc_receive(ndev, net_device, channel,
+ desc, nvmsg);
break;
case VM_PKT_DATA_INBAND:
- netvsc_receive_inband(device, net_device_ctx, nvmsg);
+ netvsc_receive_inband(ndev, nvmsg);
break;
default:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index ecc84954c511..da07ccdf84bf 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1840,7 +1840,8 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
goto rx_handler_failed;
}
- ret = netdev_upper_dev_link(vf_netdev, ndev, NULL);
+ ret = netdev_master_upper_dev_link(vf_netdev, ndev,
+ NULL, NULL, NULL);
if (ret != 0) {
netdev_err(vf_netdev,
"can not set master device %s (err = %d)\n",
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 3b6dbacaf77d..7f3dab4b4cbc 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -752,7 +752,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
rssp->indirect_tabsize = 4*ITAB_NUM;
rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
rssp->hashkey_size = NETVSC_HASH_KEYLEN;
- rssp->kashkey_offset = rssp->indirect_taboffset +
+ rssp->hashkey_offset = rssp->indirect_taboffset +
rssp->indirect_tabsize;
/* Set indirection table entries */
@@ -761,7 +761,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
itab[i] = rdev->rx_table[i];
/* Set hask key values */
- keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
+ keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
ret = rndis_filter_send_request(rdev, request);
@@ -1316,7 +1316,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
rndis_device->link_state ? "down" : "up");
if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
- return net_device;
+ goto out;
rndis_filter_query_link_speed(rndis_device, net_device);
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 9fb9b565a002..4f684cbcdc57 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -1045,7 +1045,7 @@ static int atusb_probe(struct usb_interface *interface,
atusb->tx_dr.bRequest = ATUSB_TX;
atusb->tx_dr.wValue = cpu_to_le16(0);
- atusb->tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ atusb->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!atusb->tx_urb)
goto fail;
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 55a22c761808..de0d7f28a181 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -1267,7 +1267,7 @@ mcr20a_probe(struct spi_device *spi)
ret = mcr20a_get_platform_data(spi, pdata);
if (ret < 0) {
dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n");
- return ret;
+ goto free_pdata;
}
/* init reset gpio */
@@ -1275,7 +1275,7 @@ mcr20a_probe(struct spi_device *spi)
ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio,
GPIOF_OUT_INIT_HIGH, "reset");
if (ret)
- return ret;
+ goto free_pdata;
}
/* reset mcr20a */
@@ -1291,7 +1291,8 @@ mcr20a_probe(struct spi_device *spi)
hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops);
if (!hw) {
dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_pdata;
}
/* init mcr20a local data */
@@ -1308,8 +1309,10 @@ mcr20a_probe(struct spi_device *spi)
/* init buf */
lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL);
- if (!lp->buf)
- return -ENOMEM;
+ if (!lp->buf) {
+ ret = -ENOMEM;
+ goto free_dev;
+ }
mcr20a_setup_tx_spi_messages(lp);
mcr20a_setup_rx_spi_messages(lp);
@@ -1366,6 +1369,8 @@ mcr20a_probe(struct spi_device *spi)
free_dev:
ieee802154_free_hw(lp->hw);
+free_pdata:
+ kfree(pdata);
return ret;
}
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 450eec264a5e..4377c26f714d 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -792,8 +792,10 @@ static int ipvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_CHANGEADDR:
- list_for_each_entry(ipvlan, &port->ipvlans, pnode)
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
+ }
break;
case NETDEV_PRE_TYPE_CHANGE:
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 9cbb0c8a896a..7de88b33d5b9 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
- goto put_dev;
+ goto unregister;
/* need to be already registered so that ->init has run and
* the MAC addr is set
@@ -3316,8 +3316,7 @@ del_dev:
macsec_del_dev(macsec);
unlink:
netdev_upper_dev_unlink(real_dev, dev);
-put_dev:
- dev_put(real_dev);
+unregister:
unregister_netdevice(dev);
return err;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 725f4b4afc6d..adde8fc45588 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -514,6 +514,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *vlan = netdev_priv(dev);
const struct macvlan_port *port = vlan->port;
const struct macvlan_dev *dest;
+ void *accel_priv = NULL;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
const struct ethhdr *eth = (void *)skb->data;
@@ -533,9 +534,14 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ /* For packets that are non-multicast and not bridged we will pass
+ * the necessary information so that the lowerdev can distinguish
+ * the source of the packets via the accel_priv value.
+ */
+ accel_priv = vlan->accel_priv;
xmit_world:
skb->dev = vlan->lowerdev;
- return dev_queue_xmit(skb);
+ return dev_queue_xmit_accel(skb, accel_priv);
}
static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
@@ -552,19 +558,14 @@ static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, str
static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ struct macvlan_dev *vlan = netdev_priv(dev);
unsigned int len = skb->len;
int ret;
- struct macvlan_dev *vlan = netdev_priv(dev);
if (unlikely(netpoll_tx_running(dev)))
return macvlan_netpoll_send_skb(vlan, skb);
- if (vlan->fwd_priv) {
- skb->dev = vlan->lowerdev;
- ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
- } else {
- ret = macvlan_queue_xmit(skb, dev);
- }
+ ret = macvlan_queue_xmit(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
struct vlan_pcpu_stats *pcpu_stats;
@@ -613,26 +614,27 @@ static int macvlan_open(struct net_device *dev)
goto hash_add;
}
- if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
- vlan->fwd_priv =
- lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
-
- /* If we get a NULL pointer back, or if we get an error
- * then we should just fall through to the non accelerated path
- */
- if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
- vlan->fwd_priv = NULL;
- } else
- return 0;
- }
-
err = -EBUSY;
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
- err = dev_uc_add(lowerdev, dev->dev_addr);
- if (err < 0)
- goto out;
+ /* Attempt to populate accel_priv which is used to offload the L2
+ * forwarding requests for unicast packets.
+ */
+ if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD)
+ vlan->accel_priv =
+ lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
+
+ /* If earlier attempt to offload failed, or accel_priv is not
+ * populated we must add the unicast address to the lower device.
+ */
+ if (IS_ERR_OR_NULL(vlan->accel_priv)) {
+ vlan->accel_priv = NULL;
+ err = dev_uc_add(lowerdev, dev->dev_addr);
+ if (err < 0)
+ goto out;
+ }
+
if (dev->flags & IFF_ALLMULTI) {
err = dev_set_allmulti(lowerdev, 1);
if (err < 0)
@@ -653,13 +655,14 @@ clear_multi:
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
del_unicast:
- dev_uc_del(lowerdev, dev->dev_addr);
-out:
- if (vlan->fwd_priv) {
+ if (vlan->accel_priv) {
lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
- vlan->fwd_priv);
- vlan->fwd_priv = NULL;
+ vlan->accel_priv);
+ vlan->accel_priv = NULL;
+ } else {
+ dev_uc_del(lowerdev, dev->dev_addr);
}
+out:
return err;
}
@@ -668,11 +671,10 @@ static int macvlan_stop(struct net_device *dev)
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- if (vlan->fwd_priv) {
+ if (vlan->accel_priv) {
lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
- vlan->fwd_priv);
- vlan->fwd_priv = NULL;
- return 0;
+ vlan->accel_priv);
+ vlan->accel_priv = NULL;
}
dev_uc_unsync(lowerdev, dev);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index bdfbabb86ee0..343989f9f9d9 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -118,11 +118,18 @@ config MDIO_I2C
config MDIO_MOXART
tristate "MOXA ART MDIO interface support"
- depends on ARCH_MOXART
+ depends on ARCH_MOXART || COMPILE_TEST
help
This driver supports the MDIO interface found in the network
interface units of the MOXA ART SoC
+config MDIO_MSCC_MIIM
+ tristate "Microsemi MIIM interface support"
+ depends on HAS_IOMEM
+ help
+ This driver supports the MIIM (MDIO) interface found in the network
+ switches of the Microsemi SoCs
+
config MDIO_OCTEON
tristate "Octeon and some ThunderX SOCs MDIO buses"
depends on 64BIT
@@ -135,7 +142,7 @@ config MDIO_OCTEON
config MDIO_SUN4I
tristate "Allwinner sun4i MDIO interface support"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
help
This driver supports the MDIO interface found in the network
interface units of the Allwinner SoC that have an EMAC (A10,
@@ -218,6 +225,12 @@ config AQUANTIA_PHY
---help---
Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+config ASIX_PHY
+ tristate "Asix PHYs"
+ help
+ Currently supports the Asix Electronics PHY found in the X-Surf 100
+ AX88796B package.
+
config AT803X_PHY
tristate "AT803X PHYs"
---help---
@@ -285,6 +298,11 @@ config DP83822_PHY
---help---
Supports the DP83822 PHY.
+config DP83TC811_PHY
+ tristate "Texas Instruments DP83TC822 PHY"
+ ---help---
+ Supports the DP83TC822 PHY.
+
config DP83848_PHY
tristate "Texas Instruments DP83848 PHY"
---help---
@@ -354,6 +372,11 @@ config MICROCHIP_PHY
help
Supports the LAN88XX PHYs.
+config MICROCHIP_T1_PHY
+ tristate "Microchip T1 PHYs"
+ ---help---
+ Supports the LAN87XX PHYs.
+
config MICROSEMI_PHY
tristate "Microsemi PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 01acbcb2c798..5805c0b7d60e 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
+obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
@@ -45,6 +46,7 @@ obj-y += $(sfp-obj-y) $(sfp-obj-m)
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
+obj-$(CONFIG_ASIX_PHY) += asix.o
obj-$(CONFIG_AT803X_PHY) += at803x.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o
@@ -57,6 +59,7 @@ obj-$(CONFIG_CORTINA_PHY) += cortina.o
obj-$(CONFIG_DAVICOM_PHY) += davicom.o
obj-$(CONFIG_DP83640_PHY) += dp83640.o
obj-$(CONFIG_DP83822_PHY) += dp83822.o
+obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o
obj-$(CONFIG_DP83848_PHY) += dp83848.o
obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
@@ -70,6 +73,7 @@ obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
+obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
obj-$(CONFIG_MICROSEMI_PHY) += mscc.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c
new file mode 100644
index 000000000000..8ebe7f5484ae
--- /dev/null
+++ b/drivers/net/phy/asix.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for Asix PHYs
+ *
+ * Author: Michael Schmitz <schmitzmic@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#define PHY_ID_ASIX_AX88796B 0x003b1841
+
+MODULE_DESCRIPTION("Asix PHY driver");
+MODULE_AUTHOR("Michael Schmitz <schmitzmic@gmail.com>");
+MODULE_LICENSE("GPL");
+
+/**
+ * asix_soft_reset - software reset the PHY via BMCR_RESET bit
+ * @phydev: target phy_device struct
+ *
+ * Description: Perform a software PHY reset using the standard
+ * BMCR_RESET bit and poll for the reset bit to be cleared.
+ * Toggle BMCR_RESET bit off to accommodate broken AX88796B PHY implementation
+ * such as used on the Individual Computers' X-Surf 100 Zorro card.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+static int asix_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Asix PHY won't reset unless reset bit toggles */
+ ret = phy_write(phydev, MII_BMCR, 0);
+ if (ret < 0)
+ return ret;
+
+ return genphy_soft_reset(phydev);
+}
+
+static struct phy_driver asix_driver[] = { {
+ .phy_id = PHY_ID_ASIX_AX88796B,
+ .name = "Asix Electronics AX88796B",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_BASIC_FEATURES,
+ .soft_reset = asix_soft_reset,
+} };
+
+module_phy_driver(asix_driver);
+
+static struct mdio_device_id __maybe_unused asix_tbl[] = {
+ { PHY_ID_ASIX_AX88796B, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, asix_tbl);
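The explicit write of 0 above guarantees that the subsequent BMCR_RESET write is seen as a 0 -> 1 transition by the broken AX88796B; genphy_soft_reset() then performs the standard sequence, roughly:

    /* Simplified sketch of the generic reset asix_soft_reset() falls back to */
    static int generic_soft_reset(struct phy_device *phydev)
    {
        int bmcr, ret, tries = 12;

        ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
        if (ret < 0)
            return ret;

        /* BMCR_RESET is self-clearing; poll until the PHY drops it */
        do {
            msleep(50);
            bmcr = phy_read(phydev, MII_BMCR);
            if (bmcr < 0)
                return bmcr;
        } while ((bmcr & BMCR_RESET) && --tries);

        return (bmcr & BMCR_RESET) ? -ETIMEDOUT : 0;
    }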
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 5ad130c3da43..0876aec7328c 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -346,10 +346,6 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
}
EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
-#ifndef UINT64_MAX
-#define UINT64_MAX (u64)(~((u64)0))
-#endif
-
/* Caller is supposed to provide appropriate storage for the library code to
* access the shadow copy
*/
@@ -362,7 +358,7 @@ static u64 bcm_phy_get_stat(struct phy_device *phydev, u64 *shadow,
val = phy_read(phydev, stat.reg);
if (val < 0) {
- ret = UINT64_MAX;
+ ret = U64_MAX;
} else {
val >>= stat.shift;
val = val & ((1 << stat.bits) - 1);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 3bb6b66dc7bf..f9c25912eb98 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -720,6 +720,15 @@ static struct phy_driver broadcom_drivers[] = {
.get_strings = bcm_phy_get_strings,
.get_stats = bcm53xx_phy_get_stats,
.probe = bcm53xx_phy_probe,
+}, {
+ .phy_id = PHY_ID_BCM89610,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM89610",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = bcm54xx_config_init,
+ .ack_interrupt = bcm_phy_ack_intr,
+ .config_intr = bcm_phy_config_intr,
} };
module_phy_driver(broadcom_drivers);
@@ -741,6 +750,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCMAC131, 0xfffffff0 },
{ PHY_ID_BCM5241, 0xfffffff0 },
{ PHY_ID_BCM5395, 0xfffffff0 },
+ { PHY_ID_BCM89610, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
new file mode 100644
index 000000000000..081d99aa3985
--- /dev/null
+++ b/drivers/net/phy/dp83tc811.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Texas Instruments DP83TC811 PHY
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ */
+
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#define DP83TC811_PHY_ID 0x2000a253
+#define DP83811_DEVADDR 0x1f
+
+#define MII_DP83811_SGMII_CTRL 0x09
+#define MII_DP83811_INT_STAT1 0x12
+#define MII_DP83811_INT_STAT2 0x13
+#define MII_DP83811_RESET_CTRL 0x1f
+
+#define DP83811_HW_RESET BIT(15)
+#define DP83811_SW_RESET BIT(14)
+
+/* INT_STAT1 bits */
+#define DP83811_RX_ERR_HF_INT_EN BIT(0)
+#define DP83811_MS_TRAINING_INT_EN BIT(1)
+#define DP83811_ANEG_COMPLETE_INT_EN BIT(2)
+#define DP83811_ESD_EVENT_INT_EN BIT(3)
+#define DP83811_WOL_INT_EN BIT(4)
+#define DP83811_LINK_STAT_INT_EN BIT(5)
+#define DP83811_ENERGY_DET_INT_EN BIT(6)
+#define DP83811_LINK_QUAL_INT_EN BIT(7)
+
+/* INT_STAT2 bits */
+#define DP83811_JABBER_DET_INT_EN BIT(0)
+#define DP83811_POLARITY_INT_EN BIT(1)
+#define DP83811_SLEEP_MODE_INT_EN BIT(2)
+#define DP83811_OVERTEMP_INT_EN BIT(3)
+#define DP83811_OVERVOLTAGE_INT_EN BIT(6)
+#define DP83811_UNDERVOLTAGE_INT_EN BIT(7)
+
+#define MII_DP83811_RXSOP1 0x04a5
+#define MII_DP83811_RXSOP2 0x04a6
+#define MII_DP83811_RXSOP3 0x04a7
+
+/* WoL Registers */
+#define MII_DP83811_WOL_CFG 0x04a0
+#define MII_DP83811_WOL_STAT 0x04a1
+#define MII_DP83811_WOL_DA1 0x04a2
+#define MII_DP83811_WOL_DA2 0x04a3
+#define MII_DP83811_WOL_DA3 0x04a4
+
+/* WoL bits */
+#define DP83811_WOL_MAGIC_EN BIT(0)
+#define DP83811_WOL_SECURE_ON BIT(5)
+#define DP83811_WOL_EN BIT(7)
+#define DP83811_WOL_INDICATION_SEL BIT(8)
+#define DP83811_WOL_CLR_INDICATION BIT(11)
+
+/* SGMII CTRL bits */
+#define DP83811_TDR_AUTO BIT(8)
+#define DP83811_SGMII_EN BIT(12)
+#define DP83811_SGMII_AUTO_NEG_EN BIT(13)
+#define DP83811_SGMII_TX_ERR_DIS BIT(14)
+#define DP83811_SGMII_SOFT_RESET BIT(15)
+
+static int dp83811_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, MII_DP83811_INT_STAT1);
+ if (err < 0)
+ return err;
+
+ err = phy_read(phydev, MII_DP83811_INT_STAT2);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dp83811_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *ndev = phydev->attached_dev;
+ const u8 *mac;
+ u16 value;
+
+ if (wol->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
+ mac = (const u8 *)ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ /* The PHY stores the MAC address in three 16-bit WoL registers,
+ * two bytes per register with the low byte in the lower half:
+ * WOL_DA1 = bytes 0|1, WOL_DA2 = bytes 2|3, WOL_DA3 = bytes 4|5.
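+ *
+ * Hypothetical example: for 00:11:22:33:44:55 (mac[0] = 0x00 ...
+ * mac[5] = 0x55) the writes below produce WOL_DA1 = 0x1100,
+ * WOL_DA2 = 0x3322 and WOL_DA3 = 0x5544.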
+ */
+ phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_DA1,
+ (mac[1] << 8) | mac[0]);
+ phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_DA2,
+ (mac[3] << 8) | mac[2]);
+ phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_DA3,
+ (mac[5] << 8) | mac[4]);
+
+ value = phy_read_mmd(phydev, DP83811_DEVADDR,
+ MII_DP83811_WOL_CFG);
+ if (wol->wolopts & WAKE_MAGIC)
+ value |= DP83811_WOL_MAGIC_EN;
+ else
+ value &= ~DP83811_WOL_MAGIC_EN;
+
+ if (wol->wolopts & WAKE_MAGICSECURE) {
+ phy_write_mmd(phydev, DP83811_DEVADDR,
+ MII_DP83811_RXSOP1,
+ (wol->sopass[1] << 8) | wol->sopass[0]);
+ phy_write_mmd(phydev, DP83811_DEVADDR,
+ MII_DP83811_RXSOP2,
+ (wol->sopass[3] << 8) | wol->sopass[2]);
+ phy_write_mmd(phydev, DP83811_DEVADDR,
+ MII_DP83811_RXSOP3,
+ (wol->sopass[5] << 8) | wol->sopass[4]);
+ value |= DP83811_WOL_SECURE_ON;
+ } else {
+ value &= ~DP83811_WOL_SECURE_ON;
+ }
+
+ value |= (DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL |
+ DP83811_WOL_CLR_INDICATION);
+ phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
+ value);
+ } else {
+ value = phy_read_mmd(phydev, DP83811_DEVADDR,
+ MII_DP83811_WOL_CFG);
+ value &= ~DP83811_WOL_EN;
+ phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
+ value);
+ }
+
+ return 0;
+}
+
+static void dp83811_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ u16 sopass_val;
+ int value;
+
+ wol->supported = (WAKE_MAGIC | WAKE_MAGICSECURE);
+ wol->wolopts = 0;
+
+ value = phy_read_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG);
+
+ if (value & DP83811_WOL_MAGIC_EN)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (value & DP83811_WOL_SECURE_ON) {
+ sopass_val = phy_read_mmd(phydev, DP83811_DEVADDR,
+ MII_DP83811_RXSOP1);
+ wol->sopass[0] = (sopass_val & 0xff);
+ wol->sopass[1] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83811_DEVADDR,
+ MII_DP83811_RXSOP2);
+ wol->sopass[2] = (sopass_val & 0xff);
+ wol->sopass[3] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83811_DEVADDR,
+ MII_DP83811_RXSOP3);
+ wol->sopass[4] = (sopass_val & 0xff);
+ wol->sopass[5] = (sopass_val >> 8);
+
+ wol->wolopts |= WAKE_MAGICSECURE;
+ }
+
+ /* WoL is not enabled so set wolopts to 0 */
+ if (!(value & DP83811_WOL_EN))
+ wol->wolopts = 0;
+}
+
+static int dp83811_config_intr(struct phy_device *phydev)
+{
+ int misr_status, err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ misr_status = phy_read(phydev, MII_DP83811_INT_STAT1);
+ if (misr_status < 0)
+ return misr_status;
+
+ misr_status |= (DP83811_RX_ERR_HF_INT_EN |
+ DP83811_MS_TRAINING_INT_EN |
+ DP83811_ANEG_COMPLETE_INT_EN |
+ DP83811_ESD_EVENT_INT_EN |
+ DP83811_WOL_INT_EN |
+ DP83811_LINK_STAT_INT_EN |
+ DP83811_ENERGY_DET_INT_EN |
+ DP83811_LINK_QUAL_INT_EN);
+
+ err = phy_write(phydev, MII_DP83811_INT_STAT1, misr_status);
+ if (err < 0)
+ return err;
+
+ misr_status = phy_read(phydev, MII_DP83811_INT_STAT2);
+ if (misr_status < 0)
+ return misr_status;
+
+ misr_status |= (DP83811_JABBER_DET_INT_EN |
+ DP83811_POLARITY_INT_EN |
+ DP83811_SLEEP_MODE_INT_EN |
+ DP83811_OVERTEMP_INT_EN |
+ DP83811_OVERVOLTAGE_INT_EN |
+ DP83811_UNDERVOLTAGE_INT_EN);
+
+ err = phy_write(phydev, MII_DP83811_INT_STAT2, misr_status);
+
+ } else {
+ err = phy_write(phydev, MII_DP83811_INT_STAT1, 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_DP83811_INT_STAT2, 0);
+ }
+
+ return err;
+}
+
+static int dp83811_config_aneg(struct phy_device *phydev)
+{
+ int value, err;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ value = phy_read(phydev, MII_DP83811_SGMII_CTRL);
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
+ (DP83811_SGMII_AUTO_NEG_EN | value));
+ if (err < 0)
+ return err;
+ } else {
+ err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
+ (~DP83811_SGMII_AUTO_NEG_EN & value));
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return genphy_config_aneg(phydev);
+}
+
+static int dp83811_config_init(struct phy_device *phydev)
+{
+ int value, err;
+
+ err = genphy_config_init(phydev);
+ if (err < 0)
+ return err;
+
+ value = phy_read(phydev, MII_DP83811_SGMII_CTRL);
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
+ (DP83811_SGMII_EN | value));
+ if (err < 0)
+ return err;
+ } else {
+ err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
+ (~DP83811_SGMII_EN & value));
+ if (err < 0)
+ return err;
+ }
+
+ value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN;
+
+ return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
+ value);
+}
+
+static int dp83811_phy_reset(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, MII_DP83811_RESET_CTRL, DP83811_HW_RESET);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dp83811_suspend(struct phy_device *phydev)
+{
+ int value;
+
+ value = phy_read_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG);
+
+ if (!(value & DP83811_WOL_EN))
+ genphy_suspend(phydev);
+
+ return 0;
+}
+
+static int dp83811_resume(struct phy_device *phydev)
+{
+ int value;
+
+ genphy_resume(phydev);
+
+ value = phy_read_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG);
+
+ phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, value |
+ DP83811_WOL_CLR_INDICATION);
+
+ return 0;
+}
+
+static struct phy_driver dp83811_driver[] = {
+ {
+ .phy_id = DP83TC811_PHY_ID,
+ .phy_id_mask = 0xfffffff0,
+ .name = "TI DP83TC811",
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = dp83811_config_init,
+ .config_aneg = dp83811_config_aneg,
+ .soft_reset = dp83811_phy_reset,
+ .get_wol = dp83811_get_wol,
+ .set_wol = dp83811_set_wol,
+ .ack_interrupt = dp83811_ack_interrupt,
+ .config_intr = dp83811_config_intr,
+ .suspend = dp83811_suspend,
+ .resume = dp83811_resume,
+ },
+};
+module_phy_driver(dp83811_driver);
+
+static struct mdio_device_id __maybe_unused dp83811_tbl[] = {
+ { DP83TC811_PHY_ID, 0xfffffff0 },
+ { },
+};
+MODULE_DEVICE_TABLE(mdio, dp83811_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83TC811 PHY driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index c22e8e383247..b8f57e9b9379 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1393,6 +1393,15 @@ static int m88e1318_set_wol(struct phy_device *phydev,
if (err < 0)
goto error;
+ /* If a WOL event happened once, the LED[2] interrupt pin
+ * will not be cleared unless we read the interrupt status
+ * register. If interrupts are in use, the normal interrupt
+ * handling will clear the WOL event. Clear the WOL event
+ * before enabling it if !phy_interrupt_is_valid().
+ */
+ if (!phy_interrupt_is_valid(phydev))
+ phy_read(phydev, MII_M1011_IEVENT);
+
/* Enable the WOL interrupt */
err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
MII_88E1318S_PHY_CSIER_WOL_EIE);
@@ -1473,9 +1482,6 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
}
}
-#ifndef UINT64_MAX
-#define UINT64_MAX (u64)(~((u64)0))
-#endif
static u64 marvell_get_stat(struct phy_device *phydev, int i)
{
struct marvell_hw_stat stat = marvell_hw_stats[i];
@@ -1485,7 +1491,7 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
val = phy_read_paged(phydev, stat.page, stat.reg);
if (val < 0) {
- ret = UINT64_MAX;
+ ret = U64_MAX;
} else {
val = val & ((1 << stat.bits) - 1);
priv->stats[i] += val;
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 403b085f0a89..15352f987bdf 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -205,14 +205,6 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
return 0;
}
-static int mdiobb_reset(struct mii_bus *bus)
-{
- struct mdiobb_ctrl *ctrl = bus->priv;
- if (ctrl->reset)
- ctrl->reset(bus);
- return 0;
-}
-
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
struct mii_bus *bus;
@@ -225,7 +217,6 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
bus->read = mdiobb_read;
bus->write = mdiobb_write;
- bus->reset = mdiobb_reset;
bus->priv = ctrl;
return bus;
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
index 1861f387820d..863496fa5d13 100644
--- a/drivers/net/phy/mdio-boardinfo.c
+++ b/drivers/net/phy/mdio-boardinfo.c
@@ -30,17 +30,20 @@ void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
struct mdio_board_info *bi))
{
struct mdio_board_entry *be;
+ struct mdio_board_entry *tmp;
struct mdio_board_info *bi;
int ret;
mutex_lock(&mdio_board_lock);
- list_for_each_entry(be, &mdio_board_list, list) {
+ list_for_each_entry_safe(be, tmp, &mdio_board_list, list) {
bi = &be->board_info;
if (strcmp(bus->id, bi->bus_id))
continue;
+ mutex_unlock(&mdio_board_lock);
ret = cb(bus, bi);
+ mutex_lock(&mdio_board_lock);
if (ret)
continue;
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 4333c6e14742..4e4c8daf44c3 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -24,8 +24,10 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/mdio-gpio.h>
#include <linux/gpio.h>
-#include <linux/platform_data/mdio-gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
@@ -35,37 +37,22 @@ struct mdio_gpio_info {
struct gpio_desc *mdc, *mdio, *mdo;
};
-static void *mdio_gpio_of_get_data(struct platform_device *pdev)
+static int mdio_gpio_get_data(struct device *dev,
+ struct mdio_gpio_info *bitbang)
{
- struct device_node *np = pdev->dev.of_node;
- struct mdio_gpio_platform_data *pdata;
- enum of_gpio_flags flags;
- int ret;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- ret = of_get_gpio_flags(np, 0, &flags);
- if (ret < 0)
- return NULL;
-
- pdata->mdc = ret;
- pdata->mdc_active_low = flags & OF_GPIO_ACTIVE_LOW;
-
- ret = of_get_gpio_flags(np, 1, &flags);
- if (ret < 0)
- return NULL;
- pdata->mdio = ret;
- pdata->mdio_active_low = flags & OF_GPIO_ACTIVE_LOW;
-
- ret = of_get_gpio_flags(np, 2, &flags);
- if (ret > 0) {
- pdata->mdo = ret;
- pdata->mdo_active_low = flags & OF_GPIO_ACTIVE_LOW;
- }
-
- return pdata;
+ bitbang->mdc = devm_gpiod_get_index(dev, NULL, MDIO_GPIO_MDC,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(bitbang->mdc))
+ return PTR_ERR(bitbang->mdc);
+
+ bitbang->mdio = devm_gpiod_get_index(dev, NULL, MDIO_GPIO_MDIO,
+ GPIOD_IN);
+ if (IS_ERR(bitbang->mdio))
+ return PTR_ERR(bitbang->mdio);
+
+ bitbang->mdo = devm_gpiod_get_index_optional(dev, NULL, MDIO_GPIO_MDO,
+ GPIOD_OUT_LOW);
+ return PTR_ERR_OR_ZERO(bitbang->mdo);
}
static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
@@ -125,78 +112,28 @@ static const struct mdiobb_ops mdio_gpio_ops = {
};
static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
- struct mdio_gpio_platform_data *pdata,
+ struct mdio_gpio_info *bitbang,
int bus_id)
{
struct mii_bus *new_bus;
- struct mdio_gpio_info *bitbang;
- int i;
- int mdc, mdio, mdo;
- unsigned long mdc_flags = GPIOF_OUT_INIT_LOW;
- unsigned long mdio_flags = GPIOF_DIR_IN;
- unsigned long mdo_flags = GPIOF_OUT_INIT_HIGH;
-
- bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL);
- if (!bitbang)
- goto out;
bitbang->ctrl.ops = &mdio_gpio_ops;
- bitbang->ctrl.reset = pdata->reset;
- mdc = pdata->mdc;
- bitbang->mdc = gpio_to_desc(mdc);
- if (pdata->mdc_active_low)
- mdc_flags = GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW;
- mdio = pdata->mdio;
- bitbang->mdio = gpio_to_desc(mdio);
- if (pdata->mdio_active_low)
- mdio_flags |= GPIOF_ACTIVE_LOW;
- mdo = pdata->mdo;
- if (mdo) {
- bitbang->mdo = gpio_to_desc(mdo);
- if (pdata->mdo_active_low)
- mdo_flags = GPIOF_OUT_INIT_LOW | GPIOF_ACTIVE_LOW;
- }
new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
if (!new_bus)
- goto out;
-
- new_bus->name = "GPIO Bitbanged MDIO",
+ return NULL;
- new_bus->phy_mask = pdata->phy_mask;
- new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask;
- memcpy(new_bus->irq, pdata->irqs, sizeof(new_bus->irq));
+ new_bus->name = "GPIO Bitbanged MDIO";
new_bus->parent = dev;
- if (new_bus->phy_mask == ~0)
- goto out_free_bus;
-
- for (i = 0; i < PHY_MAX_ADDR; i++)
- if (!new_bus->irq[i])
- new_bus->irq[i] = PHY_POLL;
-
if (bus_id != -1)
snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
else
strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE);
- if (devm_gpio_request_one(dev, mdc, mdc_flags, "mdc"))
- goto out_free_bus;
-
- if (devm_gpio_request_one(dev, mdio, mdio_flags, "mdio"))
- goto out_free_bus;
-
- if (mdo && devm_gpio_request_one(dev, mdo, mdo_flags, "mdo"))
- goto out_free_bus;
-
dev_set_drvdata(dev, new_bus);
return new_bus;
-
-out_free_bus:
- free_mdio_bitbang(new_bus);
-out:
- return NULL;
}
static void mdio_gpio_bus_deinit(struct device *dev)
@@ -216,34 +153,33 @@ static void mdio_gpio_bus_destroy(struct device *dev)
static int mdio_gpio_probe(struct platform_device *pdev)
{
- struct mdio_gpio_platform_data *pdata;
+ struct mdio_gpio_info *bitbang;
struct mii_bus *new_bus;
int ret, bus_id;
+ bitbang = devm_kzalloc(&pdev->dev, sizeof(*bitbang), GFP_KERNEL);
+ if (!bitbang)
+ return -ENOMEM;
+
+ ret = mdio_gpio_get_data(&pdev->dev, bitbang);
+ if (ret)
+ return ret;
+
if (pdev->dev.of_node) {
- pdata = mdio_gpio_of_get_data(pdev);
bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
if (bus_id < 0) {
dev_warn(&pdev->dev, "failed to get alias id\n");
bus_id = 0;
}
} else {
- pdata = dev_get_platdata(&pdev->dev);
bus_id = pdev->id;
}
- if (!pdata)
- return -ENODEV;
-
- new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id);
+ new_bus = mdio_gpio_bus_init(&pdev->dev, bitbang, bus_id);
if (!new_bus)
return -ENODEV;
- if (pdev->dev.of_node)
- ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
- else
- ret = mdiobus_register(new_bus);
-
+ ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
if (ret)
mdio_gpio_bus_deinit(&pdev->dev);
diff --git a/drivers/net/phy/mdio-mscc-miim.c b/drivers/net/phy/mdio-mscc-miim.c
new file mode 100644
index 000000000000..badbc99bedd3
--- /dev/null
+++ b/drivers/net/phy/mdio-mscc-miim.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Driver for the MDIO interface of Microsemi network switches.
+ *
+ * Author: Alexandre Belloni <alexandre.belloni@bootlin.com>
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of_mdio.h>
+
+#define MSCC_MIIM_REG_STATUS 0x0
+#define MSCC_MIIM_STATUS_STAT_BUSY BIT(3)
+#define MSCC_MIIM_REG_CMD 0x8
+#define MSCC_MIIM_CMD_OPR_WRITE BIT(1)
+#define MSCC_MIIM_CMD_OPR_READ BIT(2)
+#define MSCC_MIIM_CMD_WRDATA_SHIFT 4
+#define MSCC_MIIM_CMD_REGAD_SHIFT 20
+#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
+#define MSCC_MIIM_CMD_VLD BIT(31)
+#define MSCC_MIIM_REG_DATA 0xC
+#define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17))
+
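+/* Hypothetical example: reading register 2 of the PHY at address 1 issues
+ * the command word MSCC_MIIM_CMD_VLD | (1 << MSCC_MIIM_CMD_PHYAD_SHIFT) |
+ * (2 << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ, then waits for
+ * MSCC_MIIM_STATUS_STAT_BUSY to clear and reads MSCC_MIIM_REG_DATA.
+ */
+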
+#define MSCC_PHY_REG_PHY_CFG 0x0
+#define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define PHY_CFG_PHY_COMMON_RESET BIT(4)
+#define PHY_CFG_PHY_RESET (BIT(5) | BIT(6) | BIT(7) | BIT(8))
+#define MSCC_PHY_REG_PHY_STATUS 0x4
+
+struct mscc_miim_dev {
+ void __iomem *regs;
+ void __iomem *phy_regs;
+};
+
+static int mscc_miim_wait_ready(struct mii_bus *bus)
+{
+ struct mscc_miim_dev *miim = bus->priv;
+ u32 val;
+
+ readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val,
+ !(val & MSCC_MIIM_STATUS_STAT_BUSY), 100, 250000);
+ if (val & MSCC_MIIM_STATUS_STAT_BUSY)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct mscc_miim_dev *miim = bus->priv;
+ u32 val;
+ int ret;
+
+ ret = mscc_miim_wait_ready(bus);
+ if (ret)
+ goto out;
+
+ writel(MSCC_MIIM_CMD_VLD | (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
+ (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ,
+ miim->regs + MSCC_MIIM_REG_CMD);
+
+ ret = mscc_miim_wait_ready(bus);
+ if (ret)
+ goto out;
+
+ val = readl(miim->regs + MSCC_MIIM_REG_DATA);
+ if (val & MSCC_MIIM_DATA_ERROR) {
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = val & 0xFFFF;
+out:
+ return ret;
+}
+
+static int mscc_miim_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
+{
+ struct mscc_miim_dev *miim = bus->priv;
+ int ret;
+
+ ret = mscc_miim_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel(MSCC_MIIM_CMD_VLD | (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
+ (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) |
+ (value << MSCC_MIIM_CMD_WRDATA_SHIFT) |
+ MSCC_MIIM_CMD_OPR_WRITE,
+ miim->regs + MSCC_MIIM_REG_CMD);
+
+out:
+ return ret;
+}
+
+static int mscc_miim_reset(struct mii_bus *bus)
+{
+ struct mscc_miim_dev *miim = bus->priv;
+
+ if (miim->phy_regs) {
+ writel(0, miim->phy_regs + MSCC_PHY_REG_PHY_CFG);
+ writel(0x1ff, miim->phy_regs + MSCC_PHY_REG_PHY_CFG);
+ mdelay(500);
+ }
+
+ return 0;
+}
+
+static int mscc_miim_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct mii_bus *bus;
+ struct mscc_miim_dev *dev;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "mscc_miim";
+ bus->read = mscc_miim_read;
+ bus->write = mscc_miim_write;
+ bus->reset = mscc_miim_reset;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+ bus->parent = &pdev->dev;
+
+ dev = bus->priv;
+ dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->regs)) {
+ dev_err(&pdev->dev, "Unable to map MIIM registers\n");
+ return PTR_ERR(dev->regs);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ dev->phy_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->phy_regs)) {
+ dev_err(&pdev->dev, "Unable to map internal phy registers\n");
+ return PTR_ERR(dev->phy_regs);
+ }
+ }
+
+ ret = of_mdiobus_register(bus, pdev->dev.of_node);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+}
+
+static int mscc_miim_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+
+ return 0;
+}
+
+static const struct of_device_id mscc_miim_match[] = {
+ { .compatible = "mscc,ocelot-miim" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mscc_miim_match);
+
+static struct platform_driver mscc_miim_driver = {
+ .probe = mscc_miim_probe,
+ .remove = mscc_miim_remove,
+ .driver = {
+ .name = "mscc-miim",
+ .of_match_table = mscc_miim_match,
+ },
+};
+
+module_platform_driver(mscc_miim_driver);
+
+MODULE_DESCRIPTION("Microsemi MIIM driver");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@bootlin.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index f41b224a9cdb..3db06b40580d 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -573,9 +573,40 @@ static int ksz9031_config_init(struct phy_device *phydev)
ksz9031_of_load_skew_values(phydev, of_node,
MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
tx_data_skews, 4);
+
+ /* Silicon Errata Sheet (DS80000691D or DS80000692D):
+ * When the device links in the 1000BASE-T slave mode only,
+ * the optional 125MHz reference output clock (CLK125_NDO)
+ * has wide duty cycle variation.
+ *
+ * The optional CLK125_NDO clock does not meet the RGMII
+ * 45/55 percent (min/max) duty cycle requirement and therefore
+ * cannot be used directly by the MAC side for clocking
+ * applications that have setup/hold time requirements on
+ * rising and falling clock edges.
+ *
+ * Workaround:
+ * Force the phy to be the master to receive a stable clock
+ * which meets the duty cycle requirement.
+ */
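+ /* The workaround is opted into via the boolean property
+ * "micrel,force-master", read from the same device-tree node
+ * as the skew values above.
+ */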
+ if (of_property_read_bool(of_node, "micrel,force-master")) {
+ result = phy_read(phydev, MII_CTRL1000);
+ if (result < 0)
+ goto err_force_master;
+
+ /* enable master mode, config & prefer master */
+ result |= CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER;
+ result = phy_write(phydev, MII_CTRL1000, result);
+ if (result < 0)
+ goto err_force_master;
+ }
}
return ksz9031_center_flp_timing(phydev);
+
+err_force_master:
+ phydev_err(phydev, "failed to force the phy to master mode\n");
+ return result;
}
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
@@ -650,9 +681,6 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data)
}
}
-#ifndef UINT64_MAX
-#define UINT64_MAX (u64)(~((u64)0))
-#endif
static u64 kszphy_get_stat(struct phy_device *phydev, int i)
{
struct kszphy_hw_stat stat = kszphy_hw_stats[i];
@@ -662,7 +690,7 @@ static u64 kszphy_get_stat(struct phy_device *phydev, int i)
val = phy_read(phydev, stat.reg);
if (val < 0) {
- ret = UINT64_MAX;
+ ret = U64_MAX;
} else {
val = val & ((1 << stat.bits) - 1);
priv->stats[i] += val;
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 0f293ef28935..2d67937866a3 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -20,6 +20,9 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/microchipphy.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <dt-bindings/net/microchip-lan78xx.h>
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC "Microchip LAN88XX PHY driver"
@@ -30,6 +33,16 @@ struct lan88xx_priv {
__u32 wolopts;
};
+static int lan88xx_read_page(struct phy_device *phydev)
+{
+ return __phy_read(phydev, LAN88XX_EXT_PAGE_ACCESS);
+}
+
+static int lan88xx_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
+}
+
static int lan88xx_phy_config_intr(struct phy_device *phydev)
{
int rc;
@@ -66,10 +79,156 @@ static int lan88xx_suspend(struct phy_device *phydev)
return 0;
}
+static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr,
+ u32 data)
+{
+ int val, save_page, ret = 0;
+ u16 buf;
+
+ /* Save current page */
+ save_page = phy_save_page(phydev);
+ if (save_page < 0) {
+ pr_warn("Failed to get current page\n");
+ goto err;
+ }
+
+ /* Switch to TR page */
+ lan88xx_write_page(phydev, LAN88XX_EXT_PAGE_ACCESS_TR);
+
+ ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA,
+ (data & 0xFFFF));
+ if (ret < 0) {
+ pr_warn("Failed to write TR low data\n");
+ goto err;
+ }
+
+ ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA,
+ (data & 0x00FF0000) >> 16);
+ if (ret < 0) {
+ pr_warn("Failed to write TR high data\n");
+ goto err;
+ }
+
+ /* Config control bits [15:13] of register */
+ buf = (regaddr & ~(0x3 << 13));/* Clr [14:13] to write data in reg */
+ buf |= 0x8000; /* Set [15] to Packet transmit */
+
+ ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf);
+ if (ret < 0) {
+ pr_warn("Failed to write data in reg\n");
+ goto err;
+ }
+
+ usleep_range(1000, 2000); /* Wait for data to be written */
+ val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR);
+ if (!(val & 0x8000))
+ pr_warn("TR Register[0x%X] configuration failed\n", regaddr);
+err:
+ return phy_restore_page(phydev, save_page, ret);
+}
+
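+/* Worked example using the first call in lan88xx_config_TR_regs() below:
+ * writing 0x12B00A to TR register address 0x0F82 results in
+ * LOW_DATA = 0xB00A, HIGH_DATA = 0x12 and a control write of 0x8F82
+ * (the register address with bit 15 set to start the transaction).
+ */
+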
+static void lan88xx_config_TR_regs(struct phy_device *phydev)
+{
+ int err;
+
+ /* Get access to Channel 0x1, Node 0xF , Register 0x01.
+ * Write 24-bit value 0x12B00A to register. Setting MrvlTrFix1000Kf,
+ * MrvlTrFix1000Kp, MasterEnableTR bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x0F82]\n");
+
+ /* Get access to Channel b'10, Node b'1101, Register 0x06.
+ * Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv,
+ * SSTrKp1000Mas bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x168C]\n");
+
+ /* Get access to Channel b'10, Node b'1111, Register 0x11.
+ * Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh
+ * bits
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x17A2]\n");
+
+ /* Get access to Channel b'10, Node b'1101, Register 0x10.
+ * Write 24-bit value 0xEEFFDD to register. Setting
+ * eee_TrKp1Long_1000, eee_TrKp2Long_1000, eee_TrKp3Long_1000,
+ * eee_TrKp1Short_1000,eee_TrKp2Short_1000, eee_TrKp3Short_1000 bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x16A0]\n");
+
+ /* Get access to Channel b'10, Node b'1101, Register 0x13.
+ * Write 24-bit value 0x071448 to register. Setting
+ * slv_lpi_tr_tmr_val1, slv_lpi_tr_tmr_val2 bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x16A6]\n");
+
+ /* Get access to Channel b'10, Node b'1101, Register 0x12.
+ * Write 24-bit value 0x13132F to register. Setting
+ * slv_sigdet_timer_val1, slv_sigdet_timer_val2 bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x16A4]\n");
+
+ /* Get access to Channel b'10, Node b'1101, Register 0x14.
+ * Write 24-bit value 0x0 to register. Setting eee_3level_delay,
+ * eee_TrKf_freeze_delay bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x16A8]\n");
+
+ /* Get access to Channel b'01, Node b'1111, Register 0x34.
+ * Write 24-bit value 0x91B06C to register. Setting
+ * FastMseSearchThreshLong1000, FastMseSearchThreshShort1000,
+ * FastMseSearchUpdGain1000 bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x0FE8]\n");
+
+ /* Get access to Channel b'01, Node b'1111, Register 0x3E.
+ * Write 24-bit value 0xC0A028 to register. Setting
+ * FastMseKp2ThreshLong1000, FastMseKp2ThreshShort1000,
+ * FastMseKp2UpdGain1000, FastMseKp2ExitEn1000 bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x0FFC]\n");
+
+ /* Get access to Channel b'01, Node b'1111, Register 0x35.
+ * Write 24-bit value 0x041600 to register. Setting
+ * FastMseSearchPhShNum1000, FastMseSearchClksPerPh1000,
+ * FastMsePhChangeDelay1000 bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x0FEA]\n");
+
+ /* Get access to Channel b'10, Node b'1101, Register 0x03.
+ * Write 24-bit value 0x000004 to register. Setting TrFreeze bits.
+ */
+ err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004);
+ if (err < 0)
+ pr_warn("Failed to Set Register[0x1686]\n");
+}
+
static int lan88xx_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct lan88xx_priv *priv;
+ u32 led_modes[4];
+ int len;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -77,6 +236,27 @@ static int lan88xx_probe(struct phy_device *phydev)
priv->wolopts = 0;
+ len = of_property_read_variable_u32_array(dev->of_node,
+ "microchip,led-modes",
+ led_modes,
+ 0,
+ ARRAY_SIZE(led_modes));
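+ /* Hypothetical example: microchip,led-modes = <1 6> programs LED0
+ * with mode 1 and LED1 with mode 6 (one 4-bit field per LED in the
+ * register written below) and forces the remaining LEDs off.
+ */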
+ if (len >= 0) {
+ u32 reg = 0;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (led_modes[i] > 15)
+ return -EINVAL;
+ reg |= led_modes[i] << (i * 4);
+ }
+ for (; i < ARRAY_SIZE(led_modes); i++)
+ reg |= LAN78XX_FORCE_LED_OFF << (i * 4);
+ (void)phy_write(phydev, LAN78XX_PHY_LED_MODE_SELECT, reg);
+ } else if (len == -EOVERFLOW) {
+ return -EINVAL;
+ }
+
/* these values can be used to identify internal PHY */
priv->chip_id = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_ID);
priv->chip_rev = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_REV);
@@ -132,6 +312,25 @@ static void lan88xx_set_mdix(struct phy_device *phydev)
phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
}
+static int lan88xx_config_init(struct phy_device *phydev)
+{
+ int val;
+
+ genphy_config_init(phydev);
+ /* Zero-detect delay enable */
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ PHY_ARDENNES_MMD_DEV_3_PHY_CFG);
+ val |= PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_;
+
+ phy_write_mmd(phydev, MDIO_MMD_PCS, PHY_ARDENNES_MMD_DEV_3_PHY_CFG,
+ val);
+
+ /* Config DSP registers */
+ lan88xx_config_TR_regs(phydev);
+
+ return 0;
+}
+
static int lan88xx_config_aneg(struct phy_device *phydev)
{
lan88xx_set_mdix(phydev);
@@ -151,7 +350,7 @@ static struct phy_driver microchip_phy_driver[] = {
.probe = lan88xx_probe,
.remove = lan88xx_remove,
- .config_init = genphy_config_init,
+ .config_init = lan88xx_config_init,
.config_aneg = lan88xx_config_aneg,
.ack_interrupt = lan88xx_phy_ack_interrupt,
@@ -160,6 +359,8 @@ static struct phy_driver microchip_phy_driver[] = {
.suspend = lan88xx_suspend,
.resume = genphy_resume,
.set_wol = lan88xx_set_wol,
+ .read_page = lan88xx_read_page,
+ .write_page = lan88xx_write_page,
} };
module_phy_driver(microchip_phy_driver);
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
new file mode 100644
index 000000000000..b1917dd1978a
--- /dev/null
+++ b/drivers/net/phy/microchip_t1.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Microchip Technology
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+/* Interrupt Source Register */
+#define LAN87XX_INTERRUPT_SOURCE (0x18)
+
+/* Interrupt Mask Register */
+#define LAN87XX_INTERRUPT_MASK (0x19)
+#define LAN87XX_MASK_LINK_UP (0x0004)
+#define LAN87XX_MASK_LINK_DOWN (0x0002)
+
+#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
+#define DRIVER_DESC "Microchip LAN87XX T1 PHY driver"
+
+static int lan87xx_phy_config_intr(struct phy_device *phydev)
+{
+ int rc, val = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* unmask all source and clear them before enable */
+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF);
+ rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+ val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN;
+ }
+
+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+
+ return rc < 0 ? rc : 0;
+}
+
+static int lan87xx_phy_ack_interrupt(struct phy_device *phydev)
+{
+ int rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+
+ return rc < 0 ? rc : 0;
+}
+
+static struct phy_driver microchip_t1_phy_driver[] = {
+ {
+ .phy_id = 0x0007c150,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Microchip LAN87xx T1",
+
+ .features = SUPPORTED_100baseT_Full,
+ .flags = PHY_HAS_INTERRUPT,
+
+ .config_init = genphy_config_init,
+ .config_aneg = genphy_config_aneg,
+
+ .ack_interrupt = lan87xx_phy_ack_interrupt,
+ .config_intr = lan87xx_phy_config_intr,
+
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }
+};
+
+module_phy_driver(microchip_t1_phy_driver);
+
+static struct mdio_device_id __maybe_unused microchip_t1_tbl[] = {
+ { 0x0007c150, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, microchip_t1_tbl);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ac23322a32e1..9e4ba8e80a18 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -535,8 +535,17 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
/* Grab the bits from PHYIR1, and put them in the upper half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
- if (phy_reg < 0)
+ if (phy_reg < 0) {
+ /* if there is no device, return without an error so scanning
+ * the bus works properly
+ */
+ if (phy_reg == -EIO || phy_reg == -ENODEV) {
+ *phy_id = 0xffffffff;
+ return 0;
+ }
+
return -EIO;
+ }
*phy_id = (phy_reg & 0xffff) << 16;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index c582b2d7546c..af4dc4425be2 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -19,6 +19,7 @@
#include <linux/phylink.h>
#include <linux/rtnetlink.h>
#include <linux/spinlock.h>
+#include <linux/timer.h>
#include <linux/workqueue.h>
#include "sfp.h"
@@ -54,6 +55,7 @@ struct phylink {
/* The link configuration settings */
struct phylink_link_state link_config;
struct gpio_desc *link_gpio;
+ struct timer_list link_poll;
void (*get_fixed_state)(struct net_device *dev,
struct phylink_link_state *s);
@@ -360,7 +362,7 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
if (pl->get_fixed_state)
pl->get_fixed_state(pl->netdev, state);
else if (pl->link_gpio)
- state->link = !!gpiod_get_value(pl->link_gpio);
+ state->link = !!gpiod_get_value_cansleep(pl->link_gpio);
}
/* Flow control is resolved according to our and the link partners
@@ -500,6 +502,15 @@ static void phylink_run_resolve(struct phylink *pl)
queue_work(system_power_efficient_wq, &pl->resolve);
}
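+/* Poll the fixed-link state once per second: re-arm the timer and kick
+ * the resolve worker, which re-reads the link GPIO in
+ * phylink_get_fixed_state().
+ */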
+static void phylink_fixed_poll(struct timer_list *t)
+{
+ struct phylink *pl = container_of(t, struct phylink, link_poll);
+
+ mod_timer(t, jiffies + HZ);
+
+ phylink_run_resolve(pl);
+}
+
static const struct sfp_upstream_ops sfp_phylink_ops;
static int phylink_register_sfp(struct phylink *pl,
@@ -572,6 +583,7 @@ struct phylink *phylink_create(struct net_device *ndev,
pl->link_config.an_enabled = true;
pl->ops = ops;
__set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
+ timer_setup(&pl->link_poll, phylink_fixed_poll, 0);
bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
linkmode_copy(pl->link_config.advertising, pl->supported);
@@ -612,6 +624,8 @@ void phylink_destroy(struct phylink *pl)
{
if (pl->sfp_bus)
sfp_unregister_upstream(pl->sfp_bus);
+ if (!IS_ERR_OR_NULL(pl->link_gpio))
+ gpiod_put(pl->link_gpio);
cancel_work_sync(&pl->resolve);
kfree(pl);
@@ -903,6 +917,8 @@ void phylink_start(struct phylink *pl)
clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
phylink_run_resolve(pl);
+ if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
+ mod_timer(&pl->link_poll, jiffies + HZ);
if (pl->sfp_bus)
sfp_upstream_start(pl->sfp_bus);
if (pl->phydev)
@@ -927,6 +943,8 @@ void phylink_stop(struct phylink *pl)
phy_stop(pl->phydev);
if (pl->sfp_bus)
sfp_upstream_stop(pl->sfp_bus);
+ if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
+ del_timer_sync(&pl->link_poll);
set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
queue_work(system_power_efficient_wq, &pl->resolve);
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 0381da78d228..d437f4f5ed52 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -125,13 +125,20 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
if (id->base.br_nominal) {
if (id->base.br_nominal != 255) {
br_nom = id->base.br_nominal * 100;
- br_min = br_nom + id->base.br_nominal * id->ext.br_min;
+ br_min = br_nom - id->base.br_nominal * id->ext.br_min;
br_max = br_nom + id->base.br_nominal * id->ext.br_max;
} else if (id->ext.br_max) {
br_nom = 250 * id->ext.br_max;
br_max = br_nom + br_nom * id->ext.br_min / 100;
br_min = br_nom - br_nom * id->ext.br_min / 100;
}
+
+ /* When using passive cables, in case neither BR,min nor BR,max
+ * are specified, set br_min to 0 as the nominal value is then
+ * used as the maximum.
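+ *
+ * Hypothetical example: a passive cable reporting br_nominal = 103
+ * with ext br_min and br_max both zero yields br_nom = br_min =
+ * br_max = 10300, so br_min is cleared and the nominal rate acts as
+ * an upper bound only.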
+ */
+ if (br_min == br_max && id->base.sfp_ct_passive)
+ br_min = 0;
}
/* Set ethtool support from the compliance fields. */
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index be399d645224..c328208388da 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -168,9 +168,6 @@ static void smsc_get_strings(struct phy_device *phydev, u8 *data)
}
}
-#ifndef UINT64_MAX
-#define UINT64_MAX (u64)(~((u64)0))
-#endif
static u64 smsc_get_stat(struct phy_device *phydev, int i)
{
struct smsc_hw_stat stat = smsc_hw_stats[i];
@@ -179,7 +176,7 @@ static u64 smsc_get_stat(struct phy_device *phydev, int i)
val = phy_read(phydev, stat.reg);
if (val < 0)
- ret = UINT64_MAX;
+ ret = U64_MAX;
else
ret = val;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 1483bc7b01e1..7df07337d69c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
error = -EINVAL;
+
+ if (sockaddr_len != sizeof(struct sockaddr_pppox))
+ goto end;
+
if (sp->sa_protocol != PX_PROTO_OE)
goto end;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a6c6ce19eeee..9dbd390ace34 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
}
}
+static bool __team_option_inst_tmp_find(const struct list_head *opts,
+ const struct team_option_inst *needle)
+{
+ struct team_option_inst *opt_inst;
+
+ list_for_each_entry(opt_inst, opts, tmp_list)
+ if (opt_inst == needle)
+ return true;
+ return false;
+}
+
static int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
@@ -1061,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int __team_port_enable_netpoll(struct team_port *port)
{
struct netpoll *np;
int err;
- if (!team->dev->npinfo)
- return 0;
-
np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return -ENOMEM;
@@ -1082,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
return err;
}
+static int team_port_enable_netpoll(struct team_port *port)
+{
+ if (!port->team->dev->npinfo)
+ return 0;
+
+ return __team_port_enable_netpoll(port);
+}
+
static void team_port_disable_netpoll(struct team_port *port)
{
struct netpoll *np = port->np;
@@ -1096,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port)
kfree(np);
}
#else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team_port *port)
{
return 0;
}
@@ -1210,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
goto err_vids_add;
}
- err = team_port_enable_netpoll(team, port);
+ err = team_port_enable_netpoll(port);
if (err) {
netdev_err(dev, "Failed to enable netpoll on device %s\n",
portname);
@@ -1907,7 +1923,7 @@ static int team_netpoll_setup(struct net_device *dev,
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = team_port_enable_netpoll(team, port);
+ err = __team_port_enable_netpoll(port);
if (err) {
__team_netpoll_cleanup(team);
break;
@@ -2568,6 +2584,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
if (err)
goto team_put;
opt_inst->changed = true;
+
+ /* dumb/evil user-space can send us duplicate opt,
+ * keep only the last one
+ */
+ if (__team_option_inst_tmp_find(&opt_inst_list,
+ opt_inst))
+ continue;
+
list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
@@ -2918,7 +2942,7 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_CHANGE:
if (netif_running(port->dev))
team_port_change_check(port,
- !!netif_carrier_ok(port->dev));
+ !!netif_oper_up(port->dev));
break;
case NETDEV_UNREGISTER:
team_del_slave(port->team->dev, dev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1e58be152d5c..99bf3cee1345 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -525,11 +525,6 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
rcu_read_lock();
- /* We may get a very small possibility of OOO during switching, not
- * worth to optimize.*/
- if (tun->numqueues == 1 || tfile->detached)
- goto unlock;
-
e = tun_flow_find(head, rxhash);
if (likely(e)) {
/* TODO: keep queueing to old queue until it's empty? */
@@ -548,7 +543,6 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
spin_unlock_bh(&tun->lock);
}
-unlock:
rcu_read_unlock();
}
@@ -681,15 +675,6 @@ static void tun_queue_purge(struct tun_file *tfile)
skb_queue_purge(&tfile->sk.sk_error_queue);
}
-static void tun_cleanup_tx_ring(struct tun_file *tfile)
-{
- if (tfile->tx_ring.queue) {
- ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
- xdp_rxq_info_unreg(&tfile->xdp_rxq);
- memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
- }
-}
-
static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
@@ -736,7 +721,9 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
- tun_cleanup_tx_ring(tfile);
+ if (tun)
+ xdp_rxq_info_unreg(&tfile->xdp_rxq);
+ ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
sock_put(&tfile->sk);
}
}
@@ -783,14 +770,14 @@ static void tun_detach_all(struct net_device *dev)
tun_napi_del(tun, tfile);
/* Drop read queue */
tun_queue_purge(tfile);
+ xdp_rxq_info_unreg(&tfile->xdp_rxq);
sock_put(&tfile->sk);
- tun_cleanup_tx_ring(tfile);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
tun_enable_queue(tfile);
tun_queue_purge(tfile);
+ xdp_rxq_info_unreg(&tfile->xdp_rxq);
sock_put(&tfile->sk);
- tun_cleanup_tx_ring(tfile);
}
BUG_ON(tun->numdisabled != 0);
@@ -834,7 +821,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
}
if (!tfile->detached &&
- ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
+ ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
+ GFP_KERNEL, tun_ptr_free)) {
err = -ENOMEM;
goto out;
}
@@ -1108,12 +1096,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
len = run_ebpf_filter(tun, skb, len);
-
- /* Trim extra bytes since we may insert vlan proto & TCI
- * in tun_put_user().
- */
- len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
- if (len <= 0 || pskb_trim(skb, len))
+ if (len == 0 || pskb_trim(skb, len))
goto drop;
if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
@@ -1696,6 +1679,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
return NULL;
case XDP_PASS:
delta = orig_data - xdp.data;
+ len = xdp.data_end - xdp.data;
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -1716,7 +1700,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
}
skb_reserve(skb, pad - delta);
- skb_put(skb, len + delta);
+ skb_put(skb, len);
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
@@ -1937,10 +1921,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rcu_read_unlock();
}
- rcu_read_lock();
- if (!rcu_dereference(tun->steering_prog))
+ /* Compute the costly rx hash only if needed for flow updates.
+ * There is a small possibility of out-of-order delivery while
+ * switching queues, but it is not worth optimizing for.
+ */
+ if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
+ !tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
- rcu_read_unlock();
if (frags) {
/* Exercise flow dissector code path. */
@@ -2857,10 +2844,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, int ifreq_len)
{
struct tun_file *tfile = file->private_data;
+ struct net *net = sock_net(&tfile->sk);
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
struct ifreq ifr;
- struct net *net;
kuid_t owner;
kgid_t group;
int sndbuf;
@@ -2884,14 +2871,18 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
*/
return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
(unsigned int __user*)argp);
- } else if (cmd == TUNSETQUEUE)
+ } else if (cmd == TUNSETQUEUE) {
return tun_set_queue(file, &ifr);
+ } else if (cmd == SIOCGSKNS) {
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
+ return open_related_ns(&net->ns, get_net_ns);
+ }
ret = 0;
rtnl_lock();
tun = tun_get(tfile);
- net = sock_net(&tfile->sk);
if (cmd == TUNSETIFF) {
ret = -EEXIST;
if (tun)
@@ -2921,14 +2912,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tfile->ifindex = ifindex;
goto unlock;
}
- if (cmd == SIOCGSKNS) {
- ret = -EPERM;
- if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
- goto unlock;
-
- ret = open_related_ns(&net->ns, get_net_ns);
- goto unlock;
- }
ret = -EBADFD;
if (!tun)
@@ -3232,6 +3215,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
&tun_proto, 0);
if (!tfile)
return -ENOMEM;
+ if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
+ sk_free(&tfile->sk);
+ return -ENOMEM;
+ }
+
RCU_INIT_POINTER(tfile->tun, NULL);
tfile->flags = 0;
tfile->ifindex = 0;
@@ -3252,8 +3240,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
- memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-
return 0;
}
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index f28bd74ac275..418b0904cecb 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -111,6 +111,7 @@ config USB_LAN78XX
select MII
select PHYLIB
select MICROCHIP_PHY
+ select FIXED_PHY
help
This option adds support for Microchip LAN78XX based USB 2
& USB 3 10/100/1000 Ethernet adapters.
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 0867f7275852..8dff87ec6d99 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -36,13 +36,14 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
-#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include "lan78xx.h"
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME "lan78xx"
-#define DRIVER_VERSION "1.0.6"
#define TX_TIMEOUT_JIFFIES (5 * HZ)
#define THROTTLE_JIFFIES (HZ / 8)
@@ -278,6 +279,30 @@ struct lan78xx_statstage64 {
u64 eee_tx_lpi_time;
};
+static u32 lan78xx_regs[] = {
+ ID_REV,
+ INT_STS,
+ HW_CFG,
+ PMT_CTL,
+ E2P_CMD,
+ E2P_DATA,
+ USB_STATUS,
+ VLAN_TYPE,
+ MAC_CR,
+ MAC_RX,
+ MAC_TX,
+ FLOW,
+ ERR_STS,
+ MII_ACC,
+ MII_DATA,
+ EEE_TX_LPI_REQ_DLY,
+ EEE_TW_TX_SYS,
+ EEE_TX_LPI_REM_DLY,
+ WUCSR
+};
+
+#define PHY_REG_SIZE (32 * sizeof(u32))
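+
+/* ethtool register dump layout: a snapshot of lan78xx_regs[] followed by
+ * the 32 standard MII registers (0-31) of the attached PHY, if any
+ * (see lan78xx_get_regs() below).
+ */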
+
struct lan78xx_net;
struct lan78xx_priv {
@@ -1477,7 +1502,6 @@ static void lan78xx_get_drvinfo(struct net_device *net,
struct lan78xx_net *dev = netdev_priv(net);
strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
@@ -1605,6 +1629,34 @@ exit:
return ret;
}
+static int lan78xx_get_regs_len(struct net_device *netdev)
+{
+ if (!netdev->phydev)
+ return (sizeof(lan78xx_regs));
+ else
+ return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
+}
+
+static void
+lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *buf)
+{
+ u32 *data = buf;
+ int i, j;
+ struct lan78xx_net *dev = netdev_priv(netdev);
+
+ /* Read Device/MAC registers */
+ for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++)
+ lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
+
+ if (!netdev->phydev)
+ return;
+
+ /* Read PHY registers */
+ for (j = 0; j < 32; i++, j++)
+ data[i] = phy_read(netdev->phydev, j);
+}
+
static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_link = lan78xx_get_link,
.nway_reset = phy_ethtool_nway_reset,
@@ -1625,6 +1677,8 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.set_pauseparam = lan78xx_set_pause,
.get_link_ksettings = lan78xx_get_link_ksettings,
.set_link_ksettings = lan78xx_set_link_ksettings,
+ .get_regs_len = lan78xx_get_regs_len,
+ .get_regs = lan78xx_get_regs,
};
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -1652,34 +1706,31 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
addr[5] = (addr_hi >> 8) & 0xFF;
if (!is_valid_ether_addr(addr)) {
- /* reading mac address from EEPROM or OTP */
- if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
- addr) == 0) ||
- (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
- addr) == 0)) {
- if (is_valid_ether_addr(addr)) {
- /* eeprom values are valid so use them */
- netif_dbg(dev, ifup, dev->net,
- "MAC address read from EEPROM");
- } else {
- /* generate random MAC */
- random_ether_addr(addr);
- netif_dbg(dev, ifup, dev->net,
- "MAC address set to random addr");
- }
-
- addr_lo = addr[0] | (addr[1] << 8) |
- (addr[2] << 16) | (addr[3] << 24);
- addr_hi = addr[4] | (addr[5] << 8);
-
- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
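+ /* Pick the address in order of preference: device tree,
+ * then EEPROM/OTP, then a randomly generated one.
+ */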
+ if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
+ /* valid address present in Device Tree */
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address read from Device Tree");
+ } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
+ ETH_ALEN, addr) == 0) ||
+ (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
+ ETH_ALEN, addr) == 0)) &&
+ is_valid_ether_addr(addr)) {
+ /* eeprom values are valid so use them */
+ netif_dbg(dev, ifup, dev->net,
+ "MAC address read from EEPROM");
} else {
/* generate random MAC */
random_ether_addr(addr);
netif_dbg(dev, ifup, dev->net,
"MAC address set to random addr");
}
+
+ addr_lo = addr[0] | (addr[1] << 8) |
+ (addr[2] << 16) | (addr[3] << 24);
+ addr_hi = addr[4] | (addr[5] << 8);
+
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
}
ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
@@ -1762,6 +1813,7 @@ done:
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
+ struct device_node *node;
int ret;
dev->mdiobus = mdiobus_alloc();
@@ -1790,7 +1842,10 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
break;
}
- ret = mdiobus_register(dev->mdiobus);
+ node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
+ ret = of_mdiobus_register(dev->mdiobus, node);
+ if (node)
+ of_node_put(node);
if (ret) {
netdev_err(dev->net, "can't register MDIO bus\n");
goto exit1;
@@ -2003,52 +2058,91 @@ static int ksz9031rnx_fixup(struct phy_device *phydev)
return 1;
}
-static int lan78xx_phy_init(struct lan78xx_net *dev)
+static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
+ u32 buf;
int ret;
- u32 mii_adv;
+ struct fixed_phy_status fphy_status = {
+ .link = 1,
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ };
struct phy_device *phydev;
phydev = phy_find_first(dev->mdiobus);
if (!phydev) {
- netdev_err(dev->net, "no PHY found\n");
- return -EIO;
- }
-
- if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
- (dev->chipid == ID_REV_CHIP_ID_7850_)) {
- phydev->is_internal = true;
- dev->interface = PHY_INTERFACE_MODE_GMII;
-
- } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
+ netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
+ phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
+ NULL);
+ if (IS_ERR(phydev)) {
+ netdev_err(dev->net, "No PHY/fixed_PHY found\n");
+ return NULL;
+ }
+ netdev_dbg(dev->net, "Registered FIXED PHY\n");
+ dev->interface = PHY_INTERFACE_MODE_RGMII;
+ ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
+ MAC_RGMII_ID_TXC_DELAY_EN_);
+ ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
+ ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+ buf |= HW_CFG_CLK125_EN_;
+ buf |= HW_CFG_REFCLK25_EN_;
+ ret = lan78xx_write_reg(dev, HW_CFG, buf);
+ } else {
if (!phydev->drv) {
netdev_err(dev->net, "no PHY driver found\n");
- return -EIO;
+ return NULL;
}
-
dev->interface = PHY_INTERFACE_MODE_RGMII;
-
/* external PHY fixup for KSZ9031RNX */
ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
ksz9031rnx_fixup);
if (ret < 0) {
- netdev_err(dev->net, "fail to register fixup\n");
- return ret;
+ netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
+ return NULL;
}
/* external PHY fixup for LAN8835 */
ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
lan8835_fixup);
if (ret < 0) {
- netdev_err(dev->net, "fail to register fixup\n");
- return ret;
+ netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
+ return NULL;
}
/* add more external PHY fixup here if needed */
phydev->is_internal = false;
- } else {
- netdev_err(dev->net, "unknown ID found\n");
- ret = -EIO;
- goto error;
+ }
+ return phydev;
+}
+
+static int lan78xx_phy_init(struct lan78xx_net *dev)
+{
+ int ret;
+ u32 mii_adv;
+ struct phy_device *phydev;
+
+ switch (dev->chipid) {
+ case ID_REV_CHIP_ID_7801_:
+ phydev = lan7801_phy_init(dev);
+ if (!phydev) {
+ netdev_err(dev->net, "lan7801: PHY Init Failed");
+ return -EIO;
+ }
+ break;
+
+ case ID_REV_CHIP_ID_7800_:
+ case ID_REV_CHIP_ID_7850_:
+ phydev = phy_find_first(dev->mdiobus);
+ if (!phydev) {
+ netdev_err(dev->net, "no PHY found\n");
+ return -EIO;
+ }
+ phydev->is_internal = true;
+ dev->interface = PHY_INTERFACE_MODE_GMII;
+ break;
+
+ default:
+ netdev_err(dev->net, "Unknown CHIP ID found\n");
+ return -EIO;
}
/* if phyirq is not set, use polling mode in phylib */
@@ -2067,6 +2161,16 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
if (ret) {
netdev_err(dev->net, "can't attach PHY to %s\n",
dev->mdiobus->id);
+ if (dev->chipid == ID_REV_CHIP_ID_7801_) {
+ if (phy_is_pseudo_fixed_link(phydev)) {
+ fixed_phy_unregister(phydev);
+ } else {
+ phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
+ 0xfffffff0);
+ phy_unregister_fixup_for_uid(PHY_LAN8835,
+ 0xfffffff0);
+ }
+ }
return -EIO;
}
@@ -2079,17 +2183,33 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
+ if (phydev->mdio.dev.of_node) {
+ u32 reg;
+ int len;
+
+ len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
+ "microchip,led-modes",
+ sizeof(u32));
+ if (len >= 0) {
+ /* Ensure the appropriate LEDs are enabled */
+ lan78xx_read_reg(dev, HW_CFG, &reg);
+ reg &= ~(HW_CFG_LED0_EN_ |
+ HW_CFG_LED1_EN_ |
+ HW_CFG_LED2_EN_ |
+ HW_CFG_LED3_EN_);
+ reg |= (len > 0) * HW_CFG_LED0_EN_ |
+ (len > 1) * HW_CFG_LED1_EN_ |
+ (len > 2) * HW_CFG_LED2_EN_ |
+ (len > 3) * HW_CFG_LED3_EN_;
+ lan78xx_write_reg(dev, HW_CFG, reg);
+ }
+ }
+
genphy_config_aneg(phydev);
dev->fc_autoneg = phydev->autoneg;
return 0;
-
-error:
- phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
- phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
-
- return ret;
}
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
@@ -3487,6 +3607,7 @@ static void lan78xx_disconnect(struct usb_interface *intf)
struct lan78xx_net *dev;
struct usb_device *udev;
struct net_device *net;
+ struct phy_device *phydev;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
@@ -3495,12 +3616,16 @@ static void lan78xx_disconnect(struct usb_interface *intf)
udev = interface_to_usbdev(intf);
net = dev->net;
+ phydev = net->phydev;
phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
phy_disconnect(net->phydev);
+ if (phy_is_pseudo_fixed_link(phydev))
+ fixed_phy_unregister(phydev);
+
unregister_netdev(net);
cancel_delayed_work_sync(&dev->wq);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index ca066b785e9f..42565dd33aa6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1098,6 +1098,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
@@ -1107,6 +1108,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
+ {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
{QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
{QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
@@ -1342,6 +1344,18 @@ static int qmi_wwan_probe(struct usb_interface *intf,
id->driver_info = (unsigned long)&qmi_wwan_info;
}
+ /* There are devices where the same interface number can be
+ * configured as different functions. We should only bind to
+ * vendor specific functions when matching on interface number
+ */
+ if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
+ desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
+ dev_dbg(&intf->dev,
+ "Rejecting interface number match for class %02x\n",
+ desc->bInterfaceClass);
+ return -ENODEV;
+ }
+
/* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 01694e26f03e..f34794a76c4d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -147,6 +147,17 @@ struct receive_queue {
struct xdp_rxq_info xdp_rxq;
};
+/* Control VQ buffers: protected by the rtnl lock */
+struct control_buf {
+ struct virtio_net_ctrl_hdr hdr;
+ virtio_net_ctrl_ack status;
+ struct virtio_net_ctrl_mq mq;
+ u8 promisc;
+ u8 allmulti;
+ __virtio16 vid;
+ __virtio64 offloads;
+};
+
struct virtnet_info {
struct virtio_device *vdev;
struct virtqueue *cvq;
@@ -192,14 +203,7 @@ struct virtnet_info {
struct hlist_node node;
struct hlist_node node_dead;
- /* Control VQ buffers: protected by the rtnl lock */
- struct virtio_net_ctrl_hdr ctrl_hdr;
- virtio_net_ctrl_ack ctrl_status;
- struct virtio_net_ctrl_mq ctrl_mq;
- u8 ctrl_promisc;
- u8 ctrl_allmulti;
- u16 ctrl_vid;
- u64 ctrl_offloads;
+ struct control_buf *ctrl;
/* Ethtool settings */
u8 duplex;
@@ -606,6 +610,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
case XDP_PASS:
/* Recalculate length in case bpf program changed it */
delta = orig_data - xdp.data;
+ len = xdp.data_end - xdp.data;
break;
case XDP_TX:
xdpf = convert_to_xdp_frame(&xdp);
@@ -642,7 +647,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
goto err;
}
skb_reserve(skb, headroom - delta);
- skb_put(skb, len + delta);
+ skb_put(skb, len);
if (!delta) {
buf += header_offset;
memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
@@ -757,6 +762,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
offset = xdp.data -
page_address(xdp_page) - vi->hdr_len;
+ /* recalculate len if xdp.data or xdp.data_end were
+ * adjusted
+ */
+ len = xdp.data_end - xdp.data + vi->hdr_len;
/* We can only create skb based on xdp_page. */
if (unlikely(xdp_page != page)) {
rcu_read_unlock();
@@ -1277,7 +1286,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
- unsigned int received;
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ struct send_queue *sq;
+ unsigned int received, qp;
bool xdp_xmit = false;
virtnet_poll_cleantx(rq);
@@ -1288,8 +1299,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
if (received < budget)
virtqueue_napi_complete(napi, rq->vq, received);
- if (xdp_xmit)
+ if (xdp_xmit) {
+ qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
+ smp_processor_id();
+ sq = &vi->sq[qp];
+ virtqueue_kick(sq->vq);
xdp_do_flush_map();
+ }
return received;
}
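The queue index computed for the XDP kick above places the XDP transmit queues at the tail of the in-use queue pairs, one per CPU. A trivial sketch of that arithmetic, outside of any kernel context:

/* qp = curr_queue_pairs - xdp_queue_pairs + cpu, as in virtnet_poll() above */
static unsigned int xdp_tx_queue(unsigned int curr_queue_pairs,
				 unsigned int xdp_queue_pairs,
				 unsigned int cpu)
{
	return curr_queue_pairs - xdp_queue_pairs + cpu;
}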
@@ -1469,25 +1485,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
- vi->ctrl_status = ~0;
- vi->ctrl_hdr.class = class;
- vi->ctrl_hdr.cmd = cmd;
+ vi->ctrl->status = ~0;
+ vi->ctrl->hdr.class = class;
+ vi->ctrl->hdr.cmd = cmd;
/* Add header */
- sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
+ sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
sgs[out_num++] = &hdr;
if (out)
sgs[out_num++] = out;
/* Add return status. */
- sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
+ sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
sgs[out_num] = &stat;
BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
if (unlikely(!virtqueue_kick(vi->cvq)))
- return vi->ctrl_status == VIRTIO_NET_OK;
+ return vi->ctrl->status == VIRTIO_NET_OK;
/* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
@@ -1496,7 +1512,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
!virtqueue_is_broken(vi->cvq))
cpu_relax();
- return vi->ctrl_status == VIRTIO_NET_OK;
+ return vi->ctrl->status == VIRTIO_NET_OK;
}
static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -1608,8 +1624,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
- vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
- sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
+ vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+ sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -1668,22 +1684,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
- vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
- vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+ vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
+ vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
- sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
+ sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_PROMISC, sg))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
- vi->ctrl_promisc ? "en" : "dis");
+ vi->ctrl->promisc ? "en" : "dis");
- sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
+ sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
- vi->ctrl_allmulti ? "en" : "dis");
+ vi->ctrl->allmulti ? "en" : "dis");
uc_count = netdev_uc_count(dev);
mc_count = netdev_mc_count(dev);
@@ -1729,8 +1745,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
- vi->ctrl_vid = vid;
- sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
+ vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -1744,8 +1760,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
- vi->ctrl_vid = vid;
- sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
+ vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -2141,9 +2157,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
struct scatterlist sg;
- vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads);
+ vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
- sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads));
+ sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
@@ -2366,6 +2382,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
kfree(vi->rq);
kfree(vi->sq);
+ kfree(vi->ctrl);
}
static void _free_receive_bufs(struct virtnet_info *vi)
@@ -2558,6 +2575,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
{
int i;
+ vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
+ if (!vi->ctrl)
+ goto err_ctrl;
vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
if (!vi->sq)
goto err_sq;
@@ -2586,6 +2606,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
err_rq:
kfree(vi->sq);
err_sq:
+ kfree(vi->ctrl);
+err_ctrl:
return -ENOMEM;
}
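The control-VQ scratch fields are consolidated above into a separately allocated struct control_buf, created in virtnet_alloc_queues() and released in virtnet_free_queues(). A userspace analogue of that allocate-once, free-with-the-queues pairing; the field sizes here are illustrative, not the virtio wire format:

#include <stdint.h>
#include <stdlib.h>

struct control_buf {
	uint8_t  hdr[2];	/* class + cmd */
	uint8_t  status;
	uint16_t mq_pairs;
	uint8_t  promisc;
	uint8_t  allmulti;
	uint16_t vid;
	uint64_t offloads;
};

struct vinfo {
	struct control_buf *ctrl;
	/* ... queues, stats, ... */
};

static int vinfo_alloc(struct vinfo *vi)
{
	vi->ctrl = calloc(1, sizeof(*vi->ctrl));	/* like kzalloc(..., GFP_KERNEL) */
	return vi->ctrl ? 0 : -1;
}

static void vinfo_free(struct vinfo *vi)
{
	free(vi->ctrl);		/* mirrors kfree(vi->ctrl) in virtnet_free_queues() */
	vi->ctrl = NULL;
}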
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index e04937f44f33..e454dfc9ad8f 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
+ /* Prevent any &gdesc->tcd field from being (speculatively)
+ * read before (&gdesc->tcd)->gen is read.
+ */
+ dma_rmb();
+
completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
&gdesc->tcd), tq, adapter->pdev,
adapter);
@@ -1103,6 +1108,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
gdesc->txd.tci = skb_vlan_tag_get(skb);
}
+ /* Ensure that the write to (&gdesc->txd)->gen will be observed after
+ * all other writes to &gdesc->txd.
+ */
+ dma_wmb();
+
/* finally flips the GEN bit of the SOP desc. */
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
VMXNET3_TXD_GEN);
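The dma_rmb()/dma_wmb() calls added in this file order descriptor reads and writes around the gen bit that hands ownership between driver and device. A userspace analogue of the same publish/consume pattern using C11 fences; the descriptor layout is illustrative, not vmxnet3's:

#include <stdatomic.h>
#include <stdint.h>

struct desc {
	uint32_t data;
	_Atomic uint32_t gen;
};

static void producer_publish(struct desc *d, uint32_t data, uint32_t gen)
{
	d->data = data;
	/* like dma_wmb(): the writes above are visible before the gen flip */
	atomic_store_explicit(&d->gen, gen, memory_order_release);
}

static int consumer_poll(struct desc *d, uint32_t gen, uint32_t *out)
{
	if (atomic_load_explicit(&d->gen, memory_order_acquire) != gen)
		return 0;	/* descriptor not handed over yet */
	/* like dma_rmb(): no field is read before gen has been observed */
	*out = d->data;
	return 1;
}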
@@ -1218,6 +1228,7 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
union {
void *ptr;
struct ethhdr *eth;
+ struct vlan_ethhdr *veth;
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
struct tcphdr *tcp;
@@ -1228,16 +1239,24 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
return 0;
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+ skb->protocol == cpu_to_be16(ETH_P_8021AD))
+ hlen = sizeof(struct vlan_ethhdr);
+ else
+ hlen = sizeof(struct ethhdr);
+
hdr.eth = eth_hdr(skb);
if (gdesc->rcd.v4) {
- BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
- hdr.ptr += sizeof(struct ethhdr);
+ BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
+ hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
+ hdr.ptr += hlen;
BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
hlen = hdr.ipv4->ihl << 2;
hdr.ptr += hdr.ipv4->ihl << 2;
} else if (gdesc->rcd.v6) {
- BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
- hdr.ptr += sizeof(struct ethhdr);
+ BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
+ hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
+ hdr.ptr += hlen;
/* Use an estimated value, since we also need to handle
* TSO case.
*/
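The header-length selection above accounts for the four extra bytes of an 802.1Q/802.1AD tag before walking the IP header. A minimal sketch of that selection, with the EtherType values in host order for brevity:

#include <stddef.h>
#include <stdint.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

static size_t l2_header_len(uint16_t ethertype)
{
	if (ethertype == ETH_P_8021Q || ethertype == ETH_P_8021AD)
		return 14 + 4;	/* struct vlan_ethhdr: tagged frame */
	return 14;		/* struct ethhdr: untagged frame */
}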
@@ -1289,6 +1308,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
*/
break;
}
+
+ /* Prevent any rcd field from being (speculatively) read before
+ * rcd->gen is read.
+ */
+ dma_rmb();
+
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
rcd->rqID != rq->dataRingQid);
idx = rcd->rxdIdx;
@@ -1519,6 +1544,12 @@ rcd_done:
ring->next2comp = idx;
num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
ring = rq->rx_ring + ring_idx;
+
+ /* Ensure that the writes to rxd->gen bits will be observed
+ * after all other writes to rxd objects.
+ */
+ dma_wmb();
+
while (num_to_alloc) {
vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
&rxCmdDesc);
@@ -2679,7 +2710,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
/* ==================== initialization and cleanup routines ============ */
static int
-vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
+vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
{
int err;
unsigned long mmio_start, mmio_len;
@@ -2691,30 +2722,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
return err;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
- dev_err(&pdev->dev,
- "pci_set_consistent_dma_mask failed\n");
- err = -EIO;
- goto err_set_mask;
- }
- *dma64 = true;
- } else {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- dev_err(&pdev->dev,
- "pci_set_dma_mask failed\n");
- err = -EIO;
- goto err_set_mask;
- }
- *dma64 = false;
- }
-
err = pci_request_selected_regions(pdev, (1 << 2) - 1,
vmxnet3_driver_name);
if (err) {
dev_err(&pdev->dev,
"Failed to request region for adapter: error %d\n", err);
- goto err_set_mask;
+ goto err_enable_device;
}
pci_set_master(pdev);
@@ -2742,7 +2755,7 @@ err_bar1:
iounmap(adapter->hw_addr0);
err_ioremap:
pci_release_selected_regions(pdev, (1 << 2) - 1);
-err_set_mask:
+err_enable_device:
pci_disable_device(pdev);
return err;
}
@@ -2936,7 +2949,7 @@ vmxnet3_close(struct net_device *netdev)
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
vmxnet3_quiesce_dev(adapter);
@@ -2986,7 +2999,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (netif_running(netdev)) {
vmxnet3_quiesce_dev(adapter);
@@ -3245,7 +3258,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
#endif
};
int err;
- bool dma64 = false; /* stupid gcc */
+ bool dma64;
u32 ver;
struct net_device *netdev;
struct vmxnet3_adapter *adapter;
@@ -3291,6 +3304,24 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+ dev_err(&pdev->dev,
+ "pci_set_consistent_dma_mask failed\n");
+ err = -EIO;
+ goto err_set_mask;
+ }
+ dma64 = true;
+ } else {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(&pdev->dev,
+ "pci_set_dma_mask failed\n");
+ err = -EIO;
+ goto err_set_mask;
+ }
+ dma64 = false;
+ }
+
spin_lock_init(&adapter->cmd_lock);
adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
sizeof(struct vmxnet3_adapter),
@@ -3298,7 +3329,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
dev_err(&pdev->dev, "Failed to map dma\n");
err = -EFAULT;
- goto err_dma_map;
+ goto err_set_mask;
}
adapter->shared = dma_alloc_coherent(
&adapter->pdev->dev,
@@ -3349,7 +3380,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
}
#endif /* VMXNET3_RSS */
- err = vmxnet3_alloc_pci_resources(adapter, &dma64);
+ err = vmxnet3_alloc_pci_resources(adapter);
if (err < 0)
goto err_alloc_pci;
@@ -3495,7 +3526,7 @@ err_alloc_queue_desc:
err_alloc_shared:
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
-err_dma_map:
+err_set_mask:
free_netdev(netdev);
return err;
}
@@ -3558,7 +3589,7 @@ static void vmxnet3_shutdown_device(struct pci_dev *pdev)
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
&adapter->state)) {
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 2ff27314e047..559db051a500 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -600,7 +600,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (netif_running(netdev)) {
vmxnet3_quiesce_dev(adapter);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 59ec34052a65..a2c554f8a61b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,12 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.16.0-k"
-/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00
+/* Each byte of this 32-bit integer encodes a version number in
+ * VMXNET3_DRIVER_VERSION_STRING.
+ */
+#define VMXNET3_DRIVER_VERSION_NUM 0x01041000
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index deb5ae21a559..84f071ac0d84 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -4,12 +4,16 @@ config ATH10K
select ATH_COMMON
select CRC32
select WANT_DEV_COREDUMP
+ select ATH10K_CE
---help---
This module adds support for wireless adapters based on
Atheros IEEE 802.11ac family of chipsets.
If you choose to build a module, it'll be called ath10k.
+config ATH10K_CE
+ bool
+
config ATH10K_PCI
tristate "Atheros ath10k PCI support"
depends on ATH10K && PCI
@@ -36,6 +40,14 @@ config ATH10K_USB
This module adds experimental support for USB bus. Currently
work in progress and will not fully work.
+config ATH10K_SNOC
+ tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
+ depends on ATH10K && ARCH_QCOM
+ ---help---
+ This module adds support for the integrated WCN3990 chip connected
+ to the system NOC (SNOC). Currently a work in progress and will not
+ fully work.
+
config ATH10K_DEBUG
bool "Atheros ath10k debugging"
depends on ATH10K
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 6739ac26fd29..44d60a61b242 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -22,10 +22,10 @@ ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CONFIG_PM) += wow.o
ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
+ath10k_core-$(CONFIG_ATH10K_CE) += ce.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
-ath10k_pci-y += pci.o \
- ce.o
+ath10k_pci-y += pci.o
ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o
@@ -35,5 +35,8 @@ ath10k_sdio-y += sdio.o
obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o
ath10k_usb-y += usb.o
+obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o
+ath10k_snoc-y += snoc.o
+
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index b9def7bace2f..3b96a43fbda4 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -58,6 +59,74 @@
* the buffer is sent/received.
*/
+static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state)
+{
+ u32 ce_id = ce_state->id;
+ u32 addr = 0;
+
+ switch (ce_id) {
+ case 0:
+ addr = 0x00032000;
+ break;
+ case 3:
+ addr = 0x0003200C;
+ break;
+ case 4:
+ addr = 0x00032010;
+ break;
+ case 5:
+ addr = 0x00032014;
+ break;
+ case 7:
+ addr = 0x0003201C;
+ break;
+ default:
+ ath10k_warn(ar, "invalid CE id: %d", ce_id);
+ break;
+ }
+ return addr;
+}
+
+static inline u32 shadow_dst_wr_ind_addr(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state)
+{
+ u32 ce_id = ce_state->id;
+ u32 addr = 0;
+
+ switch (ce_id) {
+ case 1:
+ addr = 0x00032034;
+ break;
+ case 2:
+ addr = 0x00032038;
+ break;
+ case 5:
+ addr = 0x00032044;
+ break;
+ case 7:
+ addr = 0x0003204C;
+ break;
+ case 8:
+ addr = 0x00032050;
+ break;
+ case 9:
+ addr = 0x00032054;
+ break;
+ case 10:
+ addr = 0x00032058;
+ break;
+ case 11:
+ addr = 0x0003205C;
+ break;
+ default:
+ ath10k_warn(ar, "invalid CE id: %d", ce_id);
+ break;
+ }
+
+ return addr;
+}
+
static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
struct ath10k_hw_ce_regs_addr_map *addr_map)
@@ -116,11 +185,46 @@ static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
ar->hw_ce_regs->sr_wr_index_addr);
}
+static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
+ u32 ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
+}
+
static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- return ath10k_ce_read32(ar, ce_ctrl_addr +
- ar->hw_ce_regs->current_srri_addr);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 index;
+
+ if (ar->hw_params.rri_on_ddr &&
+ (ce_state->attr_flags & CE_ATTR_DIS_INTR))
+ index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
+ else
+ index = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->current_srri_addr);
+
+ return index;
+}
+
+static inline void
+ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state,
+ unsigned int value)
+{
+ ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
+}
+
+static inline void
+ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state,
+ unsigned int value)
+{
+ ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value);
}
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
@@ -181,11 +285,31 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
}
+static inline
+ u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
+ CE_DDR_RRI_MASK;
+}
+
static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- return ath10k_ce_read32(ar, ce_ctrl_addr +
- ar->hw_ce_regs->current_drri_addr);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 index;
+
+ if (ar->hw_params.rri_on_ddr &&
+ (ce_state->attr_flags & CE_ATTR_DIS_INTR))
+ index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
+ else
+ index = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->current_drri_addr);
+
+ return index;
}
static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
@@ -376,8 +500,14 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
/* WORKAROUND */
- if (!(flags & CE_SEND_FLAG_GATHER))
- ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+ if (!(flags & CE_SEND_FLAG_GATHER)) {
+ if (ar->hw_params.shadow_reg_support)
+ ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
+ write_index);
+ else
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
+ write_index);
+ }
src_ring->write_index = write_index;
exit:
@@ -395,7 +525,7 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
struct ce_desc_64 *desc, sdesc;
unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int sw_index = src_ring->sw_index;
+ unsigned int sw_index;
unsigned int write_index = src_ring->write_index;
u32 ctrl_addr = ce_state->ctrl_addr;
__le32 *addr;
@@ -409,6 +539,11 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
__func__, nbytes, ce_state->src_sz_max);
+ if (ar->hw_params.rri_on_ddr)
+ sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
+ else
+ sw_index = src_ring->sw_index;
+
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) <= 0)) {
ret = -ENOSR;
@@ -464,6 +599,7 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
buffer, nbytes, transfer_id, flags);
}
+EXPORT_SYMBOL(ath10k_ce_send_nolock);
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
@@ -491,6 +627,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
src_ring->per_transfer_context[src_ring->write_index] = NULL;
}
+EXPORT_SYMBOL(__ath10k_ce_send_revert);
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
@@ -510,6 +647,7 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
return ret;
}
+EXPORT_SYMBOL(ath10k_ce_send);
int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
@@ -525,6 +663,7 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
return delta;
}
+EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
@@ -539,6 +678,7 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
+EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);
static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
dma_addr_t paddr)
@@ -615,13 +755,14 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
/* Prevent CE ring stuck issue that will occur when ring is full.
* Make sure that write index is 1 less than read index.
*/
- if ((cur_write_idx + nentries) == dest_ring->sw_index)
+ if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
nentries -= 1;
write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
dest_ring->write_index = write_index;
}
+EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
dma_addr_t paddr)
@@ -636,6 +777,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
return ret;
}
+EXPORT_SYMBOL(ath10k_ce_rx_post_buf);
/*
* Guts of ath10k_ce_completed_recv_next.
@@ -748,6 +890,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
per_transfer_ctx,
nbytesp);
}
+EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock);
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
@@ -766,6 +909,7 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
return ret;
}
+EXPORT_SYMBOL(ath10k_ce_completed_recv_next);
static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
@@ -882,6 +1026,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
per_transfer_contextp,
bufferp);
}
+EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
/*
* Guts of ath10k_ce_completed_send_next.
@@ -915,7 +1060,10 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
src_ring->hw_index = read_index;
}
- read_index = src_ring->hw_index;
+ if (ar->hw_params.rri_on_ddr)
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ else
+ read_index = src_ring->hw_index;
if (read_index == sw_index)
return -EIO;
@@ -936,6 +1084,7 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
return 0;
}
+EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
static void ath10k_ce_extract_desc_data(struct ath10k *ar,
struct ath10k_ce_ring *src_ring,
@@ -1025,6 +1174,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
return ret;
}
+EXPORT_SYMBOL(ath10k_ce_cancel_send_next);
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp)
@@ -1040,6 +1190,7 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
return ret;
}
+EXPORT_SYMBOL(ath10k_ce_completed_send_next);
/*
* Guts of interrupt handler for per-engine interrupts on a particular CE.
@@ -1078,6 +1229,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
spin_unlock_bh(&ce->ce_lock);
}
+EXPORT_SYMBOL(ath10k_ce_per_engine_service);
/*
* Handler for per-engine interrupts on ALL active CEs.
@@ -1102,6 +1254,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
ath10k_ce_per_engine_service(ar, ce_id);
}
}
+EXPORT_SYMBOL(ath10k_ce_per_engine_service_any);
/*
* Adjust interrupts for the copy complete handler.
@@ -1139,6 +1292,7 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)
return 0;
}
+EXPORT_SYMBOL(ath10k_ce_disable_interrupts);
void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
@@ -1154,6 +1308,7 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar)
ath10k_ce_per_engine_handler_adjust(ce_state);
}
}
+EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
@@ -1234,6 +1389,22 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
return 0;
}
+static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 nentries)
+{
+ src_ring->shadow_base_unaligned = kcalloc(nentries,
+ sizeof(struct ce_desc),
+ GFP_KERNEL);
+ if (!src_ring->shadow_base_unaligned)
+ return -ENOMEM;
+
+ src_ring->shadow_base = (struct ce_desc *)
+ PTR_ALIGN(src_ring->shadow_base_unaligned,
+ CE_DESC_RING_ALIGN);
+ return 0;
+}
+
static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr)
@@ -1241,6 +1412,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
struct ath10k_ce_ring *src_ring;
u32 nentries = attr->src_nentries;
dma_addr_t base_addr;
+ int ret;
nentries = roundup_pow_of_two(nentries);
@@ -1277,6 +1449,19 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
ALIGN(src_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
+ if (ar->hw_params.shadow_reg_support) {
+ ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
+ if (ret) {
+ dma_free_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space_unaligned,
+ base_addr);
+ kfree(src_ring);
+ return ERR_PTR(ret);
+ }
+ }
+
return src_ring;
}
@@ -1287,6 +1472,7 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
struct ath10k_ce_ring *src_ring;
u32 nentries = attr->src_nentries;
dma_addr_t base_addr;
+ int ret;
nentries = roundup_pow_of_two(nentries);
@@ -1322,6 +1508,19 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
ALIGN(src_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
+ if (ar->hw_params.shadow_reg_support) {
+ ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
+ if (ret) {
+ dma_free_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space_unaligned,
+ base_addr);
+ kfree(src_ring);
+ return ERR_PTR(ret);
+ }
+ }
+
return src_ring;
}
@@ -1454,6 +1653,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
return 0;
}
+EXPORT_SYMBOL(ath10k_ce_init_pipe);
static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
@@ -1479,6 +1679,7 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
ath10k_ce_deinit_src_ring(ar, ce_id);
ath10k_ce_deinit_dest_ring(ar, ce_id);
}
+EXPORT_SYMBOL(ath10k_ce_deinit_pipe);
static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
@@ -1486,6 +1687,8 @@ static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
if (ce_state->src_ring) {
+ if (ar->hw_params.shadow_reg_support)
+ kfree(ce_state->src_ring->shadow_base_unaligned);
dma_free_coherent(ar->dev,
(ce_state->src_ring->nentries *
sizeof(struct ce_desc) +
@@ -1515,6 +1718,8 @@ static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
if (ce_state->src_ring) {
+ if (ar->hw_params.shadow_reg_support)
+ kfree(ce_state->src_ring->shadow_base_unaligned);
dma_free_coherent(ar->dev,
(ce_state->src_ring->nentries *
sizeof(struct ce_desc_64) +
@@ -1545,6 +1750,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
ce_state->ops->ce_free_pipe(ar, ce_id);
}
+EXPORT_SYMBOL(ath10k_ce_free_pipe);
void ath10k_ce_dump_registers(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data)
@@ -1584,6 +1790,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
spin_unlock_bh(&ce->ce_lock);
}
+EXPORT_SYMBOL(ath10k_ce_dump_registers);
static const struct ath10k_ce_ops ce_ops = {
.ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
@@ -1680,3 +1887,47 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
return 0;
}
+EXPORT_SYMBOL(ath10k_ce_alloc_pipe);
+
+void ath10k_ce_alloc_rri(struct ath10k *ar)
+{
+ int i;
+ u32 value;
+ u32 ctrl1_regs;
+ u32 ce_base_addr;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ ce->vaddr_rri = dma_alloc_coherent(ar->dev,
+ (CE_COUNT * sizeof(u32)),
+ &ce->paddr_rri, GFP_KERNEL);
+
+ if (!ce->vaddr_rri)
+ return;
+
+ ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low,
+ lower_32_bits(ce->paddr_rri));
+ ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
+ (upper_32_bits(ce->paddr_rri) &
+ CE_DESC_FLAGS_GET_MASK));
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
+ ce_base_addr = ath10k_ce_base_address(ar, i);
+ value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
+ value |= ar->hw_ce_regs->upd->mask;
+ ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
+ }
+
+ memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32));
+}
+EXPORT_SYMBOL(ath10k_ce_alloc_rri);
+
+void ath10k_ce_free_rri(struct ath10k *ar)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
+ ce->vaddr_rri,
+ ce->paddr_rri);
+}
+EXPORT_SYMBOL(ath10k_ce_free_rri);
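With rri_on_ddr enabled, the read indices above come from a DMA-coherent array of one 32-bit word per copy engine rather than from register reads: the source ring index sits in the low 16 bits and the destination ring index in the high 16 bits. A small sketch of unpacking that word, reusing the mask and shift values added in ce.h:

#include <stdint.h>

#define CE_DDR_RRI_MASK   0xffffu	/* GENMASK(15, 0) */
#define CE_DDR_DRRI_SHIFT 16

static uint32_t src_read_index(uint32_t rri_word)
{
	return rri_word & CE_DDR_RRI_MASK;
}

static uint32_t dest_read_index(uint32_t rri_word)
{
	return (rri_word >> CE_DDR_DRRI_SHIFT) & CE_DDR_RRI_MASK;
}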
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 2c3c8f5e90ea..dbeffaef6024 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -48,6 +49,9 @@ struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
#define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb
+#define CE_DDR_RRI_MASK GENMASK(15, 0)
+#define CE_DDR_DRRI_SHIFT 16
+
struct ce_desc {
__le32 addr;
__le16 nbytes;
@@ -113,6 +117,9 @@ struct ath10k_ce_ring {
/* CE address space */
u32 base_addr_ce_space;
+ char *shadow_base_unaligned;
+ struct ce_desc *shadow_base;
+
/* keep last */
void *per_transfer_context[0];
};
@@ -153,6 +160,8 @@ struct ath10k_ce {
spinlock_t ce_lock;
const struct ath10k_bus_ops *bus_ops;
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
+ u32 *vaddr_rri;
+ dma_addr_t paddr_rri;
};
/*==================Send====================*/
@@ -261,6 +270,8 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
void ath10k_ce_dump_registers(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data);
+void ath10k_ce_alloc_rri(struct ath10k *ar);
+void ath10k_ce_free_rri(struct ath10k *ar);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
@@ -327,6 +338,9 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
}
+#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) (((COPY_ENGINE_BASE_ADDRESS) \
+ - CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS))
+
#define CE_SRC_RING_TO_DESC(baddr, idx) \
(&(((struct ce_desc *)baddr)[idx]))
@@ -355,14 +369,18 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
+#define CE_INTERRUPT_SUMMARY (GENMASK(CE_COUNT_MAX - 1, 0))
static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
- return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
- ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
- CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
+ if (!ar->hw_params.per_ce_irq)
+ return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
+ ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
+ CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
+ else
+ return CE_INTERRUPT_SUMMARY;
}
#endif /* _CE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 8a3020dbd4cf..4cf54a7ef09a 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -90,6 +90,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = QCA988X_HW_2_0_VERSION,
@@ -119,6 +121,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -148,6 +153,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -176,6 +184,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -204,6 +215,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -232,6 +246,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -263,6 +280,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -297,6 +317,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -336,6 +359,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -374,6 +400,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -402,6 +431,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -432,6 +464,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -467,6 +502,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
},
{
.id = WCN3990_HW_1_0_DEV_VERSION,
@@ -487,6 +525,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
.target_64bit = true,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
+ .per_ce_irq = true,
+ .shadow_reg_support = true,
+ .rri_on_ddr = true,
},
};
@@ -1253,14 +1294,61 @@ out:
return ret;
}
+static int ath10k_core_search_bd(struct ath10k *ar,
+ const char *boardname,
+ const u8 *data,
+ size_t len)
+{
+ size_t ie_len;
+ struct ath10k_fw_ie *hdr;
+ int ret = -ENOENT, ie_id;
+
+ while (len > sizeof(struct ath10k_fw_ie)) {
+ hdr = (struct ath10k_fw_ie *)data;
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data = hdr->data;
+
+ if (len < ALIGN(ie_len, 4)) {
+ ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+ ie_id, ie_len, len);
+ return -EINVAL;
+ }
+
+ switch (ie_id) {
+ case ATH10K_BD_IE_BOARD:
+ ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
+ boardname);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ break;
+
+ /* either found or error, so stop searching */
+ goto out;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+out:
+ /* return result of parse_bd_ie_board() or -ENOENT */
+ return ret;
+}
+
static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
const char *boardname,
+ const char *fallback_boardname,
const char *filename)
{
- size_t len, magic_len, ie_len;
- struct ath10k_fw_ie *hdr;
+ size_t len, magic_len;
const u8 *data;
- int ret, ie_id;
+ int ret;
ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
@@ -1298,69 +1386,23 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
data += magic_len;
len -= magic_len;
- while (len > sizeof(struct ath10k_fw_ie)) {
- hdr = (struct ath10k_fw_ie *)data;
- ie_id = le32_to_cpu(hdr->id);
- ie_len = le32_to_cpu(hdr->len);
-
- len -= sizeof(*hdr);
- data = hdr->data;
-
- if (len < ALIGN(ie_len, 4)) {
- ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
- ie_id, ie_len, len);
- ret = -EINVAL;
- goto err;
- }
-
- switch (ie_id) {
- case ATH10K_BD_IE_BOARD:
- ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
- boardname);
- if (ret == -ENOENT && ar->id.bdf_ext[0] != '\0') {
- /* try default bdf if variant was not found */
- char *s, *v = ",variant=";
- char boardname2[100];
-
- strlcpy(boardname2, boardname,
- sizeof(boardname2));
-
- s = strstr(boardname2, v);
- if (s)
- *s = '\0'; /* strip ",variant=%s" */
-
- ret = ath10k_core_parse_bd_ie_board(ar, data,
- ie_len,
- boardname2);
- }
-
- if (ret == -ENOENT)
- /* no match found, continue */
- break;
- else if (ret)
- /* there was an error, bail out */
- goto err;
+ /* attempt to find boardname in the IE list */
+ ret = ath10k_core_search_bd(ar, boardname, data, len);
- /* board data found */
- goto out;
- }
+ /* if we didn't find it and have a fallback name, try that */
+ if (ret == -ENOENT && fallback_boardname)
+ ret = ath10k_core_search_bd(ar, fallback_boardname, data, len);
- /* jump over the padding */
- ie_len = ALIGN(ie_len, 4);
-
- len -= ie_len;
- data += ie_len;
- }
-
-out:
- if (!ar->normal_mode_fw.board_data || !ar->normal_mode_fw.board_len) {
+ if (ret == -ENOENT) {
ath10k_err(ar,
"failed to fetch board data for %s from %s/%s\n",
boardname, ar->hw_params.fw.dir, filename);
ret = -ENODATA;
- goto err;
}
+ if (ret)
+ goto err;
+
return 0;
err:
@@ -1369,12 +1411,12 @@ err:
}
static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
- size_t name_len)
+ size_t name_len, bool with_variant)
{
/* strlen(',variant=') + strlen(ar->id.bdf_ext) */
char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
- if (ar->id.bdf_ext[0] != '\0')
+ if (with_variant && ar->id.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
ar->id.bdf_ext);
@@ -1400,17 +1442,26 @@ out:
static int ath10k_core_fetch_board_file(struct ath10k *ar)
{
- char boardname[100];
+ char boardname[100], fallback_boardname[100];
int ret;
- ret = ath10k_core_create_board_name(ar, boardname, sizeof(boardname));
+ ret = ath10k_core_create_board_name(ar, boardname,
+ sizeof(boardname), true);
if (ret) {
ath10k_err(ar, "failed to create board name: %d", ret);
return ret;
}
+ ret = ath10k_core_create_board_name(ar, fallback_boardname,
+ sizeof(boardname), false);
+ if (ret) {
+ ath10k_err(ar, "failed to create fallback board name: %d", ret);
+ return ret;
+ }
+
ar->bd_api = 2;
ret = ath10k_core_fetch_board_data_api_n(ar, boardname,
+ fallback_boardname,
ATH10K_BOARD_API2_FILE);
if (!ret)
goto success;
@@ -2472,6 +2523,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ar->hw->wiphy->hw_version = target_info.version;
break;
case ATH10K_BUS_SNOC:
+ memset(&target_info, 0, sizeof(target_info));
+ ret = ath10k_hif_get_target_info(ar, &target_info);
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
break;
default:
ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index c17d805d68cc..e4ac8f2831fd 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -52,6 +52,8 @@
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
+#define ATH10K_INVALID_RSSI 128
+
#define ATH10K_MAX_NUM_MGMT_PENDING 128
/* number of failed packets (20 packets with 16 sw reties each) */
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 6da4e3369c5a..1a59ea0068c2 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -20,13 +20,14 @@
#include <linux/kernel.h>
#include "core.h"
+#include "bmi.h"
#include "debug.h"
struct ath10k_hif_sg_item {
u16 transfer_id;
void *transfer_context; /* NULL = tx completion callback not called */
void *vaddr; /* for debugging mostly */
- u32 paddr;
+ dma_addr_t paddr;
u16 len;
};
@@ -93,6 +94,9 @@ struct ath10k_hif_ops {
/* fetch calibration data from target eeprom */
int (*fetch_cal_eeprom)(struct ath10k *ar, void **data,
size_t *data_len);
+
+ int (*get_target_info)(struct ath10k *ar,
+ struct bmi_target_info *target_info);
};
static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -218,4 +222,13 @@ static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar,
return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len);
}
+static inline int ath10k_hif_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *tgt_info)
+{
+ if (!ar->hif.ops->get_target_info)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->get_target_info(ar, tgt_info);
+}
+
#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 492dc5b4bbf2..8902720b4e49 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -542,8 +542,14 @@ static const char *htc_service_name(enum ath10k_htc_svc_id id)
return "NMI Data";
case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
return "HTT Data";
+ case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
+ return "HTT Data";
+ case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
+ return "HTT Data";
case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
return "RAW";
+ case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
+ return "PKTLOG";
}
return "Unknown";
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index a2f8814b3e53..34877597dd6a 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -248,6 +248,7 @@ enum ath10k_htc_svc_gid {
ATH10K_HTC_SVC_GRP_WMI = 1,
ATH10K_HTC_SVC_GRP_NMI = 2,
ATH10K_HTC_SVC_GRP_HTT = 3,
+ ATH10K_LOG_SERVICE_GROUP = 6,
ATH10K_HTC_SVC_GRP_TEST = 254,
ATH10K_HTC_SVC_GRP_LAST = 255,
@@ -273,6 +274,9 @@ enum ath10k_htc_svc_id {
ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
+ ATH10K_HTC_SVC_ID_HTT_DATA2_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 1),
+ ATH10K_HTC_SVC_ID_HTT_DATA3_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 2),
+ ATH10K_HTC_SVC_ID_HTT_LOG_MSG = SVC(ATH10K_LOG_SERVICE_GROUP, 0),
/* raw stream service (i.e. flash, tcmd, calibration apps) */
ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
};
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 625198dea18b..21a67f82f037 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -257,11 +257,11 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
return status;
}
- status = htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
+ status = ath10k_htt_send_frag_desc_bank_cfg(htt);
if (status)
return status;
- status = htt->tx_ops->htt_send_rx_ring_cfg(htt);
+ status = ath10k_htt_send_rx_ring_cfg(htt);
if (status) {
ath10k_warn(ar, "failed to setup rx ring: %d\n",
status);
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 8cc2a8b278e4..5d3ff80f3a1f 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -127,6 +128,19 @@ struct htt_msdu_ext_desc_64 {
| HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
| HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
+#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 BIT(16)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 BIT(17)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 BIT(18)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 BIT(19)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64 BIT(20)
+#define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64 BIT(21)
+
+#define HTT_MSDU_CHECKSUM_ENABLE_64 (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64)
+
enum htt_data_tx_desc_flags0 {
HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
@@ -533,12 +547,18 @@ struct htt_ver_resp {
u8 rsvd0;
} __packed;
+#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0)
+
+#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK GENMASK(7, 0)
+
struct htt_mgmt_tx_completion {
u8 rsvd0;
u8 rsvd1;
- u8 rsvd2;
+ u8 flags;
__le32 desc_id;
__le32 status;
+ __le32 ppdu_id;
+ __le32 info;
} __packed;
#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F)
@@ -1648,6 +1668,7 @@ struct htt_resp {
struct htt_tx_done {
u16 msdu_id;
u16 status;
+ u8 ack_rssi;
};
enum htt_tx_compl_state {
@@ -1848,6 +1869,57 @@ struct ath10k_htt_tx_ops {
void (*htt_free_txbuff)(struct ath10k_htt *htt);
};
+static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
+{
+ if (!htt->tx_ops->htt_send_rx_ring_cfg)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_send_rx_ring_cfg(htt);
+}
+
+static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+{
+ if (!htt->tx_ops->htt_send_frag_desc_bank_cfg)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
+}
+
+static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt)
+{
+ if (!htt->tx_ops->htt_alloc_frag_desc)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_alloc_frag_desc(htt);
+}
+
+static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt)
+{
+ if (htt->tx_ops->htt_free_frag_desc)
+ htt->tx_ops->htt_free_frag_desc(htt);
+}
+
+static inline int ath10k_htt_tx(struct ath10k_htt *htt,
+ enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
+{
+ return htt->tx_ops->htt_tx(htt, txmode, msdu);
+}
+
+static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
+{
+ if (!htt->tx_ops->htt_alloc_txbuff)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_alloc_txbuff(htt);
+}
+
+static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt)
+{
+ if (htt->tx_ops->htt_free_txbuff)
+ htt->tx_ops->htt_free_txbuff(htt);
+}
+
struct ath10k_htt_rx_ops {
size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
@@ -1857,6 +1929,43 @@ struct ath10k_htt_rx_ops {
void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
};
+static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt)
+{
+ if (!htt->rx_ops->htt_get_rx_ring_size)
+ return 0;
+
+ return htt->rx_ops->htt_get_rx_ring_size(htt);
+}
+
+static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt,
+ void *vaddr)
+{
+ if (htt->rx_ops->htt_config_paddrs_ring)
+ htt->rx_ops->htt_config_paddrs_ring(htt, vaddr);
+}
+
+static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt,
+ dma_addr_t paddr,
+ int idx)
+{
+ if (htt->rx_ops->htt_set_paddrs_ring)
+ htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
+}
+
+static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt)
+{
+ if (!htt->rx_ops->htt_get_vaddr_ring)
+ return NULL;
+
+ return htt->rx_ops->htt_get_vaddr_ring(htt);
+}
+
+static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx)
+{
+ if (htt->rx_ops->htt_reset_paddrs_ring)
+ htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
+}
+
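The inline helpers added above wrap the tx_ops/rx_ops callbacks so a bus backend may leave optional ops NULL; callers then get -EOPNOTSUPP, 0, or NULL instead of a crash, and void ops simply become no-ops. A generic sketch of that optional-ops pattern; the structure and function names are illustrative, not the ath10k ones:

#include <errno.h>
#include <stddef.h>

struct backend_ops {
	int  (*start)(void *ctx);
	void (*stop)(void *ctx);
};

static inline int backend_start(const struct backend_ops *ops, void *ctx)
{
	if (!ops->start)
		return -EOPNOTSUPP;	/* optional op not provided */
	return ops->start(ctx);
}

static inline void backend_stop(const struct backend_ops *ops, void *ctx)
{
	if (ops->stop)			/* missing void ops become no-ops */
		ops->stop(ctx);
}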
#define RX_HTT_HDR_STATUS_LEN 64
/* This structure layout is programmed via rx ring setup
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 5e02e26158f6..bd23f6940488 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -25,6 +25,7 @@
#include "mac.h"
#include <linux/log2.h>
+#include <linux/bitfield.h>
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
@@ -181,7 +182,7 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
rxcb = ATH10K_SKB_RXCB(skb);
rxcb->paddr = paddr;
htt->rx_ring.netbufs_ring[idx] = skb;
- htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
+ ath10k_htt_set_paddrs_ring(htt, paddr, idx);
htt->rx_ring.fill_cnt++;
if (htt->rx_ring.in_ord_rx) {
@@ -286,8 +287,8 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
ath10k_htt_rx_ring_free(htt);
dma_free_coherent(htt->ar->dev,
- htt->rx_ops->htt_get_rx_ring_size(htt),
- htt->rx_ops->htt_get_vaddr_ring(htt),
+ ath10k_htt_get_rx_ring_size(htt),
+ ath10k_htt_get_vaddr_ring(htt),
htt->rx_ring.base_paddr);
dma_free_coherent(htt->ar->dev,
@@ -314,7 +315,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx];
htt->rx_ring.netbufs_ring[idx] = NULL;
- htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
+ ath10k_htt_reset_paddrs_ring(htt, idx);
idx++;
idx &= htt->rx_ring.size_mask;
@@ -586,13 +587,13 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
if (!htt->rx_ring.netbufs_ring)
goto err_netbuf;
- size = htt->rx_ops->htt_get_rx_ring_size(htt);
+ size = ath10k_htt_get_rx_ring_size(htt);
vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
if (!vaddr_ring)
goto err_dma_ring;
- htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
+ ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
htt->rx_ring.base_paddr = paddr;
vaddr = dma_alloc_coherent(htt->ar->dev,
@@ -626,7 +627,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
err_dma_idx:
dma_free_coherent(htt->ar->dev,
- htt->rx_ops->htt_get_rx_ring_size(htt),
+ ath10k_htt_get_rx_ring_size(htt),
vaddr_ring,
htt->rx_ring.base_paddr);
err_dma_ring:
@@ -2719,12 +2720,21 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
struct htt_tx_done tx_done = {};
int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
+ int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
switch (status) {
case HTT_MGMT_TX_STATUS_OK:
tx_done.status = HTT_TX_COMPL_STATE_ACK;
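+ /* Firmware advertising HTT_MGMT_TX_COMP_VALID_FLAGS reports the ACK RSSI in the info word */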
+ if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ ar->wmi.svc_map) &&
+ (resp->mgmt_tx_completion.flags &
+ HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
+ tx_done.ack_rssi =
+ FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
+ info);
+ }
break;
case HTT_MGMT_TX_STATUS_RETRY:
tx_done.status = HTT_TX_COMPL_STATE_NOACK;
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index d334b7be1fea..5d8b97a0ccaa 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -443,13 +443,13 @@ static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
int ret;
- ret = htt->tx_ops->htt_alloc_txbuff(htt);
+ ret = ath10k_htt_alloc_txbuff(htt);
if (ret) {
ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
return ret;
}
- ret = htt->tx_ops->htt_alloc_frag_desc(htt);
+ ret = ath10k_htt_alloc_frag_desc(htt);
if (ret) {
ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
goto free_txbuf;
@@ -473,10 +473,10 @@ free_txq:
ath10k_htt_tx_free_txq(htt);
free_frag_desc:
- htt->tx_ops->htt_free_frag_desc(htt);
+ ath10k_htt_free_frag_desc(htt);
free_txbuf:
- htt->tx_ops->htt_free_txbuff(htt);
+ ath10k_htt_free_txbuff(htt);
return ret;
}
@@ -530,9 +530,9 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
if (!htt->tx_mem_allocated)
return;
- htt->tx_ops->htt_free_txbuff(htt);
+ ath10k_htt_free_txbuff(htt);
ath10k_htt_tx_free_txq(htt);
- htt->tx_ops->htt_free_frag_desc(htt);
+ ath10k_htt_free_frag_desc(htt);
ath10k_htt_tx_free_txdone_fifo(htt);
htt->tx_mem_allocated = false;
}
@@ -1475,8 +1475,11 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
- if (ar->hw_params.continuous_frag_desc)
- ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
+ if (ar->hw_params.continuous_frag_desc) {
+ memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
+ ext_desc->tso_flag[3] |=
+ __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
+ }
}
/* Prevent firmware from sending up tx inspection requests. There's
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 497ac33e0fbf..677535b3d207 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -310,6 +310,12 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
.wm_high = &wcn3990_dst_wm_high,
};
+static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
+ .shift = 19,
+ .mask = 0x00080000,
+ .enable = 0x00000000,
+};
+
const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
.sr_base_addr = 0x00000000,
.sr_size_addr = 0x00000008,
@@ -320,8 +326,6 @@ const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
.dst_wr_index_addr = 0x00000040,
.current_srri_addr = 0x00000044,
.current_drri_addr = 0x00000048,
- .ddr_addr_for_rri_low = 0x00000004,
- .ddr_addr_for_rri_high = 0x00000008,
.ce_rri_low = 0x0024C004,
.ce_rri_high = 0x0024C008,
.host_ie_addr = 0x0000002c,
@@ -331,6 +335,7 @@ const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
.misc_regs = &wcn3990_misc_reg,
.wm_srcr = &wcn3990_wm_src_ring,
.wm_dstr = &wcn3990_wm_dst_ring,
+ .upd = &wcn3990_ctrl1_upd,
};
const struct ath10k_hw_values wcn3990_values = {
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 413b1b4321f7..b8bdabe73073 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -131,7 +132,7 @@ enum qca9377_chip_id_rev {
/* WCN3990 1.0 definitions */
#define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990
-#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw3.0"
+#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw1.0"
#define ATH10K_FW_FILE_BASE "firmware"
#define ATH10K_FW_API_MAX 6
@@ -335,6 +336,12 @@ struct ath10k_hw_ce_dst_src_wm_regs {
struct ath10k_hw_ce_regs_addr_map *wm_low;
struct ath10k_hw_ce_regs_addr_map *wm_high; };
+struct ath10k_hw_ce_ctrl1_upd {
+ u32 shift;
+ u32 mask;
+ u32 enable;
+};
+
struct ath10k_hw_ce_regs {
u32 sr_base_addr;
u32 sr_size_addr;
@@ -357,7 +364,9 @@ struct ath10k_hw_ce_regs {
struct ath10k_hw_ce_cmd_halt *cmd_halt;
struct ath10k_hw_ce_host_ie *host_ie;
struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
- struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; };
+ struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
+ struct ath10k_hw_ce_ctrl1_upd *upd;
+};
struct ath10k_hw_values {
u32 rtc_state_val_on;
@@ -568,6 +577,15 @@ struct ath10k_hw_params {
/* Target rx ring fill level */
u32 rx_ring_fill_level;
+
+ /* target supporting per ce IRQ */
+ bool per_ce_irq;
+
+ /* target supporting shadow register for ce write */
+ bool shadow_reg_support;
+
+ /* target supporting retention restore on ddr */
+ bool rri_on_ddr;
};
struct htt_rx_desc;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index bf05a3689558..3d7119ad7c7a 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3598,7 +3598,7 @@ static int ath10k_mac_tx_submit(struct ath10k *ar,
switch (txpath) {
case ATH10K_MAC_TX_HTT:
- ret = htt->tx_ops->htt_tx(htt, txmode, skb);
+ ret = ath10k_htt_tx(htt, txmode, skb);
break;
case ATH10K_MAC_TX_HTT_MGMT:
ret = ath10k_htt_mgmt_tx(htt, skb);
@@ -4679,6 +4679,13 @@ static int ath10k_start(struct ieee80211_hw *hw)
}
}
+ param = ar->wmi.pdev_param->idle_ps_config;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret);
+ goto err_core_stop;
+ }
+
__ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
/*
@@ -5717,6 +5724,12 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
}
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ;
+ ether_addr_copy(arg.mac_addr.addr, req->mac_addr);
+ ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
+ }
+
if (req->n_channels) {
arg.n_channels = req->n_channels;
for (i = 0; i < arg.n_channels; i++)
@@ -8433,6 +8446,17 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_dfs_detector_exit;
}
+ if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
+ ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
+ if (ret) {
+ ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
+ goto err_dfs_detector_exit;
+ }
+
+ ar->hw->wiphy->features |=
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ }
+
ar->hw->wiphy->cipher_suites = cipher_suites;
/* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index fd1566cd7d2b..af2cf55c4c1e 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1383,8 +1383,8 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
for (i = 0; i < n_items - 1; i++) {
ath10k_dbg(ar, ATH10K_DBG_PCI,
- "pci tx item %d paddr 0x%08x len %d n_items %d\n",
- i, items[i].paddr, items[i].len, n_items);
+ "pci tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
items[i].vaddr, items[i].len);
@@ -1401,8 +1401,8 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
/* `i` is equal to `n_items -1` after for() */
ath10k_dbg(ar, ATH10K_DBG_PCI,
- "pci tx item %d paddr 0x%08x len %d n_items %d\n",
- i, items[i].paddr, items[i].len, n_items);
+ "pci tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
items[i].vaddr, items[i].len);
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 03a69e5b1116..2d04c54a4153 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1957,25 +1957,25 @@ static int ath10k_sdio_probe(struct sdio_func *func,
ar_sdio = ath10k_sdio_priv(ar);
ar_sdio->irq_data.irq_proc_reg =
- kzalloc(sizeof(struct ath10k_sdio_irq_proc_regs),
- GFP_KERNEL);
+ devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
+ GFP_KERNEL);
if (!ar_sdio->irq_data.irq_proc_reg) {
ret = -ENOMEM;
goto err_core_destroy;
}
ar_sdio->irq_data.irq_en_reg =
- kzalloc(sizeof(struct ath10k_sdio_irq_enable_regs),
- GFP_KERNEL);
+ devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
+ GFP_KERNEL);
if (!ar_sdio->irq_data.irq_en_reg) {
ret = -ENOMEM;
- goto err_free_proc_reg;
+ goto err_core_destroy;
}
- ar_sdio->bmi_buf = kzalloc(BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
+ ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
if (!ar_sdio->bmi_buf) {
ret = -ENOMEM;
- goto err_free_en_reg;
+ goto err_core_destroy;
}
ar_sdio->func = func;
@@ -1995,7 +1995,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
if (!ar_sdio->workqueue) {
ret = -ENOMEM;
- goto err_free_bmi_buf;
+ goto err_core_destroy;
}
for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
@@ -2011,7 +2011,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
ret = -ENODEV;
ath10k_err(ar, "unsupported device id %u (0x%x)\n",
dev_id_base, id->device);
- goto err_free_bmi_buf;
+ goto err_core_destroy;
}
ar->id.vendor = id->vendor;
@@ -2040,12 +2040,6 @@ static int ath10k_sdio_probe(struct sdio_func *func,
err_free_wq:
destroy_workqueue(ar_sdio->workqueue);
-err_free_bmi_buf:
- kfree(ar_sdio->bmi_buf);
-err_free_en_reg:
- kfree(ar_sdio->irq_data.irq_en_reg);
-err_free_proc_reg:
- kfree(ar_sdio->irq_data.irq_proc_reg);
err_core_destroy:
ath10k_core_destroy(ar);
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
new file mode 100644
index 000000000000..47a4d2a5bd4c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -0,0 +1,1414 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "debug.h"
+#include "hif.h"
+#include "htc.h"
+#include "ce.h"
+#include "snoc.h"
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#define WCN3990_CE_ATTR_FLAGS 0
+#define ATH10K_SNOC_RX_POST_RETRY_MS 50
+#define CE_POLL_PIPE 4
+
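+/* Human-readable names used when requesting the per-CE interrupt lines */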
+static char *const ce_name[] = {
+ "WLAN_CE_0",
+ "WLAN_CE_1",
+ "WLAN_CE_2",
+ "WLAN_CE_3",
+ "WLAN_CE_4",
+ "WLAN_CE_5",
+ "WLAN_CE_6",
+ "WLAN_CE_7",
+ "WLAN_CE_8",
+ "WLAN_CE_9",
+ "WLAN_CE_10",
+ "WLAN_CE_11",
+};
+
+static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
+ {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
+ {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
+ {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+ {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
+};
+
+static struct ath10k_wcn3990_clk_info clk_cfg[] = {
+ {NULL, "cxo_ref_clk_pin", 0, false},
+};
+
+static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+
+static const struct ath10k_snoc_drv_priv drv_priv = {
+ .hw_rev = ATH10K_HW_WCN3990,
+ .dma_mask = DMA_BIT_MASK(37),
+};
+
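+/* Host-side copy engine attributes for WCN3990, one entry per CE pipe */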
+static struct ce_attr host_ce_config_wlan[] = {
+ /* CE0: host->target HTC control streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath10k_snoc_htc_tx_cb,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 64,
+ .recv_cb = ath10k_snoc_htc_rx_cb,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath10k_snoc_htc_tx_cb,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 256,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ .send_cb = ath10k_snoc_htt_tx_cb,
+ },
+
+ /* CE5: target->host HTT (ipa_uc->target) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 512,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_rx_cb,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: ce_diag, the Diagnostic Window */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 2,
+ .src_sz_max = 2048,
+ .dest_nentries = 2,
+ },
+
+ /* CE8: Target to uMC */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ },
+
+ /* CE9 target->host HTT */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+ },
+
+ /* CE10: target->host HTT */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+ },
+
+ /* CE11: target -> host PKTLOG */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+ },
+};
+
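+/* Map of HTC service IDs to CE pipe numbers and transfer directions */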
+static struct service_to_pipe target_service_to_ce_map_wlan[] = {
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(5),
+ },
+ { /* in = DL = target -> host */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(9),
+ },
+ { /* in = DL = target -> host */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(10),
+ },
+ { /* in = DL = target -> host pktlog */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(11),
+ },
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ iowrite32(value, ar_snoc->mem + offset);
+}
+
+u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ u32 val;
+
+ val = ioread32(ar_snoc->mem + offset);
+
+ return val;
+}
+
+static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
+{
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret;
+
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb)
+ return -ENOMEM;
+
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+ paddr = dma_map_single(ar->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ar->dev, paddr))) {
+ ath10k_warn(ar, "failed to dma map snoc rx buf\n");
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+
+ ATH10K_SKB_RXCB(skb)->paddr = paddr;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
+ spin_unlock_bh(&ce->ce_lock);
+ if (ret) {
+ dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
+{
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ int ret, num;
+
+ if (pipe->buf_sz == 0)
+ return;
+
+ if (!ce_pipe->dest_ring)
+ return;
+
+ spin_lock_bh(&ce->ce_lock);
+ num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+ spin_unlock_bh(&ce->ce_lock);
+ while (num--) {
+ ret = __ath10k_snoc_rx_post_buf(pipe);
+ if (ret) {
+ if (ret == -ENOSPC)
+ break;
+ ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
+ mod_timer(&ar_snoc->rx_post_retry, jiffies +
+ ATH10K_SNOC_RX_POST_RETRY_MS);
+ break;
+ }
+ }
+}
+
+static void ath10k_snoc_rx_post(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
+}
+
+static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
+ void (*callback)(struct ath10k *ar,
+ struct sk_buff *skb))
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ void *transfer_context;
+ unsigned int nbytes, max_nbytes;
+
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+ &nbytes) == 0) {
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+
+ callback(ar, skb);
+ }
+
+ ath10k_snoc_rx_post_pipe(pipe_info);
+}
+
+static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ /* CE4 polling needs to be done whenever CE pipe which transports
+ * HTT Rx (target->host) is processed.
+ */
+ ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
+
+ ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
+{
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ ath10k_htt_t2h_msg_handler(ar, skb);
+}
+
+static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
+ ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
+}
+
+static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
+{
+ struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
+ struct ath10k *ar = ar_snoc->ar;
+
+ ath10k_snoc_rx_post(ar);
+}
+
+static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+ if (!skb)
+ continue;
+
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list)))
+ ath10k_htc_tx_completion_handler(ar, skb);
+}
+
+static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct sk_buff *skb;
+
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+ ath10k_htt_hif_tx_complete(ar, skb);
+ }
+}
+
+static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_snoc_pipe *snoc_pipe;
+ struct ath10k_ce_pipe *ce_pipe;
+ int err, i = 0;
+
+ snoc_pipe = &ar_snoc->pipe_info[pipe_id];
+ ce_pipe = snoc_pipe->ce_hdl;
+ spin_lock_bh(&ce->ce_lock);
+
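+ /* Send all but the last item with CE_SEND_FLAG_GATHER so they form one scatter-gather transfer */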
+ for (i = 0; i < n_items - 1; i++) {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "snoc tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ CE_SEND_FLAG_GATHER);
+ if (err)
+ goto err;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "snoc tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ 0);
+ if (err)
+ goto err;
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return 0;
+
+err:
+ for (; i > 0; i--)
+ __ath10k_ce_send_revert(ce_pipe);
+
+ spin_unlock_bh(&ce->ce_lock);
+ return err;
+}
+
+static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *target_info)
+{
+ target_info->version = ATH10K_HW_WCN3990;
+ target_info->type = ATH10K_HW_WCN3990;
+
+ return 0;
+}
+
+static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");
+
+ return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
+}
+
+static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force)
+{
+ int resources;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");
+
+ if (!force) {
+ resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);
+
+ if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+ return;
+ }
+ ath10k_ce_per_engine_service(ar, pipe);
+}
+
+static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");
+
+ for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
+ entry = &target_service_to_ce_map_wlan[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
+static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");
+
+ (void)ath10k_snoc_hif_map_service_to_pipe(ar,
+ ATH10K_HTC_SVC_ID_RSVD_CTRL,
+ ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
+{
+ ath10k_ce_disable_interrupts(ar);
+}
+
+static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
+{
+ ath10k_ce_enable_interrupts(ar);
+}
+
+static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+{
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct sk_buff *skb;
+ struct ath10k *ar;
+ int i;
+
+ ar = snoc_pipe->hif_ce_state;
+ ce_pipe = snoc_pipe->ce_hdl;
+ ce_ring = ce_pipe->dest_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!snoc_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+{
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct ath10k_snoc *ar_snoc;
+ struct sk_buff *skb;
+ struct ath10k *ar;
+ int i;
+
+ ar = snoc_pipe->hif_ce_state;
+ ar_snoc = ath10k_snoc_priv(ar);
+ ce_pipe = snoc_pipe->ce_hdl;
+ ce_ring = ce_pipe->src_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!snoc_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+
+ ath10k_htc_tx_completion_handler(ar, skb);
+ }
+}
+
+static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_pipe *pipe_info;
+ int pipe_num;
+
+ del_timer_sync(&ar_snoc->rx_post_retry);
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ pipe_info = &ar_snoc->pipe_info[pipe_num];
+ ath10k_snoc_rx_pipe_cleanup(pipe_info);
+ ath10k_snoc_tx_pipe_cleanup(pipe_info);
+ }
+}
+
+static void ath10k_snoc_hif_stop(struct ath10k *ar)
+{
+ ath10k_snoc_irq_disable(ar);
+ ath10k_snoc_buffer_cleanup(ar);
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+}
+
+static int ath10k_snoc_hif_start(struct ath10k *ar)
+{
+ ath10k_snoc_irq_enable(ar);
+ ath10k_snoc_rx_post(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+
+ return 0;
+}
+
+static int ath10k_snoc_init_pipes(struct ath10k *ar)
+{
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_snoc_wlan_enable(struct ath10k *ar)
+{
+ return 0;
+}
+
+static void ath10k_snoc_wlan_disable(struct ath10k *ar)
+{
+}
+
+static void ath10k_snoc_hif_power_down(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
+
+ ath10k_snoc_wlan_disable(ar);
+ ath10k_ce_free_rri(ar);
+}
+
+static int ath10k_snoc_hif_power_up(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
+ __func__, ar->state);
+
+ ret = ath10k_snoc_wlan_enable(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_ce_alloc_rri(ar);
+
+ ret = ath10k_snoc_init_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+ goto err_wlan_enable;
+ }
+
+ napi_enable(&ar->napi);
+ return 0;
+
+err_wlan_enable:
+ ath10k_snoc_wlan_disable(ar);
+
+ return ret;
+}
+
+static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
+ .read32 = ath10k_snoc_read32,
+ .write32 = ath10k_snoc_write32,
+ .start = ath10k_snoc_hif_start,
+ .stop = ath10k_snoc_hif_stop,
+ .map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_snoc_hif_get_default_pipe,
+ .power_up = ath10k_snoc_hif_power_up,
+ .power_down = ath10k_snoc_hif_power_down,
+ .tx_sg = ath10k_snoc_hif_tx_sg,
+ .send_complete_check = ath10k_snoc_hif_send_complete_check,
+ .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number,
+ .get_target_info = ath10k_snoc_hif_get_target_info,
+};
+
+static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
+ .read32 = ath10k_snoc_read32,
+ .write32 = ath10k_snoc_write32,
+};
+
+int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int i;
+
+ for (i = 0; i < CE_COUNT_MAX; i++) {
+ if (ar_snoc->ce_irqs[i].irq_line == irq)
+ return i;
+ }
+ ath10k_err(ar, "No matching CE id for irq %d\n", irq);
+
+ return -EINVAL;
+}
+
+static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);
+
+ if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
+ ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
+ ce_id);
+ return IRQ_HANDLED;
+ }
+
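+ /* Mask CE interrupts and let the NAPI poll handler do the actual processing */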
+ ath10k_snoc_irq_disable(ar);
+ napi_schedule(&ar->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done = 0;
+
+ ath10k_ce_per_engine_service_any(ar);
+ done = ath10k_htt_txrx_compl_task(ar, budget);
+
+ if (done < budget) {
+ napi_complete(ctx);
+ ath10k_snoc_irq_enable(ar);
+ }
+
+ return done;
+}
+
+void ath10k_snoc_init_napi(struct ath10k *ar)
+{
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
+ ATH10K_NAPI_BUDGET);
+}
+
+static int ath10k_snoc_request_irq(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int irqflags = IRQF_TRIGGER_RISING;
+ int ret, id;
+
+ for (id = 0; id < CE_COUNT_MAX; id++) {
+ ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
+ ath10k_snoc_per_engine_handler,
+ irqflags, ce_name[id], ar);
+ if (ret) {
+ ath10k_err(ar,
+ "failed to register IRQ handler for CE %d: %d",
+ id, ret);
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ for (id -= 1; id >= 0; id--)
+ free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
+
+ return ret;
+}
+
+static void ath10k_snoc_free_irq(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int id;
+
+ for (id = 0; id < CE_COUNT_MAX; id++)
+ free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
+}
+
+static int ath10k_snoc_resource_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct platform_device *pdev;
+ struct resource *res;
+ int i, ret = 0;
+
+ pdev = ar_snoc->dev;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
+ if (!res) {
+ ath10k_err(ar, "Memory base not found in DT\n");
+ return -EINVAL;
+ }
+
+ ar_snoc->mem_pa = res->start;
+ ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
+ resource_size(res));
+ if (!ar_snoc->mem) {
+ ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
+ &ar_snoc->mem_pa);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CE_COUNT; i++) {
+ res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
+ if (!res) {
+ ath10k_err(ar, "failed to get IRQ%d\n", i);
+ ret = -ENODEV;
+ goto out;
+ }
+ ar_snoc->ce_irqs[i].irq_line = res->start;
+ }
+
+out:
+ return ret;
+}
+
+static int ath10k_snoc_setup_resource(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_snoc_pipe *pipe;
+ int i, ret;
+
+ timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
+ spin_lock_init(&ce->ce_lock);
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ar_snoc->pipe_info[i];
+ pipe->ce_hdl = &ce->ce_states[i];
+ pipe->pipe_num = i;
+ pipe->hif_ce_state = ar;
+
+ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+
+ pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
+ }
+ ath10k_snoc_init_napi(ar);
+
+ return 0;
+}
+
+static void ath10k_snoc_release_resource(struct ath10k *ar)
+{
+ int i;
+
+ netif_napi_del(&ar->napi);
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_free_pipe(ar, i);
+}
+
+static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
+ struct ath10k_wcn3990_vreg_info *vreg_info)
+{
+ struct regulator *reg;
+ int ret = 0;
+
+ reg = devm_regulator_get_optional(dev, vreg_info->name);
+
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+
+ if (ret == -EPROBE_DEFER) {
+ ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
+ vreg_info->name);
+ return ret;
+ }
+ if (vreg_info->required) {
+ ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ return ret;
+ }
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "Optional regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ goto done;
+ }
+
+ vreg_info->reg = reg;
+
+done:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "snog vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
+ vreg_info->name, vreg_info->min_v, vreg_info->max_v,
+ vreg_info->load_ua, vreg_info->settle_delay);
+
+ return 0;
+}
+
+static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
+ struct ath10k_wcn3990_clk_info *clk_info)
+{
+ struct clk *handle;
+ int ret = 0;
+
+ handle = devm_clk_get(dev, clk_info->name);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ if (clk_info->required) {
+ ath10k_err(ar, "snoc clock %s isn't available: %d\n",
+ clk_info->name, ret);
+ return ret;
+ }
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
+ clk_info->name,
+ ret);
+ return 0;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
+ clk_info->name, clk_info->freq);
+
+ clk_info->handle = handle;
+
+ return ret;
+}
+
+static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_wcn3990_vreg_info *vreg_info;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
+ vreg_info->name);
+
+ ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
+ vreg_info->max_v);
+ if (ret) {
+ ath10k_err(ar,
+ "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
+ vreg_info->name, vreg_info->min_v, vreg_info->max_v);
+ goto err_reg_config;
+ }
+
+ if (vreg_info->load_ua) {
+ ret = regulator_set_load(vreg_info->reg,
+ vreg_info->load_ua);
+ if (ret < 0) {
+ ath10k_err(ar,
+ "failed to set regulator %s load: %d\n",
+ vreg_info->name,
+ vreg_info->load_ua);
+ goto err_reg_config;
+ }
+ }
+
+ ret = regulator_enable(vreg_info->reg);
+ if (ret) {
+ ath10k_err(ar, "failed to enable regulator %s\n",
+ vreg_info->name);
+ goto err_reg_config;
+ }
+
+ if (vreg_info->settle_delay)
+ udelay(vreg_info->settle_delay);
+ }
+
+ return 0;
+
+err_reg_config:
+ for (; i >= 0; i--) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ regulator_disable(vreg_info->reg);
+ regulator_set_load(vreg_info->reg, 0);
+ regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_wcn3990_vreg_info *vreg_info;
+ int ret = 0;
+ int i;
+
+ for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
+ vreg_info->name);
+
+ ret = regulator_disable(vreg_info->reg);
+ if (ret)
+ ath10k_err(ar, "failed to disable regulator %s\n",
+ vreg_info->name);
+
+ ret = regulator_set_load(vreg_info->reg, 0);
+ if (ret < 0)
+ ath10k_err(ar, "failed to set load %s\n",
+ vreg_info->name);
+
+ ret = regulator_set_voltage(vreg_info->reg, 0,
+ vreg_info->max_v);
+ if (ret)
+ ath10k_err(ar, "failed to set voltage %s\n",
+ vreg_info->name);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_clk_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_wcn3990_clk_info *clk_info;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
+ clk_info->name);
+
+ if (clk_info->freq) {
+ ret = clk_set_rate(clk_info->handle, clk_info->freq);
+
+ if (ret) {
+ ath10k_err(ar, "failed to set clock %s freq %u\n",
+ clk_info->name, clk_info->freq);
+ goto err_clock_config;
+ }
+ }
+
+ ret = clk_prepare_enable(clk_info->handle);
+ if (ret) {
+ ath10k_err(ar, "failed to enable clock %s\n",
+ clk_info->name);
+ goto err_clock_config;
+ }
+ }
+
+ return 0;
+
+err_clock_config:
+ for (; i >= 0; i--) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ clk_disable_unprepare(clk_info->handle);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_wcn3990_clk_info *clk_info;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
+ clk_info->name);
+
+ clk_disable_unprepare(clk_info->handle);
+ }
+
+ return 0;
+}
+
+static int ath10k_hw_power_on(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
+
+ ret = ath10k_wcn3990_vreg_on(ar);
+ if (ret)
+ return ret;
+
+ ret = ath10k_wcn3990_clk_init(ar);
+ if (ret)
+ goto vreg_off;
+
+ return ret;
+
+vreg_off:
+ ath10k_wcn3990_vreg_off(ar);
+ return ret;
+}
+
+static int ath10k_hw_power_off(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
+
+ ath10k_wcn3990_clk_deinit(ar);
+
+ ret = ath10k_wcn3990_vreg_off(ar);
+
+ return ret;
+}
+
+static const struct of_device_id ath10k_snoc_dt_match[] = {
+ { .compatible = "qcom,wcn3990-wifi",
+ .data = &drv_priv,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
+
+static int ath10k_snoc_probe(struct platform_device *pdev)
+{
+ const struct ath10k_snoc_drv_priv *drv_data;
+ const struct of_device_id *of_id;
+ struct ath10k_snoc *ar_snoc;
+ struct device *dev;
+ struct ath10k *ar;
+ int ret;
+ u32 i;
+
+ of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ return -EINVAL;
+ }
+
+ drv_data = of_id->data;
+ dev = &pdev->dev;
+
+ ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
+ if (ret) {
+ dev_err(dev, "failed to set dma mask: %d", ret);
+ return ret;
+ }
+
+ ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
+ drv_data->hw_rev, &ath10k_snoc_hif_ops);
+ if (!ar) {
+ dev_err(dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ar_snoc = ath10k_snoc_priv(ar);
+ ar_snoc->dev = pdev;
+ platform_set_drvdata(pdev, ar);
+ ar_snoc->ar = ar;
+ ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
+ ar->ce_priv = &ar_snoc->ce;
+
+ ret = ath10k_snoc_resource_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
+ goto err_core_destroy;
+ }
+
+ ret = ath10k_snoc_setup_resource(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup resource: %d\n", ret);
+ goto err_core_destroy;
+ }
+ ret = ath10k_snoc_request_irq(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request irqs: %d\n", ret);
+ goto err_release_resource;
+ }
+
+ ar_snoc->vreg = vreg_cfg;
+ for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
+ ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
+ if (ret)
+ goto err_free_irq;
+ }
+
+ ar_snoc->clk = clk_cfg;
+ for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
+ ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
+ if (ret)
+ goto err_free_irq;
+ }
+
+ ret = ath10k_hw_power_on(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to power on device: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ ret = ath10k_core_register(ar, drv_data->hw_rev);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_hw_power_off;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
+ ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!");
+
+ return 0;
+
+err_hw_power_off:
+ ath10k_hw_power_off(ar);
+
+err_free_irq:
+ ath10k_snoc_free_irq(ar);
+
+err_release_resource:
+ ath10k_snoc_release_resource(ar);
+
+err_core_destroy:
+ ath10k_core_destroy(ar);
+
+ return ret;
+}
+
+static int ath10k_snoc_remove(struct platform_device *pdev)
+{
+ struct ath10k *ar = platform_get_drvdata(pdev);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
+ ath10k_core_unregister(ar);
+ ath10k_hw_power_off(ar);
+ ath10k_snoc_free_irq(ar);
+ ath10k_snoc_release_resource(ar);
+ ath10k_core_destroy(ar);
+
+ return 0;
+}
+
+static struct platform_driver ath10k_snoc_driver = {
+ .probe = ath10k_snoc_probe,
+ .remove = ath10k_snoc_remove,
+ .driver = {
+ .name = "ath10k_snoc",
+ .owner = THIS_MODULE,
+ .of_match_table = ath10k_snoc_dt_match,
+ },
+};
+
+static int __init ath10k_snoc_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&ath10k_snoc_driver);
+ if (ret)
+ pr_err("failed to register ath10k snoc driver: %d\n",
+ ret);
+
+ return ret;
+}
+module_init(ath10k_snoc_init);
+
+static void __exit ath10k_snoc_exit(void)
+{
+ platform_driver_unregister(&ath10k_snoc_driver);
+}
+module_exit(ath10k_snoc_exit);
+
+MODULE_AUTHOR("Qualcomm");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
new file mode 100644
index 000000000000..05dc98f46ccd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SNOC_H_
+#define _SNOC_H_
+
+#include "hw.h"
+#include "ce.h"
+#include "pci.h"
+
+struct ath10k_snoc_drv_priv {
+ enum ath10k_hw_rev hw_rev;
+ u64 dma_mask;
+};
+
+struct snoc_state {
+ u32 pipe_cfg_addr;
+ u32 svc_to_pipe_map;
+};
+
+struct ath10k_snoc_pipe {
+ struct ath10k_ce_pipe *ce_hdl;
+ u8 pipe_num;
+ struct ath10k *hif_ce_state;
+ size_t buf_sz;
+ /* protect ce info */
+ spinlock_t pipe_lock;
+ struct ath10k_snoc *ar_snoc;
+};
+
+struct ath10k_snoc_target_info {
+ u32 target_version;
+ u32 target_type;
+ u32 target_revision;
+ u32 soc_version;
+};
+
+struct ath10k_snoc_ce_irq {
+ u32 irq_line;
+};
+
+struct ath10k_wcn3990_vreg_info {
+ struct regulator *reg;
+ const char *name;
+ u32 min_v;
+ u32 max_v;
+ u32 load_ua;
+ unsigned long settle_delay;
+ bool required;
+};
+
+struct ath10k_wcn3990_clk_info {
+ struct clk *handle;
+ const char *name;
+ u32 freq;
+ bool required;
+};
+
+struct ath10k_snoc {
+ struct platform_device *dev;
+ struct ath10k *ar;
+ void __iomem *mem;
+ dma_addr_t mem_pa;
+ struct ath10k_snoc_target_info target_info;
+ size_t mem_len;
+ struct ath10k_snoc_pipe pipe_info[CE_COUNT_MAX];
+ struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX];
+ struct ath10k_ce ce;
+ struct timer_list rx_post_retry;
+ struct ath10k_wcn3990_vreg_info *vreg;
+ struct ath10k_wcn3990_clk_info *clk;
+};
+
+static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
+{
+ return (struct ath10k_snoc *)ar->drv_priv;
+}
+
+void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value);
+u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset);
+
+#endif /* _SNOC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 70e23bbf7171..cda164f6e9f6 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -119,6 +120,13 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
info->flags &= ~IEEE80211_TX_STAT_ACK;
}
+ if (tx_done->status == HTT_TX_COMPL_STATE_ACK &&
+ tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
+ info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ tx_done->ack_rssi;
+ info->status.is_valid_ack_signal = true;
+ }
+
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index c35e45340b4f..e37d16b31afe 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -25,6 +25,7 @@ struct sk_buff;
struct wmi_ops {
void (*rx)(struct ath10k *ar, struct sk_buff *skb);
void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
+ void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);
int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_scan_ev_arg *arg);
@@ -54,6 +55,9 @@ struct wmi_ops {
struct wmi_wow_ev_arg *arg);
int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_echo_ev_arg *arg);
+ int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_avail_ev_arg *arg);
+
enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
@@ -115,6 +119,8 @@ struct wmi_ops {
u32 value);
struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg);
+ struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
+ u32 prob_req_oui);
struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
const void *bcn, size_t bcn_len,
u32 bcn_paddr, bool dtim_zero,
@@ -230,6 +236,17 @@ ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
}
static inline int
+ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ if (!ar->wmi.ops->map_svc_ext)
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->map_svc_ext(in, out, len);
+ return 0;
+}
+
+static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
struct wmi_scan_ev_arg *arg)
{
@@ -330,6 +347,15 @@ ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
}
static inline int
+ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_avail_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_svc_avail)
+ return -EOPNOTSUPP;
+ return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
+}
+
+static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
struct ath10k_fw_stats *stats)
{
@@ -891,6 +917,26 @@ ath10k_wmi_scan_chan_list(struct ath10k *ar,
}
static inline int
+ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+ u32 prob_req_oui;
+
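+ /* Build the OUI from the first three octets of the device MAC address */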
+ prob_req_oui = (((u32)mac_addr[0]) << 16) |
+ (((u32)mac_addr[1]) << 8) | mac_addr[2];
+
+ if (!ar->wmi.ops->gen_scan_prob_req_oui)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->scan_prob_req_oui_cmdid);
+}
+
+static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
const struct wmi_peer_assoc_complete_arg *arg)
{
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 9d1b0a459069..01f4eb201330 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -594,6 +594,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
+ case WMI_TLV_SERVICE_AVAILABLE_EVENTID:
+ ath10k_wmi_event_service_available(ar, skb);
+ break;
case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
break;
@@ -1117,6 +1120,39 @@ static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
return 0;
}
+static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_svc_avail_ev_arg *arg = data;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
+ arg->service_map_ext_len = *(__le32 *)ptr;
+ arg->service_map_ext = ptr + sizeof(__le32);
+ return 0;
+ default:
+ break;
+ }
+ return -EPROTO;
+}
+
+static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_svc_avail_ev_arg *arg)
+{
+ int ret;
+
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_svc_avail_parse, arg);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
struct ath10k_fw_stats_vdev *dst)
{
@@ -1600,6 +1636,8 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
cmd->ie_len = __cpu_to_le32(arg->ie_len);
cmd->num_probes = __cpu_to_le32(3);
+ ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr);
+ ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr);
/* FIXME: There are some scan flag inconsistencies across firmwares,
* e.g. WMI-TLV inverts the logic behind the following flag.
@@ -2447,6 +2485,27 @@ ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
}
static struct sk_buff *
+ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui)
+{
+ struct wmi_scan_prob_req_oui_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->prob_req_oui = __cpu_to_le32(prob_req_oui);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n");
+ return skb;
+}
+
+static struct sk_buff *
ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
const void *bcn, size_t bcn_len,
u32 bcn_paddr, bool dtim_zero,
@@ -3416,6 +3475,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
.scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
.scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
+ .scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
.pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
.pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
.pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
@@ -3740,6 +3800,7 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
static const struct wmi_ops wmi_tlv_ops = {
.rx = ath10k_wmi_tlv_op_rx,
.map_svc = wmi_tlv_svc_map,
+ .map_svc_ext = wmi_tlv_svc_map_ext,
.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
@@ -3751,6 +3812,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
+ .pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail,
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
@@ -3782,6 +3844,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
+ .gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui,
.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index fa3773ec7c68..954c50bb3f66 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -295,6 +295,7 @@ enum wmi_tlv_cmd_id {
enum wmi_tlv_event_id {
WMI_TLV_SERVICE_READY_EVENTID = 0x1,
WMI_TLV_READY_EVENTID,
+ WMI_TLV_SERVICE_AVAILABLE_EVENTID,
WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN),
WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV),
WMI_TLV_CHAN_INFO_EVENTID,
@@ -949,6 +950,275 @@ enum wmi_tlv_tag {
WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE,
WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD,
WMI_TLV_TAG_STRUCT_MGMT_TX_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT,
+ WMI_TLV_TAG_STRUCT_SOC_SET_ANTENNA_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_UDP_SVC_OFLD_CMD,
+ WMI_TLV_TAG_STRUCT_LRO_INFO_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+ WMI_TLV_TAG_STRUCT_SERVICE_READY_EXT_EVENT,
+ WMI_TLV_TAG_STRUCT_MAWC_SENSOR_REPORT_IND_CMD,
+ WMI_TLV_TAG_STRUCT_MAWC_ENABLE_SENSOR_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_CONFIGURE_MAWC_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_CONFIGURE_MAWC_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_MAWC_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ASSOC_CONF_EVENT,
+ WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_AP_PS_EGAP_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+ WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_CMD,
+ WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_SCPC_EVENT,
+ WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+ WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_BPF_GET_CAPABILITY_CMD,
+ WMI_TLV_TAG_STRUCT_BPF_CAPABILITY_INFO_EVT,
+ WMI_TLV_TAG_STRUCT_BPF_GET_VDEV_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_BPF_VDEV_STATS_INFO_EVT,
+ WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+ WMI_TLV_TAG_STRUCT_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_DELETE_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_DENSE_THRES_PARAM,
+ WMI_TLV_TAG_STRUCT_ENLO_CANDIDATE_SCORE_PARAM,
+ WMI_TLV_TAG_STRUCT_PEER_UPDATE_WDS_ENTRY_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_CONFIG_RATEMASK,
+ WMI_TLV_TAG_STRUCT_PDEV_FIPS_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_SWITCH_TBL_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_CTL_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_MIMOGAIN_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_FWTEST_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ATF_REQUEST,
+ WMI_TLV_TAG_STRUCT_VDEV_ATF_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_CCK_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_INST_RSSI_STATS_RESP,
+ WMI_TLV_TAG_STRUCT_MED_UTIL_REPORT_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STA_PS_STATECHANGE_EVENT,
+ WMI_TLV_TAG_STRUCT_WDS_ADDR_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_RATECODE_LIST_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_TPC_EVENT,
+ WMI_TLV_TAG_STRUCT_ANI_OFDM_EVENT,
+ WMI_TLV_TAG_STRUCT_ANI_CCK_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_CHANNEL_HOPPING_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_FIPS_EVENT,
+ WMI_TLV_TAG_STRUCT_ATF_PEER_INFO,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_FILTER_NRP_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_QBOOST_CFG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_GPIO_HANDLE,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_CTRL_CHAIN,
+ WMI_TLV_TAG_STRUCT_PEER_CCK_OFDM_RATE_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_MCS_RATE_INFO,
+ WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+ WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+ WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+ WMI_TLV_TAG_STRUCT_MU_REPORT_TOTAL_MU,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_SET_MBO,
+ WMI_TLV_TAG_STRUCT_MIB_STATS_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_CREATED_EVENT,
+ WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_DELETED_EVENT,
+ WMI_TLV_TAG_STRUCT_NAN_STARTED_CLUSTER_EVENT,
+ WMI_TLV_TAG_STRUCT_NAN_JOINED_CLUSTER_EVENT,
+ WMI_TLV_TAG_STRUCT_NDI_GET_CAP_REQ,
+ WMI_TLV_TAG_STRUCT_NDP_INITIATOR_REQ,
+ WMI_TLV_TAG_STRUCT_NDP_RESPONDER_REQ,
+ WMI_TLV_TAG_STRUCT_NDP_END_REQ,
+ WMI_TLV_TAG_STRUCT_NDI_CAP_RSP_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_INITIATOR_RSP_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_RESPONDER_RSP_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_END_RSP_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_INDICATION_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_CONFIRM_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_END_INDICATION_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_QUIET_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_PCL_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_HW_MODE_TRANSITION_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_COEX_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_CONFIG_ENHANCED_MCAST_FILTER,
+ WMI_TLV_TAG_STRUCT_CHAN_AVOID_RPT_ALLOW_CMD,
+ WMI_TLV_TAG_STRUCT_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_WAL_POWER_DEBUG_CMD,
+ WMI_TLV_TAG_STRUCT_MAC_PHY_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_HW_MODE_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_SOC_MAC_PHY_HW_MODE_CAPS,
+ WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES_EXT,
+ WMI_TLV_TAG_STRUCT_SOC_HAL_REG_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_VDEV_WISA_CMD,
+ WMI_TLV_TAG_STRUCT_TX_POWER_LEVEL_STATS_EVT,
+ WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+ WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_CONFIG,
+ WMI_TLV_TAG_STRUCT_WOW_SET_ACTION_WAKE_UP_CMD,
+ WMI_TLV_TAG_STRUCT_NDP_END_RSP_PER_NDI,
+ WMI_TLV_TAG_STRUCT_PEER_BWF_REQUEST,
+ WMI_TLV_TAG_STRUCT_BWF_PEER_INFO,
+ WMI_TLV_TAG_STRUCT_DBGLOG_TIME_STAMP_SYNC_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_SET_LEADER_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_MANUAL_LEADER_EVENT,
+ WMI_TLV_TAG_STRUCT_PER_CHAIN_RSSI_STATS,
+ WMI_TLV_TAG_STRUCT_RSSI_STATS,
+ WMI_TLV_TAG_STRUCT_P2P_LO_START_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_LO_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_LO_STOPPED_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_SETUP_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_REMOVE_CMD,
+ WMI_TLV_TAG_STRUCT_SET_MULTIPLE_MCAST_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT,
+ WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_CMD,
+ WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SET_RX_BLOCKSIZE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_TLV_TAG_STRUCT_TLV_BUF_LEN_PARAM,
+ WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_REQ_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_ANTDIV_STATUS_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_ANTDIV_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_MNT_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_GET_CHIP_POWER_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_CHIP_POWER_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_COEX_GET_ANTENNA_ISOLATION_CMD,
+ WMI_TLV_TAG_STRUCT_COEX_REPORT_ISOLATION_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS,
+ WMI_TLV_TAG_STRUCT_TX_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_AC_TX_STATS,
+ WMI_TLV_TAG_STRUCT_RX_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_AC_RX_STATS,
+ WMI_TLV_TAG_STRUCT_REPORT_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS_THRESH,
+ WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS_THRESH,
+ WMI_TLV_TAG_STRUCT_TX_STATS_THRESH,
+ WMI_TLV_TAG_STRUCT_RX_STATS_THRESH,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_STATS_THRESHOLD_CMD,
+ WMI_TLV_TAG_STRUCT_REQUEST_WLAN_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_EVENT,
+ WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_INFO,
+ WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_BAND_TO_MAC,
+ WMI_TLV_TAG_STRUCT_TBTT_OFFSET_INFO,
+ WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EXT_EVENT,
+ WMI_TLV_TAG_STRUCT_SAR_LIMITS_CMD,
+ WMI_TLV_TAG_STRUCT_SAR_LIMIT_CMD_ROW,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_ADFS_CH_CFG_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_ABORT_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_RADAR_DETECTION_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_DFS_CAC_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_VENDOR_OUI,
+ WMI_TLV_TAG_STRUCT_REQUEST_RCPI_CMD,
+ WMI_TLV_TAG_STRUCT_UPDATE_RCPI_EVENT,
+ WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_STATS_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PKGID_EVENT,
+ WMI_TLV_TAG_STRUCT_CONNECTED_NLO_RSSI_PARAMS,
+ WMI_TLV_TAG_STRUCT_SET_CURRENT_COUNTRY_CMD,
+ WMI_TLV_TAG_STRUCT_REGULATORY_RULE_STRUCT,
+ WMI_TLV_TAG_STRUCT_REG_CHAN_LIST_CC_EVENT,
+ WMI_TLV_TAG_STRUCT_11D_SCAN_START_CMD,
+ WMI_TLV_TAG_STRUCT_11D_SCAN_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_11D_NEW_COUNTRY_EVENT,
+ WMI_TLV_TAG_STRUCT_REQUEST_RADIO_CHAN_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS,
+ WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_PER_CONFIG,
+ WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_HW_DATA_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+ WMI_TLV_TAG_STRUCT_PEER_OPER_MODE_CHANGE_EVENT,
+ WMI_TLV_TAG_STRUCT_CHIP_POWER_SAVE_FAILURE_DETECTED,
+ WMI_TLV_TAG_STRUCT_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PKT_ROUTING_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_DIVERSITY_GAIN_CMD,
+ WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_COMBO,
+ WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_CAPABILITY,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_ARP_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_IFACE_OFFLOAD_STATS,
+ WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+ WMI_TLV_TAG_STRUCT_RSSI_CTL_EXT,
+ WMI_TLV_TAG_STRUCT_SINGLE_PHYERR_EXT_RX_HDR,
+ WMI_TLV_TAG_STRUCT_COEX_BT_ACTIVITY_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_TX_POWER_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_TX_POWER_EVENT,
+ WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_COMPL_EVENT,
+ WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_SEND_CMD,
+ WMI_TLV_TAG_STRUCT_TX_SEND_PARAMS,
+ WMI_TLV_TAG_STRUCT_HE_RATE_SET,
+ WMI_TLV_TAG_STRUCT_CONGESTION_STATS,
+ WMI_TLV_TAG_STRUCT_SET_INIT_COUNTRY_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE,
+ WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+ WMI_TLV_TAG_STRUCT_PDEV_DIV_GET_RSSI_ANTID,
+ WMI_TLV_TAG_STRUCT_THERM_THROT_CONFIG_REQUEST,
+ WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_CONFIG_INFO,
+ WMI_TLV_TAG_STRUCT_THERM_THROT_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_STATS_INFO,
+ WMI_TLV_TAG_STRUCT_PDEV_DIV_RSSI_ANTID_EVENT,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_REQ,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_RSP,
+ WMI_TLV_TAG_STRUCT_OEM_INDIRECT_DATA,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE_ENTRY,
+ WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_LCA_DISALLOW_CONFIG_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_VDEV_LIMIT_OFFCHAN_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_RSSI_REJECTION_OCE_CONFIG_PARAM,
+ WMI_TLV_TAG_STRUCT_UNIT_TEST_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_FILS_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PMK_CACHE_CMD,
+ WMI_TLV_TAG_STRUCT_PMK_CACHE,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_FILS_SYNCH_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_EXTENDED_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_BG_SCAN_ROAMING_PARAM,
+ WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_OIC_PING_HANDOFF_EVENT,
+ WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_EVENT,
+ WMI_TLV_TAG_STRUCT_BTM_CONFIG,
+ WMI_TLV_TAG_STRUCT_DEBUG_MESG_FW_DATA_STALL_PARAM,
+ WMI_TLV_TAG_STRUCT_WLM_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_CND_SCORING_PARAM,
+ WMI_TLV_TAG_STRUCT_PDEV_CONFIG_VENDOR_OUI_ACTION,
+ WMI_TLV_TAG_STRUCT_VENDOR_OUI_EXT,
+ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_FRAME_EVENT,
+ WMI_TLV_TAG_STRUCT_FD_SEND_FROM_HOST_CMD,
+ WMI_TLV_TAG_STRUCT_ENABLE_FILS_CMD,
+ WMI_TLV_TAG_STRUCT_HOST_SWFDA_EVENT,
WMI_TLV_TAG_MAX
};
@@ -1068,16 +1338,74 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_WLAN_STATS_REPORT,
WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT,
WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD,
+ WMI_TLV_SERVICE_RCPI_SUPPORT,
+ WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT,
+ WMI_TLV_SERVICE_PEER_STATS_INFO,
+ WMI_TLV_SERVICE_REGULATORY_DB,
+ WMI_TLV_SERVICE_11D_OFFLOAD,
+ WMI_TLV_SERVICE_HW_DATA_FILTERING,
+ WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART,
+ WMI_TLV_SERVICE_PKT_ROUTING,
+ WMI_TLV_SERVICE_CHECK_CAL_VERSION,
+ WMI_TLV_SERVICE_OFFCHAN_TX_WMI,
+ WMI_TLV_SERVICE_8SS_TX_BFEE,
+ WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT,
+ WMI_TLV_SERVICE_ACK_TIMEOUT,
+ WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64,
+ WMI_TLV_MAX_SERVICE = 128,
+
+/* NOTE:
+ * The above service flags are delivered in the wmi_service_bitmap field
+ * of the WMI_TLV_SERVICE_READY_EVENT message.
+ * The below service flags are delivered in a WMI_TLV_SERVICE_AVAILABLE_EVENT
+ * message rather than in the WMI_TLV_SERVICE_READY_EVENT message's
+ * wmi_service_bitmap field.
+ * The WMI_TLV_SERVICE_AVAILABLE_EVENT message immediately precedes the
+ * WMI_TLV_SERVICE_READY_EVENT message.
+ */
+
+ WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+ WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT,
+ WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT,
+ WMI_TLV_SERVICE_FILS_SUPPORT,
+ WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD,
+ WMI_TLV_SERVICE_WLAN_DHCP_RENEW,
+ WMI_TLV_SERVICE_MAWC_SUPPORT,
+ WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG,
+ WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT,
+ WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT,
+ WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT,
+ WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT,
+ WMI_TLV_SERVICE_THERM_THROT,
+ WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT,
+ WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN,
+ WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+ WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+ WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+ WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+ WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+ WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+ WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+ WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+ WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+ WMI_TLV_SERVICE_STA_TWT = 152,
+ WMI_TLV_SERVICE_AP_TWT = 153,
+ WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+ WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+
+ WMI_TLV_MAX_EXT_SERVICE = 256,
};
-#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
- ((svc_id) < (len) && \
- __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
- BIT((svc_id) % (sizeof(u32))))
+#define WMI_TLV_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+ ((svc_id) < (WMI_TLV_MAX_EXT_SERVICE) && \
+ (svc_id) >= (len) && \
+ __le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 32]) & \
+ BIT(((((svc_id) - (len)) % 32) & 0x1f)))
#define SVCMAP(x, y, len) \
do { \
- if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
+ if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \
+ (WMI_TLV_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \
__set_bit(y, out); \
} while (0)
@@ -1228,6 +1556,14 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
WMI_SERVICE_MGMT_TX_WMI, len);
}
+static inline void
+wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
+{
+ SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT,
+ WMI_SERVICE_SPOOF_MAC_SUPPORT,
+ WMI_TLV_MAX_SERVICE);
+}
+
#undef SVCMAP
struct wmi_tlv {
@@ -1370,6 +1706,15 @@ struct wmi_tlv_scan_chan_list_cmd {
__le32 num_scan_chans;
} __packed;
+struct wmi_scan_prob_req_oui_cmd {
+/* OUI to be used in the Probe Request frame when a random MAC address is
+ * requested as part of the scan parameters. This is applied to both FW
+ * internal scans and host initiated scans. The host can request a random
+ * MAC address with the WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ flag.
+ */
+ __le32 prob_req_oui;
+} __packed;
+
struct wmi_tlv_start_scan_cmd {
struct wmi_start_scan_common common;
__le32 burst_duration_ms;
@@ -1378,6 +1723,8 @@ struct wmi_tlv_start_scan_cmd {
__le32 num_ssids;
__le32 ie_len;
__le32 num_probes;
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
} __packed;
struct wmi_tlv_vdev_start_cmd {
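A note on the extended service bitmap handling added above: WMI_TLV_EXT_SERVICE_IS_ENABLED() rebases the service id by the size of the legacy bitmap (WMI_TLV_MAX_SERVICE, 128 bits) before indexing the 32-bit words carried in the WMI_TLV_SERVICE_AVAILABLE_EVENT. A minimal userspace sketch of that arithmetic follows; the helper name ext_service_enabled() and the sample bitmap are illustrative only and not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the WMI_TLV_EXT_SERVICE_IS_ENABLED() bit arithmetic.
 * Extended service ids start at WMI_TLV_MAX_SERVICE (128), so the id is
 * rebased before indexing the 32-bit words of the extended bitmap.
 */
#define TLV_MAX_SERVICE		128	/* legacy bitmap size, in bits */
#define TLV_MAX_EXT_SERVICE	256

static int ext_service_enabled(const uint32_t *ext_bmap, unsigned int svc_id)
{
	unsigned int idx;

	if (svc_id < TLV_MAX_SERVICE || svc_id >= TLV_MAX_EXT_SERVICE)
		return 0;

	idx = svc_id - TLV_MAX_SERVICE;
	return !!(ext_bmap[idx / 32] & (1u << (idx % 32)));
}

int main(void)
{
	/* Pretend the firmware reported only SPOOF_MAC_SUPPORT (id 155),
	 * i.e. rebased bit 27 of word 0 of the extended bitmap.
	 */
	uint32_t ext_bmap[4] = { 1u << 27, 0, 0, 0 };

	printf("spoof mac (155): %d\n", ext_service_enabled(ext_bmap, 155));
	printf("sta twt   (152): %d\n", ext_service_enabled(ext_bmap, 152));
	return 0;
}

With the enum values added above, WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT (155) therefore maps to bit 27 of the first extended word, which wmi_tlv_svc_map_ext() translates into WMI_SERVICE_SPOOF_MAC_SUPPORT.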
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index c5e1ca5945db..df2e92a6c9bd 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -42,6 +42,7 @@ static struct wmi_cmd_map wmi_cmd_map = {
.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
@@ -207,6 +208,7 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
@@ -374,6 +376,7 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
@@ -541,6 +544,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
.scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
.pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
.pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
@@ -1338,6 +1342,7 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = {
.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
@@ -4357,7 +4362,7 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
rate_code[i],
type);
snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
- strncat(tpc_value, buff, strlen(buff));
+ strlcat(tpc_value, buff, sizeof(tpc_value));
}
tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
@@ -4694,7 +4699,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
rate_code[i],
type, pream_idx);
snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
- strncat(tpc_value, buff, strlen(buff));
+ strlcat(tpc_value, buff, sizeof(tpc_value));
}
tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx;
tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i];
@@ -5059,7 +5064,6 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
return;
}
- memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
arg.service_map_len);
@@ -5269,6 +5273,21 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
+void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
+{
+ int ret;
+ struct wmi_svc_avail_ev_arg arg = {};
+
+ ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse service available event: %d\n",
+ ret);
+ }
+
+ ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
+ __le32_to_cpu(arg.service_map_ext_len));
+}
+
static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
{
const struct wmi_pdev_temperature_event *ev;
@@ -5465,6 +5484,9 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_ready(ar, skb);
ath10k_wmi_queue_set_coverage_class_work(ar);
break;
+ case WMI_SERVICE_AVAILABLE_EVENTID:
+ ath10k_wmi_event_service_available(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@@ -5880,6 +5902,8 @@ int ath10k_wmi_connect(struct ath10k *ar)
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
+ memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
+
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
@@ -7648,7 +7672,7 @@ ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
cmd->param = __cpu_to_le32(param);
ath10k_dbg(ar, ATH10K_DBG_WMI,
- "wmi pdev get tcp config param:%d\n", param);
+ "wmi pdev get tpc config param %d\n", param);
return skb;
}
@@ -7768,7 +7792,7 @@ ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HW rate", pdev->data_rc);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "Sched self tiggers", pdev->self_triggers);
+ "Sched self triggers", pdev->self_triggers);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Dropped due to SW retries",
pdev->sw_retry_failure);
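The strncat() to strlcat() conversions above are worth a note: strncat()'s third argument only bounds how many bytes are read from the source, so passing strlen(buff) gives no protection for tpc_value at all, while strlcat() bounds the result against the full destination size and keeps it NUL-terminated. A standalone sketch of the difference; my_strlcat() is a userspace stand-in for the kernel helper and the buffer sizes are made up for illustration.

#include <stdio.h>
#include <string.h>

/* Userspace stand-in for the kernel's strlcat() (lib/string.c). */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strlen(dst);
	size_t slen = strlen(src);

	if (dlen < size)
		snprintf(dst + dlen, size - dlen, "%s", src);
	return dlen + slen;
}

int main(void)
{
	char tpc_value[16] = "1 2 3 4 5 6 7 ";
	const char buff[] = "8 9 10 ";

	/* strncat(tpc_value, buff, strlen(buff)) would copy all of buff
	 * regardless of the space left in tpc_value and could overflow
	 * it; the size argument limits the *source*, not the destination.
	 * strlcat() caps the result at sizeof(tpc_value) instead.
	 */
	my_strlcat(tpc_value, buff, sizeof(tpc_value));
	printf("%s\n", tpc_value);	/* "1 2 3 4 5 6 7 8" */
	return 0;
}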
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 6fbc84c29521..16a39244a34f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -201,6 +201,8 @@ enum wmi_service {
WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
WMI_SERVICE_HOST_DFS_CHECK_SUPPORT,
WMI_SERVICE_TPC_STATS_FINAL,
+ WMI_SERVICE_RESET_CHIP,
+ WMI_SERVICE_SPOOF_MAC_SUPPORT,
/* keep last */
WMI_SERVICE_MAX,
@@ -238,6 +240,8 @@ enum wmi_10x_service {
WMI_10X_SERVICE_MESH,
WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT,
WMI_10X_SERVICE_PEER_STATS,
+ WMI_10X_SERVICE_RESET_CHIP,
+ WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
};
enum wmi_main_service {
@@ -548,6 +552,10 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_EXT_RES_CFG_SUPPORT, len);
SVCMAP(WMI_10X_SERVICE_PEER_STATS,
WMI_SERVICE_PEER_STATS, len);
+ SVCMAP(WMI_10X_SERVICE_RESET_CHIP,
+ WMI_SERVICE_RESET_CHIP, len);
+ SVCMAP(WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len);
}
static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
@@ -783,6 +791,7 @@ struct wmi_cmd_map {
u32 stop_scan_cmdid;
u32 scan_chan_list_cmdid;
u32 scan_sch_prio_tbl_cmdid;
+ u32 scan_prob_req_oui_cmdid;
u32 pdev_set_regdomain_cmdid;
u32 pdev_set_channel_cmdid;
u32 pdev_set_param_cmdid;
@@ -1183,6 +1192,7 @@ enum wmi_cmd_id {
enum wmi_event_id {
WMI_SERVICE_READY_EVENTID = 0x1,
WMI_READY_EVENTID,
+ WMI_SERVICE_AVAILABLE_EVENTID,
/* Scan specific events */
WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
@@ -3159,6 +3169,8 @@ struct wmi_start_scan_arg {
u16 channels[64];
struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
};
/* scan control flags */
@@ -3182,6 +3194,12 @@ struct wmi_start_scan_arg {
*/
#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+/* Use a random MAC address as the TA of the Probe Request frame and add the
+ * OUI specified by WMI_SCAN_PROB_REQ_OUI_CMDID to the Probe Request frame.
+ * If the OUI is not set via WMI_SCAN_PROB_REQ_OUI_CMDID, the flag is ignored.
+ */
+#define WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ 0x1000
+
/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
#define WMI_SCAN_CLASS_MASK 0xFF000000
@@ -6632,6 +6650,11 @@ struct wmi_svc_rdy_ev_arg {
const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
};
+struct wmi_svc_avail_ev_arg {
+ __le32 service_map_ext_len;
+ const __le32 *service_map_ext;
+};
+
struct wmi_rdy_ev_arg {
__le32 sw_version;
__le32 abi_version;
@@ -6812,6 +6835,10 @@ struct wmi_wow_ev_arg {
#define WOW_MIN_PATTERN_SIZE 1
#define WOW_MAX_PATTERN_SIZE 148
#define WOW_MAX_PKT_OFFSET 128
+#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \
+ sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+ offsetof(struct ieee80211_hdr_3addr, addr1))
enum wmi_tdls_state {
WMI_TDLS_DISABLE,
@@ -7052,6 +7079,7 @@ void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf,
int left_len, struct wmi_phyerr_ev_arg *arg);
void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index c4cbccb29b31..a6b179f88d36 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -76,6 +77,109 @@ static int ath10k_wow_cleanup(struct ath10k *ar)
return 0;
}
+/**
+ * Convert an 802.3 format frame into an 802.11 format frame.
+ * +------------+-----------+--------+----------------+
+ * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... |
+ * +------------+-----------+--------+----------------+
+ * |__ |_______ |____________ |________
+ * | | | |
+ * +--+------------+----+-----------+---------------+-----------+
+ * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
+ * +--+------------+----+-----------+---------------+-----------+
+ */
+static void ath10k_wow_convert_8023_to_80211
+ (struct cfg80211_pkt_pattern *new,
+ const struct cfg80211_pkt_pattern *old)
+{
+ u8 hdr_8023_pattern[ETH_HLEN] = {};
+ u8 hdr_8023_bit_mask[ETH_HLEN] = {};
+ u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
+ u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
+
+ int total_len = old->pkt_offset + old->pattern_len;
+ int hdr_80211_end_offset;
+
+ struct ieee80211_hdr_3addr *new_hdr_pattern =
+ (struct ieee80211_hdr_3addr *)hdr_80211_pattern;
+ struct ieee80211_hdr_3addr *new_hdr_mask =
+ (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
+ struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
+ struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
+ int hdr_len = sizeof(*new_hdr_pattern);
+
+ struct rfc1042_hdr *new_rfc_pattern =
+ (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
+ struct rfc1042_hdr *new_rfc_mask =
+ (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
+ int rfc_len = sizeof(*new_rfc_pattern);
+
+ memcpy(hdr_8023_pattern + old->pkt_offset,
+ old->pattern, ETH_HLEN - old->pkt_offset);
+ memcpy(hdr_8023_bit_mask + old->pkt_offset,
+ old->mask, ETH_HLEN - old->pkt_offset);
+
+ /* Copy destination address */
+ memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
+ memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
+
+ /* Copy source address */
+ memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
+ memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
+
+ /* Copy logical link type */
+ memcpy(&new_rfc_pattern->snap_type,
+ &old_hdr_pattern->h_proto,
+ sizeof(old_hdr_pattern->h_proto));
+ memcpy(&new_rfc_mask->snap_type,
+ &old_hdr_mask->h_proto,
+ sizeof(old_hdr_mask->h_proto));
+
+ /* Calculate new pkt_offset */
+ if (old->pkt_offset < ETH_ALEN)
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+ else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr3) -
+ offsetof(struct ethhdr, h_source);
+ else
+ new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
+
+ /* Calculate new hdr end offset */
+ if (total_len > ETH_HLEN)
+ hdr_80211_end_offset = hdr_len + rfc_len;
+ else if (total_len > offsetof(struct ethhdr, h_proto))
+ hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
+ else if (total_len > ETH_ALEN)
+ hdr_80211_end_offset = total_len - ETH_ALEN +
+ offsetof(struct ieee80211_hdr_3addr, addr3);
+ else
+ hdr_80211_end_offset = total_len +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+
+ new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
+
+ memcpy((u8 *)new->pattern,
+ hdr_80211_pattern + new->pkt_offset,
+ new->pattern_len);
+ memcpy((u8 *)new->mask,
+ hdr_80211_bit_mask + new->pkt_offset,
+ new->pattern_len);
+
+ if (total_len > ETH_HLEN) {
+ /* Copy frame body */
+ memcpy((u8 *)new->pattern + new->pattern_len,
+ (void *)old->pattern + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+ memcpy((u8 *)new->mask + new->pattern_len,
+ (void *)old->mask + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+
+ new->pattern_len += total_len - ETH_HLEN;
+ }
+}
+
static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
struct cfg80211_wowlan *wowlan)
{
@@ -116,22 +220,40 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
for (i = 0; i < wowlan->n_patterns; i++) {
u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ struct cfg80211_pkt_pattern new_pattern = {};
+ struct cfg80211_pkt_pattern old_pattern = patterns[i];
int j;
+ new_pattern.pattern = ath_pattern;
+ new_pattern.mask = ath_bitmask;
if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
continue;
-
/* convert bytemask to bitmask */
for (j = 0; j < patterns[i].pattern_len; j++)
if (patterns[i].mask[j / 8] & BIT(j % 8))
bitmask[j] = 0xff;
+ old_pattern.mask = bitmask;
+ new_pattern = old_pattern;
+
+ if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
+ if (patterns[i].pkt_offset < ETH_HLEN)
+ ath10k_wow_convert_8023_to_80211(&new_pattern,
+ &old_pattern);
+ else
+ new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+ }
+
+ if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
pattern_id,
- patterns[i].pattern,
- bitmask,
- patterns[i].pattern_len,
- patterns[i].pkt_offset);
+ new_pattern.pattern,
+ new_pattern.mask,
+ new_pattern.pattern_len,
+ new_pattern.pkt_offset);
if (ret) {
ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
pattern_id,
@@ -345,6 +467,12 @@ int ath10k_wow_init(struct ath10k *ar)
return -EINVAL;
ar->wow.wowlan_support = ath10k_wowlan_support;
+
+ if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
+ ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
+ ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
+ }
+
ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
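For reference, the pkt_offset remapping performed by the new ath10k_wow_convert_8023_to_80211() boils down to the sketch below, assuming the usual struct layouts (ethhdr is 14 bytes, ieee80211_hdr_3addr is 24 bytes with addr1 at offset 4 and addr3 at offset 16, rfc1042_hdr is 8 bytes). The helper name and the sample offsets are illustrative, not part of the patch.

#include <stdio.h>

/* Sketch of the new pkt_offset calculation in
 * ath10k_wow_convert_8023_to_80211(), using hard-coded offsets that
 * match the usual layouts of the kernel structs involved.
 */
#define ETH_ALEN	6
#define ETH_HLEN	14
#define HDR_80211_LEN	24	/* sizeof(struct ieee80211_hdr_3addr) */
#define RFC1042_LEN	8	/* sizeof(struct rfc1042_hdr) */
#define ADDR1_OFF	4	/* offsetof(ieee80211_hdr_3addr, addr1) */
#define ADDR3_OFF	16	/* offsetof(ieee80211_hdr_3addr, addr3) */
#define H_SOURCE_OFF	6	/* offsetof(ethhdr, h_source) */
#define H_PROTO_OFF	12	/* offsetof(ethhdr, h_proto) */

static int new_pkt_offset(int old_off)
{
	if (old_off < ETH_ALEN)		/* inside the destination MAC */
		return old_off + ADDR1_OFF;
	if (old_off < H_PROTO_OFF)	/* inside the source MAC */
		return old_off + ADDR3_OFF - H_SOURCE_OFF;
	/* at or past the ethertype: account for 802.11 + SNAP headers */
	return old_off + HDR_80211_LEN + RFC1042_LEN - ETH_HLEN;
}

int main(void)
{
	printf("dest MAC byte 0 -> %d\n", new_pkt_offset(0));	/* 4  */
	printf("src MAC byte 0  -> %d\n", new_pkt_offset(6));	/* 16 */
	printf("ethertype       -> %d\n", new_pkt_offset(12));	/* 30 */
	return 0;
}

With these layouts WOW_MAX_REDUCE evaluates to 32 - 14 - 4 = 14, which is the amount subtracted from the advertised pattern_max_len and max_pkt_offset in ath10k_wow_init() above.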
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 0f965e9f38a4..4e94b22eaada 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -645,7 +645,7 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
"CRC Err", tgt_stats->rx_crc_err);
len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
- "Key chache miss", tgt_stats->rx_key_cache_miss);
+ "Key cache miss", tgt_stats->rx_key_cache_miss);
len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
"Decrypt Err", tgt_stats->rx_decrypt_err);
len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index db95f85751e3..808fb30be9ad 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -426,7 +426,7 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
{
u8 *ies = NULL, *wpa_ie = NULL, *pos;
size_t ies_len = 0;
- struct station_info sinfo;
+ struct station_info *sinfo;
ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
@@ -482,16 +482,20 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
keymgmt, ucipher, auth, apsd_info);
/* send event to application */
- memset(&sinfo, 0, sizeof(sinfo));
+ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+ if (!sinfo)
+ return;
/* TODO: sinfo.generation */
- sinfo.assoc_req_ies = ies;
- sinfo.assoc_req_ies_len = ies_len;
+ sinfo->assoc_req_ies = ies;
+ sinfo->assoc_req_ies_len = ies_len;
- cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);
+ cfg80211_new_sta(vif->ndev, mac_addr, sinfo, GFP_KERNEL);
netif_wake_queue(vif->ndev);
+
+ kfree(sinfo);
}
void disconnect_timer_handler(struct timer_list *t)
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 6fee9a464cce..c8844f55574c 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -41,7 +41,7 @@ static const int BIN_DELTA_MAX = 10;
/* we need at least 3 deltas / 4 samples for a reliable chirp detection */
#define NUM_DIFFS 3
-static const int FFT_NUM_SAMPLES = (NUM_DIFFS + 1);
+#define FFT_NUM_SAMPLES (NUM_DIFFS + 1)
/* Threshold for difference of delta peaks */
static const int MAX_DIFF = 2;
@@ -114,7 +114,7 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
ath_dbg(common, DFS, "HT40: datalen=%d, num_fft_packets=%d\n",
datalen, num_fft_packets);
- if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+ if (num_fft_packets < FFT_NUM_SAMPLES) {
ath_dbg(common, DFS, "not enough packets for chirp\n");
return false;
}
@@ -136,7 +136,7 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
return false;
ath_dbg(common, DFS, "HT20: datalen=%d, num_fft_packets=%d\n",
datalen, num_fft_packets);
- if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+ if (num_fft_packets < FFT_NUM_SAMPLES) {
ath_dbg(common, DFS, "not enough packets for chirp\n");
return false;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 2c3b899a88fa..bd2b946a65c9 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -78,7 +78,6 @@ static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
if (!cur_ctl)
goto out_fail;
- spin_lock_init(&cur_ctl->skb_lock);
cur_ctl->ctl_blk_order = i;
if (i == 0) {
ch->head_blk_ctl = cur_ctl;
@@ -275,12 +274,14 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
return 0;
}
-static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
+static int wcn36xx_dxe_fill_skb(struct device *dev,
+ struct wcn36xx_dxe_ctl *ctl,
+ gfp_t gfp)
{
struct wcn36xx_dxe_desc *dxe = ctl->desc;
struct sk_buff *skb;
- skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+ skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
if (skb == NULL)
return -ENOMEM;
@@ -307,7 +308,7 @@ static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
cur_ctl = wcn_ch->head_blk_ctl;
for (i = 0; i < wcn_ch->desc_num; i++) {
- wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
+ wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
cur_ctl = cur_ctl->next;
}
@@ -367,7 +368,7 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
spin_lock_irqsave(&ch->lock, flags);
ctl = ch->tail_blk_ctl;
do {
- if (ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD)
+ if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
break;
if (ctl->skb) {
dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
@@ -377,18 +378,16 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
/* Keep frame until TX status comes */
ieee80211_free_txskb(wcn->hw, ctl->skb);
}
- spin_lock(&ctl->skb_lock);
+
if (wcn->queues_stopped) {
wcn->queues_stopped = false;
ieee80211_wake_queues(wcn->hw);
}
- spin_unlock(&ctl->skb_lock);
ctl->skb = NULL;
}
ctl = ctl->next;
- } while (ctl != ch->head_blk_ctl &&
- !(ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD));
+ } while (ctl != ch->head_blk_ctl);
ch->tail_blk_ctl = ctl;
spin_unlock_irqrestore(&ch->lock, flags);
@@ -530,10 +529,10 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
int_mask = WCN36XX_DXE_INT_CH3_MASK;
}
- while (!(dxe->ctrl & WCN36xx_DXE_CTRL_VLD)) {
+ while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
skb = ctl->skb;
dma_addr = dxe->dst_addr_l;
- ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl);
+ ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
if (0 == ret) {
/* new skb allocation ok. Use the new one and queue
* the old one to network system.
@@ -654,8 +653,6 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
spin_lock_irqsave(&ch->lock, flags);
ctl = ch->head_blk_ctl;
- spin_lock(&ctl->next->skb_lock);
-
/*
* If skb is not null that means that we reached the tail of the ring
* hence ring is full. Stop queues to let mac80211 back off until ring
@@ -664,11 +661,9 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
if (NULL != ctl->next->skb) {
ieee80211_stop_queues(wcn->hw);
wcn->queues_stopped = true;
- spin_unlock(&ctl->next->skb_lock);
spin_unlock_irqrestore(&ch->lock, flags);
return -EBUSY;
}
- spin_unlock(&ctl->next->skb_lock);
ctl->skb = NULL;
desc = ctl->desc;
@@ -693,7 +688,6 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
/* Set source address of the SKB we send */
ctl = ctl->next;
- ctl->skb = skb;
desc = ctl->desc;
if (ctl->bd_cpu_addr) {
wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
@@ -702,10 +696,16 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
}
desc->src_addr_l = dma_map_single(wcn->dev,
- ctl->skb->data,
- ctl->skb->len,
+ skb->data,
+ skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(wcn->dev, desc->src_addr_l)) {
+ dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ ctl->skb = skb;
desc->dst_addr_l = ch->dxe_wq;
desc->fr_len = ctl->skb->len;
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
index ce580960d109..31b81b7547a3 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.h
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -422,7 +422,6 @@ struct wcn36xx_dxe_ctl {
unsigned int desc_phy_addr;
int ctl_blk_order;
struct sk_buff *skb;
- spinlock_t skb_lock;
void *bd_cpu_addr;
dma_addr_t bd_phy_addr;
};
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 182963522941..2aed6c233508 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -88,6 +88,12 @@
/* version string max length (including NULL) */
#define WCN36XX_HAL_VERSION_LENGTH 64
+/* How many frames until we start an A-MPDU TX session */
+#define WCN36XX_AMPDU_START_THRESH 20
+
+#define WCN36XX_MAX_SCAN_SSIDS 9
+#define WCN36XX_MAX_SCAN_IE_LEN 500
+
/* message types for messages exchanged between WDI and HAL */
enum wcn36xx_hal_host_msg_type {
/* Init/De-Init */
@@ -1170,7 +1176,7 @@ struct wcn36xx_hal_start_scan_offload_req_msg {
/* IE field */
u16 ie_len;
- u8 ie[0];
+ u8 ie[WCN36XX_MAX_SCAN_IE_LEN];
} __packed;
struct wcn36xx_hal_start_scan_offload_rsp_msg {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 69d6be59d97f..e3b91b3b38ef 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -353,6 +353,19 @@ static void wcn36xx_stop(struct ieee80211_hw *hw)
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
+ cancel_work_sync(&wcn->scan_work);
+
+ mutex_lock(&wcn->scan_lock);
+ if (wcn->scan_req) {
+ struct cfg80211_scan_info scan_info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(wcn->hw, &scan_info);
+ }
+ wcn->scan_req = NULL;
+ mutex_unlock(&wcn->scan_lock);
+
wcn36xx_debugfs_exit(wcn);
wcn36xx_smd_stop(wcn);
wcn36xx_dxe_deinit(wcn);
@@ -549,6 +562,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
} else {
wcn36xx_smd_set_bsskey(wcn,
vif_priv->encrypt_type,
+ vif_priv->bss_index,
key_conf->keyidx,
key_conf->keylen,
key);
@@ -566,10 +580,13 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
case DISABLE_KEY:
if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+ if (vif_priv->bss_index != WCN36XX_HAL_BSS_INVALID_IDX)
+ wcn36xx_smd_remove_bsskey(wcn,
+ vif_priv->encrypt_type,
+ vif_priv->bss_index,
+ key_conf->keyidx);
+
vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;
- wcn36xx_smd_remove_bsskey(wcn,
- vif_priv->encrypt_type,
- key_conf->keyidx);
} else {
sta_priv->is_data_encrypted = false;
/* do not remove key if disassociated */
@@ -670,10 +687,18 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
wcn->scan_aborted = true;
mutex_unlock(&wcn->scan_lock);
- /* ieee80211_scan_completed will be called on FW scan indication */
- wcn36xx_smd_stop_hw_scan(wcn);
-
- cancel_work_sync(&wcn->scan_work);
+ if (get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+ /* ieee80211_scan_completed will be called on FW scan
+ * indication */
+ wcn36xx_smd_stop_hw_scan(wcn);
+ } else {
+ struct cfg80211_scan_info scan_info = {
+ .aborted = true,
+ };
+
+ cancel_work_sync(&wcn->scan_work);
+ ieee80211_scan_completed(wcn->hw, &scan_info);
+ }
}
static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
@@ -953,6 +978,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
mutex_lock(&wcn->conf_mutex);
+ vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
list_add(&vif_priv->list, &wcn->vif_list);
wcn36xx_smd_add_sta_self(wcn, vif);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 8932af5e4d8d..ea74f2b92df5 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -620,9 +620,13 @@ out:
int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_start_scan_offload_req_msg msg_body;
int ret, i;
+ if (req->ie_len > WCN36XX_MAX_SCAN_IE_LEN)
+ return -EINVAL;
+
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_OFFLOAD_REQ);
@@ -631,6 +635,7 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
msg_body.max_ch_time = 100;
msg_body.scan_hidden = 1;
memcpy(msg_body.mac, vif->addr, ETH_ALEN);
+ msg_body.bss_type = vif_priv->bss_type;
msg_body.p2p_search = vif->p2p;
msg_body.num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body.ssids));
@@ -646,6 +651,14 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
for (i = 0; i < msg_body.num_channel; i++)
msg_body.channels[i] = req->channels[i]->hw_value;
+ msg_body.header.len -= WCN36XX_MAX_SCAN_IE_LEN;
+
+ if (req->ie_len > 0) {
+ msg_body.ie_len = req->ie_len;
+ msg_body.header.len += req->ie_len;
+ memcpy(msg_body.ie, req->ie, req->ie_len);
+ }
+
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL,
@@ -1399,9 +1412,10 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
bss->spectrum_mgt_enable = 0;
bss->tx_mgmt_power = 0;
bss->max_tx_power = WCN36XX_MAX_POWER(wcn);
-
bss->action = update;
+ vif_priv->bss_type = bss->bss_type;
+
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
bss->bssid, bss->self_mac_addr, bss->bss_type,
@@ -1446,6 +1460,10 @@ int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
int ret = 0;
mutex_lock(&wcn->hal_mutex);
+
+ if (vif_priv->bss_index == WCN36XX_HAL_BSS_INVALID_IDX)
+ goto out;
+
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
msg_body.bss_index = vif_priv->bss_index;
@@ -1464,6 +1482,8 @@ int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
wcn36xx_err("hal_delete_bss response failed err=%d\n", ret);
goto out;
}
+
+ vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
@@ -1630,6 +1650,7 @@ out:
int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
enum ani_ed_type enc_type,
+ u8 bssidx,
u8 keyidx,
u8 keylen,
u8 *key)
@@ -1639,7 +1660,7 @@ int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
- msg_body.bss_idx = 0;
+ msg_body.bss_idx = bssidx;
msg_body.enc_type = enc_type;
msg_body.num_keys = 1;
msg_body.keys[0].id = keyidx;
@@ -1700,6 +1721,7 @@ out:
int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
enum ani_ed_type enc_type,
+ u8 bssidx,
u8 keyidx)
{
struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
@@ -1707,7 +1729,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
- msg_body.bss_idx = 0;
+ msg_body.bss_idx = bssidx;
msg_body.enc_type = enc_type;
msg_body.key_id = keyidx;
@@ -2132,11 +2154,13 @@ static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len)
return -EIO;
}
- wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)", rsp->type);
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)\n", rsp->type);
switch (rsp->type) {
case WCN36XX_HAL_SCAN_IND_FAILED:
+ case WCN36XX_HAL_SCAN_IND_DEQUEUED:
scan_info.aborted = true;
+ /* fall through */
case WCN36XX_HAL_SCAN_IND_COMPLETED:
mutex_lock(&wcn->scan_lock);
wcn->scan_req = NULL;
@@ -2147,7 +2171,6 @@ static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len)
break;
case WCN36XX_HAL_SCAN_IND_STARTED:
case WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL:
- case WCN36XX_HAL_SCAN_IND_DEQUEUED:
case WCN36XX_HAL_SCAN_IND_PREEMPTED:
case WCN36XX_HAL_SCAN_IND_RESTARTED:
break;
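One subtlety in the scan offload change above: ie[] in wcn36xx_hal_start_scan_offload_req_msg is now a fixed WCN36XX_MAX_SCAN_IE_LEN (500) byte array instead of a zero-length one, so INIT_HAL_MSG(), which sets header.len to the full message size, now counts the whole array. The code therefore subtracts WCN36XX_MAX_SCAN_IE_LEN and adds back only req->ie_len. A toy calculation, with a hypothetical message size and IE length:

#include <stdio.h>

#define MAX_SCAN_IE_LEN 500

int main(void)
{
	unsigned int msg_size = 1024;	/* hypothetical sizeof(msg_body) */
	unsigned int ie_len = 37;	/* hypothetical req->ie_len */
	unsigned int header_len;

	header_len = msg_size;			/* INIT_HAL_MSG() */
	header_len -= MAX_SCAN_IE_LEN;		/* drop the unused ie[] tail */
	if (ie_len > 0)
		header_len += ie_len;		/* count only the real IE bytes */

	printf("on-the-wire length: %u\n", header_len);	/* 561 */
	return 0;
}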
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 8076edf40ac8..61bb8d43138c 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -97,6 +97,7 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
u8 sta_index);
int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
enum ani_ed_type enc_type,
+ u8 bssidx,
u8 keyidx,
u8 keylen,
u8 *key);
@@ -106,6 +107,7 @@ int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
u8 sta_index);
int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
enum ani_ed_type enc_type,
+ u8 bssidx,
u8 keyidx);
int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index b1768ed6b0be..a6902371e89c 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -273,6 +273,7 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
is_multicast_ether_addr(hdr->addr1);
struct wcn36xx_tx_bd bd;
+ int ret;
memset(&bd, 0, sizeof(bd));
@@ -317,5 +318,17 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
buff_to_be((u32 *)&bd, sizeof(bd)/sizeof(u32));
bd.tx_bd_sign = 0xbdbdbdbd;
- return wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low);
+ ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low);
+ if (ret && bd.tx_comp) {
+ /* If the skb has not been transmitted,
+ * don't keep a reference to it.
+ */
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ wcn->tx_ack_skb = NULL;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+ ieee80211_wake_queues(wcn->hw);
+ }
+
+ return ret;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 5854adf43f3a..9343989d1169 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -32,12 +32,6 @@
#define WLAN_NV_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
#define WCN36XX_AGGR_BUFFER_SIZE 64
-/* How many frames until we start a-mpdu TX session */
-#define WCN36XX_AMPDU_START_THRESH 20
-
-#define WCN36XX_MAX_SCAN_SSIDS 9
-#define WCN36XX_MAX_SCAN_IE_LEN 500
-
extern unsigned int wcn36xx_dbg_mask;
enum wcn36xx_debug_mask {
@@ -123,6 +117,7 @@ struct wcn36xx_vif {
bool is_joining;
bool sta_assoc;
struct wcn36xx_hal_mac_ssid ssid;
+ enum wcn36xx_hal_bss_type bss_type;
/* Power management */
enum wcn36xx_power_state pw_state;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 8c90b3111f0b..11e46e44381e 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1200,8 +1200,12 @@ static const struct file_operations fops_freq = {
static int wil_link_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- struct station_info sinfo;
- int i, rc;
+ struct station_info *sinfo;
+ int i, rc = 0;
+
+ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+ if (!sinfo)
+ return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
struct wil_sta_info *p = &wil->sta[i];
@@ -1229,19 +1233,21 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data)
vif = (mid < wil->max_vifs) ? wil->vifs[mid] : NULL;
if (vif) {
- rc = wil_cid_fill_sinfo(vif, i, &sinfo);
+ rc = wil_cid_fill_sinfo(vif, i, sinfo);
if (rc)
- return rc;
+ goto out;
- seq_printf(s, " Tx_mcs = %d\n", sinfo.txrate.mcs);
- seq_printf(s, " Rx_mcs = %d\n", sinfo.rxrate.mcs);
- seq_printf(s, " SQ = %d\n", sinfo.signal);
+ seq_printf(s, " Tx_mcs = %d\n", sinfo->txrate.mcs);
+ seq_printf(s, " Rx_mcs = %d\n", sinfo->rxrate.mcs);
+ seq_printf(s, " SQ = %d\n", sinfo->signal);
} else {
seq_puts(s, " INVALID MID\n");
}
}
- return 0;
+out:
+ kfree(sinfo);
+ return rc;
}
static int wil_link_seq_open(struct inode *inode, struct file *file)
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index a4b413e8d55a..82aec6b06d09 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -391,7 +391,7 @@ static void wil_fw_error_worker(struct work_struct *work)
struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
fw_error_worker);
struct net_device *ndev = wil->main_ndev;
- struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct wireless_dev *wdev;
wil_dbg_misc(wil, "fw error worker\n");
@@ -399,6 +399,7 @@ static void wil_fw_error_worker(struct work_struct *work)
wil_info(wil, "No recovery - interface is down\n");
return;
}
+ wdev = ndev->ieee80211_ptr;
/* increment @recovery_count if less then WIL6210_FW_RECOVERY_TO
* passed since last recovery attempt
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index a3dda9a97c1f..90de9a916241 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -824,7 +824,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
struct wireless_dev *wdev = vif_to_wdev(vif);
struct wmi_connect_event *evt = d;
int ch; /* channel number */
- struct station_info sinfo;
+ struct station_info *sinfo;
u8 *assoc_req_ie, *assoc_resp_ie;
size_t assoc_req_ielen, assoc_resp_ielen;
/* capinfo(u16) + listen_interval(u16) + IEs */
@@ -940,6 +940,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
vif->bss = NULL;
} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
(wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+
if (rc) {
if (disable_ap_sme)
/* notify new_sta has failed */
@@ -947,16 +948,22 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
goto out;
}
- memset(&sinfo, 0, sizeof(sinfo));
+ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+ if (!sinfo) {
+ rc = -ENOMEM;
+ goto out;
+ }
- sinfo.generation = wil->sinfo_gen++;
+ sinfo->generation = wil->sinfo_gen++;
if (assoc_req_ie) {
- sinfo.assoc_req_ies = assoc_req_ie;
- sinfo.assoc_req_ies_len = assoc_req_ielen;
+ sinfo->assoc_req_ies = assoc_req_ie;
+ sinfo->assoc_req_ies_len = assoc_req_ielen;
}
- cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
+ cfg80211_new_sta(ndev, evt->bssid, sinfo, GFP_KERNEL);
+
+ kfree(sinfo);
} else {
wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype,
evt->cid);
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 6837064908be..6b0e1ec346cb 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1484,7 +1484,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
int slot, firstused;
bool frame_succeed;
int skip;
- static u8 err_out1, err_out2;
+ static u8 err_out1;
ring = parse_cookie(dev, status->cookie, &slot);
if (unlikely(!ring))
@@ -1518,13 +1518,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
} else {
/* More than a single header/data pair were missed.
- * Report this error once.
+ * Report this error, and reset the controller to
+ * revive operation.
*/
- if (!err_out2)
- b43dbg(dev->wl,
- "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
- ring->index, firstused, slot);
- err_out2 = 1;
+ b43dbg(dev->wl,
+ "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
+ ring->index, firstused, slot);
+ b43_controller_restart(dev, "Out of order TX");
return;
}
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index cfa617ddb2f1..2f0c64cef65f 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -1064,7 +1064,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
- bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+ bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA);
if (!bounce_skb) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 0b68240ec7b4..a1915411c280 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -963,6 +963,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 89b86251910e..f5b405c98047 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2728,9 +2728,8 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
struct brcmf_bss_info_le *bi)
{
struct wiphy *wiphy = cfg_to_wiphy(cfg);
- struct ieee80211_channel *notify_channel;
struct cfg80211_bss *bss;
- struct ieee80211_supported_band *band;
+ enum nl80211_band band;
struct brcmu_chan ch;
u16 channel;
u32 freq;
@@ -2738,7 +2737,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
u16 notify_interval;
u8 *notify_ie;
size_t notify_ielen;
- s32 notify_signal;
+ struct cfg80211_inform_bss bss_data = {};
if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
brcmf_err("Bss info is larger than buffer. Discarding\n");
@@ -2753,32 +2752,33 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
channel = bi->ctl_ch;
if (channel <= CH_MAX_2G_CHANNEL)
- band = wiphy->bands[NL80211_BAND_2GHZ];
+ band = NL80211_BAND_2GHZ;
else
- band = wiphy->bands[NL80211_BAND_5GHZ];
+ band = NL80211_BAND_5GHZ;
- freq = ieee80211_channel_to_frequency(channel, band->band);
- notify_channel = ieee80211_get_channel(wiphy, freq);
+ freq = ieee80211_channel_to_frequency(channel, band);
+ bss_data.chan = ieee80211_get_channel(wiphy, freq);
+ bss_data.scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime());
notify_capability = le16_to_cpu(bi->capability);
notify_interval = le16_to_cpu(bi->beacon_period);
notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
notify_ielen = le32_to_cpu(bi->ie_length);
- notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
+ bss_data.signal = (s16)le16_to_cpu(bi->RSSI) * 100;
brcmf_dbg(CONN, "bssid: %pM\n", bi->BSSID);
brcmf_dbg(CONN, "Channel: %d(%d)\n", channel, freq);
brcmf_dbg(CONN, "Capability: %X\n", notify_capability);
brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval);
- brcmf_dbg(CONN, "Signal: %d\n", notify_signal);
+ brcmf_dbg(CONN, "Signal: %d\n", bss_data.signal);
- bss = cfg80211_inform_bss(wiphy, notify_channel,
- CFG80211_BSS_FTYPE_UNKNOWN,
- (const u8 *)bi->BSSID,
- 0, notify_capability,
- notify_interval, notify_ie,
- notify_ielen, notify_signal,
- GFP_KERNEL);
+ bss = cfg80211_inform_bss_data(wiphy, &bss_data,
+ CFG80211_BSS_FTYPE_UNKNOWN,
+ (const u8 *)bi->BSSID,
+ 0, notify_capability,
+ notify_interval, notify_ie,
+ notify_ielen, GFP_KERNEL);
if (!bss)
return -ENOMEM;
@@ -5498,7 +5498,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
static int generation;
u32 event = e->event_code;
u32 reason = e->reason;
- struct station_info sinfo;
+ struct station_info *sinfo;
brcmf_dbg(CONN, "event %s (%u), reason %d\n",
brcmf_fweh_event_name(event), event, reason);
@@ -5511,16 +5511,22 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
(reason == BRCMF_E_STATUS_SUCCESS)) {
- memset(&sinfo, 0, sizeof(sinfo));
if (!data) {
brcmf_err("No IEs present in ASSOC/REASSOC_IND");
return -EINVAL;
}
- sinfo.assoc_req_ies = data;
- sinfo.assoc_req_ies_len = e->datalen;
+
+ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+ if (!sinfo)
+ return -ENOMEM;
+
+ sinfo->assoc_req_ies = data;
+ sinfo->assoc_req_ies_len = e->datalen;
generation++;
- sinfo.generation = generation;
- cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_KERNEL);
+ sinfo->generation = generation;
+ cfg80211_new_sta(ndev, e->addr, sinfo, GFP_KERNEL);
+
+ kfree(sinfo);
} else if ((event == BRCMF_E_DISASSOC_IND) ||
(event == BRCMF_E_DEAUTH_IND) ||
(event == BRCMF_E_DEAUTH)) {
@@ -6512,6 +6518,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->flags |= WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_PS_ON_BY_DEFAULT |
+ WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_OFFCHAN_TX |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_TDLS))
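Two patterns in this file are worth calling out: the scan-result path now fills a struct cfg80211_inform_bss (channel, scan width, boot-time timestamp, signal) and passes it to cfg80211_inform_bss_data(), and the AP association path allocates struct station_info with kzalloc() instead of keeping it on the stack, freeing it right after cfg80211_new_sta(). A minimal sketch of the latter idiom, assuming hypothetical ies/ies_len/mac variables for the event data:

    /* Sketch only: report a newly associated station without putting the
     * large struct station_info on the stack.
     */
    struct station_info *sinfo;

    sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
    if (!sinfo)
            return -ENOMEM;

    sinfo->assoc_req_ies = ies;         /* IEs carried by the ASSOC/REASSOC event */
    sinfo->assoc_req_ies_len = ies_len;
    sinfo->generation = ++generation;
    cfg80211_new_sta(ndev, mac, sinfo, GFP_KERNEL);
    kfree(sinfo);                       /* safe: the patch itself frees it right after the call */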
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 3b829fed8631..927d62b3d41b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -689,6 +689,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_43525_CHIP_ID:
case BRCM_CC_4365_CHIP_ID:
case BRCM_CC_4366_CHIP_ID:
+ case BRCM_CC_43664_CHIP_ID:
return 0x200000;
case CY_CC_4373_CHIP_ID:
return 0x160000;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 9277f4c2bfeb..9095b830ae4d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -459,7 +459,7 @@ static void brcmf_fw_free_request(struct brcmf_fw_request *req)
kfree(req);
}
-static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
+static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
{
struct brcmf_fw *fwctx = ctx;
struct brcmf_fw_item *cur;
@@ -498,13 +498,10 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
brcmf_dbg(TRACE, "nvram %p len %d\n", nvram, nvram_length);
cur->nv_data.data = nvram;
cur->nv_data.len = nvram_length;
- return;
+ return 0;
fail:
- brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
- fwctx->done(fwctx->dev, -ENOENT, NULL);
- brcmf_fw_free_request(fwctx->req);
- kfree(fwctx);
+ return -ENOENT;
}
static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async)
@@ -553,20 +550,27 @@ static void brcmf_fw_request_done(const struct firmware *fw, void *ctx)
brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path,
fw ? "" : "not ");
- if (fw) {
- if (cur->type == BRCMF_FW_TYPE_BINARY)
- cur->binary = fw;
- else if (cur->type == BRCMF_FW_TYPE_NVRAM)
- brcmf_fw_request_nvram_done(fw, fwctx);
- else
- release_firmware(fw);
- } else if (cur->type == BRCMF_FW_TYPE_NVRAM) {
- brcmf_fw_request_nvram_done(NULL, fwctx);
- } else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL)) {
+ if (!fw)
ret = -ENOENT;
+
+ switch (cur->type) {
+ case BRCMF_FW_TYPE_NVRAM:
+ ret = brcmf_fw_request_nvram_done(fw, fwctx);
+ break;
+ case BRCMF_FW_TYPE_BINARY:
+ cur->binary = fw;
+ break;
+ default:
+ /* something fishy here so bail out early */
+ brcmf_err("unknown fw type: %d\n", cur->type);
+ release_firmware(fw);
+ ret = -EINVAL;
goto fail;
}
+ if (ret < 0 && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
+ goto fail;
+
do {
if (++fwctx->curpos == fwctx->req->n_items) {
ret = 0;
@@ -630,7 +634,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
struct brcmf_fw_request *
brcmf_fw_alloc_request(u32 chip, u32 chiprev,
- struct brcmf_firmware_mapping mapping_table[],
+ const struct brcmf_firmware_mapping mapping_table[],
u32 table_size, struct brcmf_fw_name *fwnames,
u32 n_fwnames)
{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index 79a21095c349..2893e56910f0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -80,7 +80,7 @@ struct brcmf_fw_name {
struct brcmf_fw_request *
brcmf_fw_alloc_request(u32 chip, u32 chiprev,
- struct brcmf_firmware_mapping mapping_table[],
+ const struct brcmf_firmware_mapping mapping_table[],
u32 table_size, struct brcmf_fw_name *fwnames,
u32 n_fwnames);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
index f93ba6be1ef8..692235d25277 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
@@ -27,8 +27,10 @@
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40
#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE 32
#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE 24
-#define BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE 16
-#define BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE 32
+#define BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7 16
+#define BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE 24
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7 32
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE 40
#define BRCMF_H2D_TXFLOWRING_ITEMSIZE 48
struct msgbuf_buf_addr {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index bcef208a81a5..4b2149b48362 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2073,6 +2073,13 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
}
pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+
+ /* firmware requires unique mac address for p2pdev interface */
+ if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) {
+ brcmf_err("discovery vif must be different from primary interface\n");
+ return ERR_PTR(-EINVAL);
+ }
+
brcmf_p2p_generate_bss_mac(p2p, addr);
brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 091c191ce259..f0797aeada67 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -59,7 +59,7 @@ BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
-static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
+static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
@@ -75,6 +75,7 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
+ BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
};
@@ -104,7 +105,8 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
#define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
#define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
-#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX 0x140
+#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140
+#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144
#define BRCMF_PCIE2_INTA 0x01
#define BRCMF_PCIE2_INTB 0x02
@@ -134,11 +136,13 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_PCIE_MB_INT_D2H3_DB0 | \
BRCMF_PCIE_MB_INT_D2H3_DB1)
+#define BRCMF_PCIE_SHARED_VERSION_7 7
#define BRCMF_PCIE_MIN_SHARED_VERSION 5
-#define BRCMF_PCIE_MAX_SHARED_VERSION 6
+#define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7
#define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
+#define BRCMF_PCIE_SHARED_HOSTRDY_DB1 0x10000000
#define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
@@ -315,6 +319,14 @@ static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
};
+static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = {
+ BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
+ BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
+ BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
+ BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7,
+ BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7
+};
+
static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
@@ -781,6 +793,12 @@ static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
BRCMF_PCIE_MB_INT_FN0_1);
}
+static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
+{
+ if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
+ brcmf_pcie_write_reg32(devinfo,
+ BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
+}
static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
{
@@ -923,7 +941,7 @@ static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
brcmf_dbg(PCIE, "RING !\n");
/* Any arbitrary value will do, lets use 1 */
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
return 0;
}
@@ -998,8 +1016,14 @@ brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
struct brcmf_pcie_ringbuf *ring;
u32 size;
u32 addr;
+ const u32 *ring_itemsize_array;
+
+ if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7)
+ ring_itemsize_array = brcmf_ring_itemsize_pre_v7;
+ else
+ ring_itemsize_array = brcmf_ring_itemsize;
- size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id];
+ size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id];
dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
&dma_handle);
@@ -1009,7 +1033,7 @@ brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
- brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]);
+ brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
@@ -1018,7 +1042,7 @@ brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
return NULL;
}
brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
- brcmf_ring_itemsize[ring_id], dma_buf);
+ ring_itemsize_array[ring_id], dma_buf);
ring->dma_handle = dma_handle;
ring->devinfo = devinfo;
brcmf_commonring_register_cb(&ring->commonring,
@@ -1727,6 +1751,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
init_waitqueue_head(&devinfo->mbdata_resp_wait);
brcmf_pcie_intr_enable(devinfo);
+ brcmf_pcie_hostready(devinfo);
if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0)
return;
@@ -1949,6 +1974,7 @@ static int brcmf_pcie_pm_leave_D3(struct device *dev)
brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
brcmf_bus_change_state(bus, BRCMF_BUS_UP);
brcmf_pcie_intr_enable(devinfo);
+ brcmf_pcie_hostready(devinfo);
return 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 1037df7297bb..412a05b9a2b2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -619,7 +619,7 @@ BRCMF_FW_DEF(4354, "brcmfmac4354-sdio");
BRCMF_FW_DEF(4356, "brcmfmac4356-sdio");
BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
-static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
+static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x0000001F, 43241B0),
BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x00000020, 43241B4),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index a0873adcc01c..a4308c6e72d7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -52,7 +52,7 @@ BRCMF_FW_DEF(43242A, "brcmfmac43242a");
BRCMF_FW_DEF(43569, "brcmfmac43569");
BRCMF_FW_DEF(4373, "brcmfmac4373");
-static struct brcmf_firmware_mapping brcmf_usb_fwnames[] = {
+static const struct brcmf_firmware_mapping brcmf_usb_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
BRCMF_FW_ENTRY(BRCM_CC_43235_CHIP_ID, 0x00000008, 43236B),
BRCMF_FW_ENTRY(BRCM_CC_43236_CHIP_ID, 0x00000008, 43236B),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index 93d4cde0eb31..9d830d27b229 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -3388,13 +3388,8 @@ void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode)
u8 phybw40;
phybw40 = CHSPEC_IS40(pi->radio_chanspec);
- if (LCNREV_LT(pi->pubpi.phy_rev, 2)) {
- mod_phy_reg(pi, 0x4b0, (0x1 << 5), (mode) << 5);
- mod_phy_reg(pi, 0x4b1, (0x1 << 9), 0 << 9);
- } else {
- mod_phy_reg(pi, 0x4b0, (0x1 << 5), (mode) << 5);
- mod_phy_reg(pi, 0x4b1, (0x1 << 9), 0 << 9);
- }
+ mod_phy_reg(pi, 0x4b0, (0x1 << 5), (mode) << 5);
+ mod_phy_reg(pi, 0x4b1, (0x1 << 9), 0 << 9);
if (phybw40 == 0) {
mod_phy_reg((pi), 0x410,
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index 57544a3a3ce4..686f7a85a045 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -57,6 +57,7 @@
#define BRCM_CC_43602_CHIP_ID 43602
#define BRCM_CC_4365_CHIP_ID 0x4365
#define BRCM_CC_4366_CHIP_ID 0x4366
+#define BRCM_CC_43664_CHIP_ID 43664
#define BRCM_CC_4371_CHIP_ID 0x4371
#define CY_CC_4373_CHIP_ID 0x4373
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 236b52423506..7c4f550a1475 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -3732,7 +3732,7 @@ IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"),
IPW2100_ORD(ASSOCIATED_AP_PTR,
"0 if not associated, else pointer to AP table entry"),
IPW2100_ORD(AVAILABLE_AP_CNT,
- "AP's decsribed in the AP table"),
+ "AP's described in the AP table"),
IPW2100_ORD(AP_LIST_PTR, "Ptr to list of available APs"),
IPW2100_ORD(STAT_AP_ASSNS, "associations"),
IPW2100_ORD(STAT_ASSN_FAIL, "association failures"),
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.h b/drivers/net/wireless/intel/ipw2x00/ipw2100.h
index 193947865efd..ce3e35f6b60f 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.h
@@ -1009,7 +1009,7 @@ typedef enum _ORDINAL_TABLE_1 { // NS - means Not Supported by FW
IPW_ORD_STAT_PERCENT_RETRIES, // current calculation of % missed tx retries
IPW_ORD_ASSOCIATED_AP_PTR, // If associated, this is ptr to the associated
// AP table entry. set to 0 if not associated
- IPW_ORD_AVAILABLE_AP_CNT, // # of AP's decsribed in the AP table
+ IPW_ORD_AVAILABLE_AP_CNT, // # of AP's described in the AP table
IPW_ORD_AP_LIST_PTR, // Ptr to list of available APs
IPW_ORD_STAT_AP_ASSNS, // # of associations
IPW_ORD_STAT_ASSN_FAIL, // # of association failures
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 87a5e414c2f7..ba3fb1d2ddb4 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -12012,7 +12012,7 @@ MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)"
#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
-MODULE_PARM_DESC(qos_enable, "enable all QoS functionalitis");
+MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
module_param(qos_burst_enable, int, 0444);
MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index e6205eae51fd..4d08d78c6b71 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -13,7 +13,7 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
-iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o fw/nvm.o
+iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index b2573b1d1506..591687984962 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -27,7 +28,6 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-csr.h"
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
@@ -91,7 +91,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl1000_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -117,7 +118,8 @@ const struct iwl_cfg iwl1000_bg_cfg = {
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl100_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index 1b32ad413b9e..a63ca8820568 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -115,7 +116,8 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl2000_2bgn_cfg = {
@@ -142,7 +144,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl2030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -163,7 +166,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl105_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -190,7 +194,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl135_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index dffd9df782b0..d4ba66aecdc9 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -54,7 +54,6 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 38
@@ -115,8 +114,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_22000, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl_22000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
@@ -137,13 +134,13 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.gen2 = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
- .tx_cmd_queue_size = 32, \
.min_umac_error_event_table = 0x400000
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
IWL_DEVICE_22000,
+ .csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
@@ -154,6 +151,7 @@ const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
IWL_DEVICE_22000,
+ .csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
@@ -165,6 +163,7 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_JF_FW_PRE,
IWL_DEVICE_22000,
+ .csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
@@ -175,6 +174,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
IWL_DEVICE_22000,
+ .csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
@@ -185,6 +185,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_F0_FW_PRE,
IWL_DEVICE_22000,
+ .csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
@@ -195,6 +196,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_JF_B0_FW_PRE,
IWL_DEVICE_22000,
+ .csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
@@ -205,6 +207,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_A0_FW_PRE,
IWL_DEVICE_22000,
+ .csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index 4aa8f0a05c8a..a224f1be1ec2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -28,7 +29,6 @@
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-agn-hw.h"
-#include "iwl-csr.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 5
@@ -89,7 +89,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -153,7 +154,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl5150_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index 39335b7b0c16..51cec0bb75fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -135,7 +136,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl6005_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -189,7 +191,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl6030_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -225,7 +228,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -280,7 +284,8 @@ const struct iwl_cfg iwl130_bg_cfg = {
.base_params = &iwl6000_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl6000i_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -313,7 +318,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl6050_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -339,7 +345,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl6150_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index ce741beec1fc..69bfa827e82a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -7,7 +7,8 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +35,8 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,7 +70,6 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 17
@@ -160,14 +161,13 @@ static const struct iwl_ht_params iwl7000_ht_params = {
#define IWL_DEVICE_7000_COMMON \
.device_family = IWL_DEVICE_FAMILY_7000, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl7000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \
.non_shared_ant = ANT_A, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
- .dccm_offset = IWL7000_DCCM_OFFSET
+ .dccm_offset = IWL7000_DCCM_OFFSET, \
+ .csr = &iwl_csr_v1
#define IWL_DEVICE_7000 \
IWL_DEVICE_7000_COMMON, \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 3f4d9bac9f73..7262e973e0d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -7,7 +7,8 @@
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +35,7 @@
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -67,7 +69,6 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX 36
@@ -140,8 +141,6 @@ static const struct iwl_tt_params iwl8000_tt_params = {
#define IWL_DEVICE_8000_COMMON \
.device_family = IWL_DEVICE_FAMILY_8000, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl8000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
@@ -158,7 +157,8 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.apmg_not_supported = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
- .min_umac_error_event_table = 0x800000
+ .min_umac_error_event_table = 0x800000, \
+ .csr = &iwl_csr_v1
#define IWL_DEVICE_8000 \
IWL_DEVICE_8000_COMMON, \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e1c869a1f8cc..9706624911f8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -54,7 +54,6 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-agn-hw.h"
#include "fw/file.h"
/* Highest firmware API version supported */
@@ -135,8 +134,6 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.ucode_api_max = IWL9000_UCODE_API_MAX, \
.ucode_api_min = IWL9000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_9000, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl9000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_9000, \
@@ -156,7 +153,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.rf_id = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
- .min_umac_error_event_table = 0x800000
+ .min_umac_error_event_table = 0x800000, \
+ .csr = &iwl_csr_v1
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index e68254e12764..030482b357a3 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1200,16 +1200,16 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
return -EINVAL;
}
- if (!data->sku_cap_11n_enable && !data->sku_cap_band_24GHz_enable &&
- !data->sku_cap_band_52GHz_enable) {
+ if (!data->sku_cap_11n_enable && !data->sku_cap_band_24ghz_enable &&
+ !data->sku_cap_band_52ghz_enable) {
IWL_ERR(priv, "Invalid device sku\n");
return -EINVAL;
}
IWL_DEBUG_INFO(priv,
"Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n",
- data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
- data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
+ data->sku_cap_band_24ghz_enable ? "" : "NOT", "enabled",
+ data->sku_cap_band_52ghz_enable ? "" : "NOT", "enabled",
data->sku_cap_11n_enable ? "" : "NOT", "enabled");
priv->hw_params.tx_chains_num =
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index a57c7223df0f..5f6e855006dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -88,11 +88,6 @@ enum iwl_data_path_subcmd_ids {
TLC_MNG_CONFIG_CMD = 0xF,
/**
- * @TLC_MNG_NOTIF_REQ_CMD: &struct iwl_tlc_notif_req_config_cmd
- */
- TLC_MNG_NOTIF_REQ_CMD = 0x10,
-
- /**
* @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
*/
TLC_MNG_UPDATE_NOTIF = 0xF7,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 37c57bcbfb4a..8d6dc9189985 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -190,22 +190,36 @@ struct iwl_nvm_get_info_general {
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
/**
+ * enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku
+ * @NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED: true if 2.4 band enabled
+ * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
+ * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
+ * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
+ * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
+ * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
+ * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
+ * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
+ * @NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED: true if API lock enabled
+ */
+enum iwl_nvm_mac_sku_flags {
+ NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED = BIT(0),
+ NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1),
+ NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2),
+ NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3),
+ NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4),
+ NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5),
+ NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8),
+ NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED = BIT(14),
+ NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED = BIT(15),
+};
+
+/**
* struct iwl_nvm_get_info_sku - mac information
- * @enable_24g: band 2.4G enabled
- * @enable_5g: band 5G enabled
- * @enable_11n: 11n enabled
- * @enable_11ac: 11ac enabled
- * @mimo_disable: MIMO enabled
- * @ext_crypto: Extended crypto enabled
+ * @mac_sku_flags: flags for SKU, see &enum iwl_nvm_mac_sku_flags
*/
struct iwl_nvm_get_info_sku {
- __le32 enable_24g;
- __le32 enable_5g;
- __le32 enable_11n;
- __le32 enable_11ac;
- __le32 mimo_disable;
- __le32 ext_crypto;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */
+ __le32 mac_sku_flags;
+} __packed; /* REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_2 */
/**
* struct iwl_nvm_get_info_phy - phy information
@@ -243,7 +257,7 @@ struct iwl_nvm_get_info_rsp {
struct iwl_nvm_get_info_sku mac_sku;
struct iwl_nvm_get_info_phy phy_sku;
struct iwl_nvm_get_info_regulatory regulatory;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */
/**
* struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
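With the v2 response, the six per-capability __le32 fields collapse into a single mac_sku_flags bitmap, so consumers test bits instead of reading separate booleans. A hedged sketch (rsp and data are illustrative names; the sku_cap_* fields shown are the ones this series touches elsewhere):

    /* Sketch only: derive SKU capabilities from the v2 flags word. */
    u32 sku = le32_to_cpu(rsp->mac_sku.mac_sku_flags);

    data->sku_cap_band_24ghz_enable = !!(sku & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
    data->sku_cap_band_52ghz_enable = !!(sku & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
    data->sku_cap_11n_enable        = !!(sku & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);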
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index e49a6f7be613..21e13a315421 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,62 +66,38 @@
/**
* enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags
- * @IWL_TLC_MNG_CFG_FLAGS_CCK_MSK: CCK support
- * @IWL_TLC_MNG_CFG_FLAGS_DD_MSK: enable DD
* @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC
* @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
- * @IWL_TLC_MNG_CFG_FLAGS_BF_MSK: enable BFER
- * @IWL_TLC_MNG_CFG_FLAGS_DCM_MSK: enable DCM
*/
enum iwl_tlc_mng_cfg_flags {
- IWL_TLC_MNG_CFG_FLAGS_CCK_MSK = BIT(0),
- IWL_TLC_MNG_CFG_FLAGS_DD_MSK = BIT(1),
- IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(2),
- IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(3),
- IWL_TLC_MNG_CFG_FLAGS_BF_MSK = BIT(4),
- IWL_TLC_MNG_CFG_FLAGS_DCM_MSK = BIT(5),
+ IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0),
+ IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1),
};
/**
* enum iwl_tlc_mng_cfg_cw - channel width options
- * @IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ: 20MHZ channel
- * @IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ: 40MHZ channel
- * @IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ: 80MHZ channel
- * @IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ: 160MHZ channel
- * @IWL_TLC_MNG_MAX_CH_WIDTH_LAST: maximum value
+ * @IWL_TLC_MNG_CH_WIDTH_20MHZ: 20MHZ channel
+ * @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel
+ * @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel
+ * @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel
+ * @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value
*/
enum iwl_tlc_mng_cfg_cw {
- IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ,
- IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ,
- IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ,
- IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
- IWL_TLC_MNG_MAX_CH_WIDTH_LAST = IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
+ IWL_TLC_MNG_CH_WIDTH_20MHZ,
+ IWL_TLC_MNG_CH_WIDTH_40MHZ,
+ IWL_TLC_MNG_CH_WIDTH_80MHZ,
+ IWL_TLC_MNG_CH_WIDTH_160MHZ,
+ IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ,
};
/**
* enum iwl_tlc_mng_cfg_chains - possible chains
* @IWL_TLC_MNG_CHAIN_A_MSK: chain A
* @IWL_TLC_MNG_CHAIN_B_MSK: chain B
- * @IWL_TLC_MNG_CHAIN_C_MSK: chain C
*/
enum iwl_tlc_mng_cfg_chains {
IWL_TLC_MNG_CHAIN_A_MSK = BIT(0),
IWL_TLC_MNG_CHAIN_B_MSK = BIT(1),
- IWL_TLC_MNG_CHAIN_C_MSK = BIT(2),
-};
-
-/**
- * enum iwl_tlc_mng_cfg_gi - guard interval options
- * @IWL_TLC_MNG_SGI_20MHZ_MSK: enable short GI for 20MHZ
- * @IWL_TLC_MNG_SGI_40MHZ_MSK: enable short GI for 40MHZ
- * @IWL_TLC_MNG_SGI_80MHZ_MSK: enable short GI for 80MHZ
- * @IWL_TLC_MNG_SGI_160MHZ_MSK: enable short GI for 160MHZ
- */
-enum iwl_tlc_mng_cfg_gi {
- IWL_TLC_MNG_SGI_20MHZ_MSK = BIT(0),
- IWL_TLC_MNG_SGI_40MHZ_MSK = BIT(1),
- IWL_TLC_MNG_SGI_80MHZ_MSK = BIT(2),
- IWL_TLC_MNG_SGI_160MHZ_MSK = BIT(3),
};
/**
@@ -145,25 +123,7 @@ enum iwl_tlc_mng_cfg_mode {
};
/**
- * enum iwl_tlc_mng_vht_he_types - VHT HE types
- * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU: VHT HT single user
- * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT: VHT HT single user extended
- * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU: VHT HT multiple users
- * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED: trigger based
- * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM: a count of possible types
- */
-enum iwl_tlc_mng_vht_he_types {
- IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU = 0,
- IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT,
- IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU,
- IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
- IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM =
- IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
-
-};
-
-/**
- * enum iwl_tlc_mng_ht_rates - HT/VHT rates
+ * enum iwl_tlc_mng_ht_rates - HT/VHT/HE rates
* @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0
* @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1
* @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2
@@ -174,6 +134,8 @@ enum iwl_tlc_mng_vht_he_types {
* @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7
* @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8
* @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9
+ * @IWL_TLC_MNG_HT_RATE_MCS10: index of MCS10
+ * @IWL_TLC_MNG_HT_RATE_MCS11: index of MCS11
* @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
*/
enum iwl_tlc_mng_ht_rates {
@@ -187,81 +149,73 @@ enum iwl_tlc_mng_ht_rates {
IWL_TLC_MNG_HT_RATE_MCS7,
IWL_TLC_MNG_HT_RATE_MCS8,
IWL_TLC_MNG_HT_RATE_MCS9,
- IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS9,
+ IWL_TLC_MNG_HT_RATE_MCS10,
+ IWL_TLC_MNG_HT_RATE_MCS11,
+ IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS11,
};
/* Maximum supported tx antennas number */
-#define MAX_RS_ANT_NUM 3
+#define MAX_NSS 2
/**
* struct tlc_config_cmd - TLC configuration
* @sta_id: station id
* @reserved1: reserved
- * @max_supp_ch_width: channel width
- * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
- * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
- * @max_supp_ss: valid values are 0-3, 0 - spatial streams are not supported
- * @valid_vht_he_types: bitmap of &enum iwl_tlc_mng_vht_he_types
- * @non_ht_supp_rates: bitmap of supported legacy rates
- * @ht_supp_rates: bitmap of supported HT/VHT rates, valid bits are 0-9
+ * @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw
* @mode: &enum iwl_tlc_mng_cfg_mode
- * @reserved2: reserved
- * @he_supp_rates: bitmap of supported HE rates
+ * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
+ * @amsdu: TX amsdu is supported
+ * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
+ * pair (0 - 80mhz width and below, 1 - 160mhz).
+ * @max_mpdu_len: max MPDU length, in bytes
* @sgi_ch_width_supp: bitmap of SGI support per channel width
- * @he_gi_support: 11ax HE guard interval
- * @max_ampdu_cnt: max AMPDU size (frames count)
+ * use BIT(@enum iwl_tlc_mng_cfg_cw)
+ * @reserved2: reserved
*/
struct iwl_tlc_config_cmd {
u8 sta_id;
u8 reserved1[3];
- u8 max_supp_ch_width;
+ u8 max_ch_width;
+ u8 mode;
u8 chains;
- u8 max_supp_ss;
- u8 valid_vht_he_types;
+ u8 amsdu;
__le16 flags;
- __le16 non_ht_supp_rates;
- __le16 ht_supp_rates[MAX_RS_ANT_NUM];
- u8 mode;
- u8 reserved2;
- __le16 he_supp_rates;
+ __le16 non_ht_rates;
+ __le16 ht_rates[MAX_NSS][2];
+ __le16 max_mpdu_len;
u8 sgi_ch_width_supp;
- u8 he_gi_support;
- __le32 max_ampdu_cnt;
-} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_1 */
-
-#define IWL_TLC_NOTIF_INIT_RATE_POS 0
-#define IWL_TLC_NOTIF_INIT_RATE_MSK BIT(IWL_TLC_NOTIF_INIT_RATE_POS)
-#define IWL_TLC_NOTIF_REQ_INTERVAL (500)
+ u8 reserved2[1];
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_2 */
/**
- * struct iwl_tlc_notif_req_config_cmd - request notif on specific changes
- * @sta_id: relevant station
- * @reserved1: reserved
- * @flags: bitmap of requested notifications %IWL_TLC_NOTIF_INIT_\*
- * @interval: minimum time between notifications from TLC to the driver (msec)
- * @reserved2: reserved
+ * enum iwl_tlc_update_flags - updated fields
+ * @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update
+ * @IWL_TLC_NOTIF_FLAG_AMSDU: amsdu parameters update
*/
-struct iwl_tlc_notif_req_config_cmd {
- u8 sta_id;
- u8 reserved1;
- __le16 flags;
- __le16 interval;
- __le16 reserved2;
-} __packed; /* TLC_MNG_NOTIF_REQ_CMD_API_S_VER_1 */
+enum iwl_tlc_update_flags {
+ IWL_TLC_NOTIF_FLAG_RATE = BIT(0),
+ IWL_TLC_NOTIF_FLAG_AMSDU = BIT(1),
+};
/**
* struct iwl_tlc_update_notif - TLC notification from FW
* @sta_id: station id
* @reserved: reserved
* @flags: bitmap of notifications reported
- * @values: field per flag in struct iwl_tlc_notif_req_config_cmd
+ * @rate: current initial rate
+ * @amsdu_size: Max AMSDU size, in bytes
+ * @amsdu_enabled: bitmap for per-TID AMSDU enablement
*/
struct iwl_tlc_update_notif {
u8 sta_id;
- u8 reserved;
- __le16 flags;
- __le32 values[16];
-} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_1 */
+ u8 reserved[3];
+ __le32 flags;
+ __le32 rate;
+ __le32 amsdu_size;
+ __le32 amsdu_enabled;
+} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
/**
* enum iwl_tlc_debug_flags - debug options
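The v2 notification reports the updated values inline (flags, rate, amsdu_size, amsdu_enabled) rather than through the removed request/values[] mechanism. A hedged sketch of a handler consuming it; pkt is assumed to be the received iwl_rx_packet:

    /* Sketch only: act on a TLC_MNG_UPDATE_NOTIF (v2). */
    struct iwl_tlc_update_notif *notif = (void *)pkt->data;
    u32 flags = le32_to_cpu(notif->flags);

    if (flags & IWL_TLC_NOTIF_FLAG_RATE)
            pr_debug("new initial rate 0x%x\n", le32_to_cpu(notif->rate));

    if (flags & IWL_TLC_NOTIF_FLAG_AMSDU)
            pr_debug("A-MSDU size %u, enabled TIDs 0x%x\n",
                     le32_to_cpu(notif->amsdu_size),
                     le32_to_cpu(notif->amsdu_enabled));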
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 7af3a0f51b77..a17c4a79b8d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -749,13 +750,9 @@ struct iwl_scan_req_umac {
} __packed;
#define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac)
-#define IWL_SCAN_REQ_UMAC_SIZE_V7 (sizeof(struct iwl_scan_req_umac) - \
- 4 * sizeof(u8))
-#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \
- 2 * sizeof(u8) - sizeof(__le16))
-#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
- 2 * sizeof(__le32) - 2 * sizeof(u8) - \
- sizeof(__le16))
+#define IWL_SCAN_REQ_UMAC_SIZE_V7 48
+#define IWL_SCAN_REQ_UMAC_SIZE_V6 44
+#define IWL_SCAN_REQ_UMAC_SIZE_V1 36
/**
* struct iwl_umac_scan_abort
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index dfa111bb411e..6ac240b6eace 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -131,6 +131,8 @@ enum iwl_tx_queue_cfg_actions {
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
};
+#define IWL_DEFAULT_QUEUE_SIZE 256
+#define IWL_MGMT_QUEUE_SIZE 16
/**
* struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
* @sta_id: station id
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 72259bff9922..507d9a49fa97 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -227,4 +227,40 @@ static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt)
cancel_delayed_work_sync(&fwrt->dump.wk);
}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
+{
+ fwrt->timestamp.delay = 0;
+ cancel_delayed_work_sync(&fwrt->timestamp.wk);
+}
+
+void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay);
+
+static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt)
+{
+ cancel_delayed_work_sync(&fwrt->timestamp.wk);
+}
+
+static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt)
+{
+ if (!fwrt->timestamp.delay)
+ return;
+
+ schedule_delayed_work(&fwrt->timestamp.wk,
+ round_jiffies_relative(fwrt->timestamp.delay));
+}
+
+#else
+
+static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt,
+ u32 delay) {}
+
+static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index 8f005cd69559..8ba5a60ec9ed 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -64,6 +64,7 @@
*****************************************************************************/
#include "api/commands.h"
#include "debugfs.h"
+#include "dbg.h"
#define FWRT_DEBUGFS_READ_FILE_OPS(name) \
static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt, \
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
index d93f6a4bb22d..cbbfa8e9e66d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
@@ -69,28 +69,6 @@
int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir);
-static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
-{
- fwrt->timestamp.delay = 0;
- cancel_delayed_work_sync(&fwrt->timestamp.wk);
-}
-
-static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt)
-{
- cancel_delayed_work_sync(&fwrt->timestamp.wk);
-}
-
-static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt)
-{
- if (!fwrt->timestamp.delay)
- return;
-
- schedule_delayed_work(&fwrt->timestamp.wk,
- round_jiffies_relative(fwrt->timestamp.delay));
-}
-
-void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay);
-
#else
static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
@@ -98,13 +76,4 @@ static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
return 0;
}
-static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
-
-static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {}
-
-static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
-
-static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt,
- u32 delay) {}
-
#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 9b2805e1e3b1..9d939cbaf6c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -8,6 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -143,6 +145,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
IWL_UCODE_TLV_FW_MEM_SEG = 51,
+ IWL_UCODE_TLV_IML = 52,
};
struct iwl_ucode_tlv {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index b23ffe12ad84..f4912382b6af 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -8,6 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -241,6 +243,8 @@ enum iwl_fw_type {
* @ucode_ver: ucode version from the ucode file
* @fw_version: firmware version string
* @img: ucode image like ucode_rt, ucode_init, ucode_wowlan.
+ * @iml_len: length of the image loader image
+ * @iml: image loader fw image
* @ucode_capa: capabilities parsed from the ucode file.
* @enhance_sensitivity_table: device can do enhanced sensitivity.
* @init_evtlog_ptr: event log offset for init ucode.
@@ -267,6 +271,8 @@ struct iwl_fw {
/* ucode images */
struct fw_img img[IWL_UCODE_TYPE_MAX];
+ size_t iml_len;
+ u8 *iml;
struct iwl_ucode_capabilities ucode_capa;
bool enhance_sensitivity_table;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c
deleted file mode 100644
index bd2e1fb43f5a..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#include "iwl-drv.h"
-#include "runtime.h"
-#include "fw/api/nvm-reg.h"
-#include "fw/api/commands.h"
-#include "iwl-nvm-parse.h"
-
-struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt)
-{
- struct iwl_nvm_get_info cmd = {};
- struct iwl_nvm_get_info_rsp *rsp;
- struct iwl_trans *trans = fwrt->trans;
- struct iwl_nvm_data *nvm;
- struct iwl_host_cmd hcmd = {
- .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
- .data = { &cmd, },
- .len = { sizeof(cmd) },
- .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
- };
- int ret;
- bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
- fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
-
- ret = iwl_trans_send_cmd(trans, &hcmd);
- if (ret)
- return ERR_PTR(ret);
-
- if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
- "Invalid payload len in NVM response from FW %d",
- iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
- ret = -EINVAL;
- goto out;
- }
-
- rsp = (void *)hcmd.resp_pkt->data;
- if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
- IWL_INFO(fwrt, "OTP is empty\n");
-
- nvm = kzalloc(sizeof(*nvm) +
- sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
- GFP_KERNEL);
- if (!nvm) {
- ret = -ENOMEM;
- goto out;
- }
-
- iwl_set_hw_address_from_csr(trans, nvm);
- /* TODO: if platform NVM has MAC address - override it here */
-
- if (!is_valid_ether_addr(nvm->hw_addr)) {
- IWL_ERR(fwrt, "no valid mac address was found\n");
- ret = -EINVAL;
- goto err_free;
- }
-
- IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr);
-
- /* Initialize general data */
- nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
-
- /* Initialize MAC sku data */
- nvm->sku_cap_11ac_enable =
- le32_to_cpu(rsp->mac_sku.enable_11ac);
- nvm->sku_cap_11n_enable =
- le32_to_cpu(rsp->mac_sku.enable_11n);
- nvm->sku_cap_band_24GHz_enable =
- le32_to_cpu(rsp->mac_sku.enable_24g);
- nvm->sku_cap_band_52GHz_enable =
- le32_to_cpu(rsp->mac_sku.enable_5g);
- nvm->sku_cap_mimo_disabled =
- le32_to_cpu(rsp->mac_sku.mimo_disable);
-
- /* Initialize PHY sku data */
- nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
- nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
-
- /* Initialize regulatory data */
- nvm->lar_enabled =
- le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported;
-
- iwl_init_sbands(trans->dev, trans->cfg, nvm,
- rsp->regulatory.channel_profile,
- nvm->valid_tx_ant & fwrt->fw->valid_tx_ant,
- nvm->valid_rx_ant & fwrt->fw->valid_rx_ant,
- nvm->lar_enabled, false);
-
- iwl_free_resp(&hcmd);
- return nvm;
-
-err_free:
- kfree(nvm);
-out:
- iwl_free_resp(&hcmd);
- return ERR_PTR(ret);
-}
-IWL_EXPORT_SYMBOL(iwl_fw_get_nvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
index 1fec8e3a6b35..9b8dd7fe7112 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -163,7 +165,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
const struct fw_img *image)
{
- int sec_idx, idx;
+ int sec_idx, idx, ret;
u32 offset = 0;
/*
@@ -190,17 +192,23 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
*/
if (sec_idx >= image->num_sec - 1) {
IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
- iwl_free_fw_paging(fwrt);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
/* copy the CSS block to the dram */
IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n",
sec_idx);
+ if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) {
+ IWL_ERR(fwrt, "CSS block is larger than paging size\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block),
image->sec[sec_idx].data,
- fwrt->fw_paging_db[0].fw_paging_size);
+ image->sec[sec_idx].len);
dma_sync_single_for_device(fwrt->trans->dev,
fwrt->fw_paging_db[0].fw_paging_phys,
fwrt->fw_paging_db[0].fw_paging_size,
@@ -213,17 +221,39 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
sec_idx++;
/*
- * copy the paging blocks to the dram
- * loop index start from 1 since that CSS block already copied to dram
- * and CSS index is 0.
- * loop stop at num_of_paging_blk since that last block is not full.
+ * Copy the paging blocks to the dram. The loop index starts
+ * from 1 since the CSS block (index 0) was already copied to
+ * dram. We use num_of_paging_blk + 1 to account for that.
*/
- for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) {
+ for (idx = 1; idx < fwrt->num_of_paging_blk + 1; idx++) {
struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+ int remaining = image->sec[sec_idx].len - offset;
+ int len = block->fw_paging_size;
+
+ /*
+ * For the last block, we copy all that is remaining,
+ * for all other blocks, we copy fw_paging_size at a
+ * time.
+ */
+ if (idx == fwrt->num_of_paging_blk) {
+ len = remaining;
+ if (remaining !=
+ fwrt->num_of_pages_in_last_blk * FW_PAGING_SIZE) {
+ IWL_ERR(fwrt,
+ "Paging: last block contains more data than expected %d\n",
+ remaining);
+ ret = -EINVAL;
+ goto err;
+ }
+ } else if (block->fw_paging_size > remaining) {
+ IWL_ERR(fwrt,
+ "Paging: not enough data in other in block %d (%d)\n",
+ idx, remaining);
+ ret = -EINVAL;
+ goto err;
+ }
memcpy(page_address(block->fw_paging_block),
- image->sec[sec_idx].data + offset,
- block->fw_paging_size);
+ image->sec[sec_idx].data + offset, len);
dma_sync_single_for_device(fwrt->trans->dev,
block->fw_paging_phys,
block->fw_paging_size,
@@ -231,30 +261,16 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
IWL_DEBUG_FW(fwrt,
"Paging: copied %d paging bytes to block %d\n",
- fwrt->fw_paging_db[idx].fw_paging_size,
- idx);
-
- offset += fwrt->fw_paging_db[idx].fw_paging_size;
- }
-
- /* copy the last paging block */
- if (fwrt->num_of_pages_in_last_blk > 0) {
- struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+ len, idx);
- memcpy(page_address(block->fw_paging_block),
- image->sec[sec_idx].data + offset,
- FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk);
- dma_sync_single_for_device(fwrt->trans->dev,
- block->fw_paging_phys,
- block->fw_paging_size,
- DMA_BIDIRECTIONAL);
-
- IWL_DEBUG_FW(fwrt,
- "Paging: copied %d pages in the last block %d\n",
- fwrt->num_of_pages_in_last_blk, idx);
+ offset += block->fw_paging_size;
}
return 0;
+
+err:
+ iwl_free_fw_paging(fwrt);
+ return ret;
}
static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt,
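The hardened copy loop above enforces three invariants: the CSS section must fit in the first paging block, every intermediate block must receive a full fw_paging_size of data, and the last block must receive exactly num_of_pages_in_last_blk pages. A minimal standalone sketch of the same size accounting follows; the page and block sizes are illustrative assumptions, not the driver's authoritative values.

/*
 * Illustrative sketch only: validates a paging section layout the same
 * way the new iwl_fill_paging_mem() checks do. Sizes are stand-ins for
 * FW_PAGING_SIZE and one paging block, chosen just for the example.
 */
#include <stddef.h>

#define EX_PAGE_SIZE	4096u
#define EX_BLOCK_SIZE	(8 * EX_PAGE_SIZE)

static int ex_check_paging(size_t sec_len, unsigned int num_blk,
			   unsigned int pages_in_last_blk)
{
	size_t offset = 0;
	unsigned int idx;

	for (idx = 1; idx < num_blk + 1; idx++) {
		size_t remaining = sec_len - offset;
		size_t len = EX_BLOCK_SIZE;

		if (idx == num_blk) {
			/* last block: exactly the leftover pages, no more */
			len = remaining;
			if (remaining != (size_t)pages_in_last_blk * EX_PAGE_SIZE)
				return -1;
		} else if (len > remaining) {
			/* intermediate blocks must be completely filled */
			return -1;
		}
		offset += EX_BLOCK_SIZE;
	}
	return 0;
}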
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 3fb940ebd74a..d8db1dd100b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -170,6 +170,5 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
struct iwl_rx_cmd_buffer *rxb);
-struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_runtime_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index f0f5636dd3ea..c503b26793f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -69,6 +71,7 @@
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/nl80211.h>
+#include "iwl-csr.h"
enum iwl_device_family {
IWL_DEVICE_FAMILY_UNDEFINED,
@@ -151,6 +154,8 @@ enum iwl_nvm_type {
#define ANT_AC (ANT_A | ANT_C)
#define ANT_BC (ANT_B | ANT_C)
#define ANT_ABC (ANT_A | ANT_B | ANT_C)
+#define MAX_ANT_NUM 3
+
static inline u8 num_of_ant(u8 mask)
{
@@ -283,6 +288,52 @@ struct iwl_pwr_tx_backoff {
};
/**
+ * struct iwl_csr_params
+ *
+ * @flag_sw_reset: reset the device
+ * @flag_mac_clock_ready:
+ * Indicates MAC (ucode processor, etc.) is powered up and can run.
+ * Internal resources are accessible.
+ * NOTE: This does not indicate that the processor is actually running.
+ * NOTE: This does not indicate that device has completed
+ * init or post-power-down restore of internal SRAM memory.
+ * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ * SRAM is restored and uCode is in normal operation mode.
+ * This note is relevant only for pre 5xxx devices.
+ * NOTE: After device reset, this bit remains "0" until host sets
+ * INIT_DONE
+ * @flag_init_done: Host sets this to put device into fully operational
+ * D0 power mode. Host resets this after SW_RESET to put device into
+ * low power mode.
+ * @flag_mac_access_req: Host sets this to request and maintain MAC wakeup,
+ * to allow host access to device-internal resources. Host must wait for
+ * mac_clock_ready (and !GOING_TO_SLEEP) before accessing non-CSR device
+ * registers.
+ * @flag_val_mac_access_en: mac access is enabled
+ * @flag_master_dis: disable master
+ * @flag_stop_master: stop master
+ * @addr_sw_reset: address for resetting the device
+ * @mac_addr0_otp: first part of MAC address from OTP
+ * @mac_addr1_otp: second part of MAC address from OTP
+ * @mac_addr0_strap: first part of MAC address from strap
+ * @mac_addr1_strap: second part of MAC address from strap
+ */
+struct iwl_csr_params {
+ u8 flag_sw_reset;
+ u8 flag_mac_clock_ready;
+ u8 flag_init_done;
+ u8 flag_mac_access_req;
+ u8 flag_val_mac_access_en;
+ u8 flag_master_dis;
+ u8 flag_stop_master;
+ u8 addr_sw_reset;
+ u32 mac_addr0_otp;
+ u32 mac_addr1_otp;
+ u32 mac_addr0_strap;
+ u32 mac_addr1_strap;
+};
+
+/**
* struct iwl_cfg
* @name: Official name of the device
* @fw_name_pre: Firmware filename prefix. The api version and extension
@@ -294,8 +345,8 @@ struct iwl_pwr_tx_backoff {
* next step. Supported only in integrated solutions.
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
- * @max_inst_size: The maximal length of the fw inst section
- * @max_data_size: The maximal length of the fw data section
+ * @max_inst_size: The maximal length of the fw inst section (only DVM)
+ * @max_data_size: The maximal length of the fw data section (only DVM)
* @valid_tx_ant: valid transmit antenna
* @valid_rx_ant: valid receive antenna
* @non_shared_ant: the antenna that is for WiFi only
@@ -314,6 +365,7 @@ struct iwl_pwr_tx_backoff {
* @mac_addr_from_csr: read HW address from CSR registers
* @features: hw features, any combination of feature_whitelist
* @pwr_tx_backoffs: translation table between power limits and backoffs
+ * @csr: csr flags and addresses that are different across devices
* @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
* @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
* @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the
@@ -333,8 +385,6 @@ struct iwl_pwr_tx_backoff {
* @gen2: 22000 and on transport operation
* @cdb: CDB support
* @nvm_type: see &enum iwl_nvm_type
- * @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as
- * the regular queues
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -354,6 +404,7 @@ struct iwl_cfg {
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
const char *default_nvm_file_C_step;
const struct iwl_tt_params *thermal_params;
+ const struct iwl_csr_params *csr;
enum iwl_device_family device_family;
enum iwl_led_mode led_mode;
enum iwl_nvm_type nvm_type;
@@ -369,7 +420,7 @@ struct iwl_cfg {
u32 soc_latency;
u16 nvm_ver;
u16 nvm_calib_ver;
- u16 rx_with_siso_diversity:1,
+ u32 rx_with_siso_diversity:1,
bt_shared_single_ant:1,
internal_wimax_coex:1,
host_interrupt_operation_mode:1,
@@ -386,7 +437,6 @@ struct iwl_cfg {
gen2:1,
cdb:1,
dbgc_supported:1;
- u16 tx_cmd_queue_size;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
@@ -401,6 +451,36 @@ struct iwl_cfg {
u32 extra_phy_cfg_flags;
};
+static const struct iwl_csr_params iwl_csr_v1 = {
+ .flag_mac_clock_ready = 0,
+ .flag_val_mac_access_en = 0,
+ .flag_init_done = 2,
+ .flag_mac_access_req = 3,
+ .flag_sw_reset = 7,
+ .flag_master_dis = 8,
+ .flag_stop_master = 9,
+ .addr_sw_reset = (CSR_BASE + 0x020),
+ .mac_addr0_otp = 0x380,
+ .mac_addr1_otp = 0x384,
+ .mac_addr0_strap = 0x388,
+ .mac_addr1_strap = 0x38C
+};
+
+static const struct iwl_csr_params iwl_csr_v2 = {
+ .flag_init_done = 6,
+ .flag_mac_clock_ready = 20,
+ .flag_val_mac_access_en = 20,
+ .flag_mac_access_req = 21,
+ .flag_master_dis = 28,
+ .flag_stop_master = 29,
+ .flag_sw_reset = 31,
+ .addr_sw_reset = (CSR_BASE + 0x024),
+ .mac_addr0_otp = 0x30,
+ .mac_addr1_otp = 0x34,
+ .mac_addr0_strap = 0x38,
+ .mac_addr1_strap = 0x3C
+};
+
/*
* This list declares the config structures for all devices.
*/
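The new iwl_csr_params table turns the GP_CNTRL bit positions and the SW reset address into per-config data instead of compile-time constants, so shared code looks them up through trans->cfg->csr. Below is a trimmed sketch of that access pattern, mirroring the iwl-eeprom-read.c hunk further down; the helper function name is invented for illustration.

/*
 * Sketch of how per-device CSR parameters are consumed; mirrors the
 * iwl-eeprom-read.c change later in this series, trimmed for clarity.
 */
static int ex_wake_nic(struct iwl_trans *trans)
{
	const struct iwl_csr_params *csr = trans->cfg->csr;

	/* request INIT_DONE using the bit position from the config */
	iwl_write32(trans, CSR_GP_CNTRL,
		    iwl_read32(trans, CSR_GP_CNTRL) | BIT(csr->flag_init_done));

	/* wait for the MAC clock, again via the per-device bit */
	return iwl_poll_bit(trans, CSR_GP_CNTRL,
			    BIT(csr->flag_mac_clock_ready),
			    BIT(csr->flag_mac_clock_ready), 25000);
}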
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 4f0d070eda54..ba971d3946e2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -8,6 +8,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +35,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -257,7 +259,6 @@
/* RESET */
#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
-#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
@@ -280,35 +281,10 @@
* 4: GOING_TO_SLEEP
* Indicates MAC is entering a power-saving sleep power-down.
* Not a good time to access device-internal resources.
- * 3: MAC_ACCESS_REQ
- * Host sets this to request and maintain MAC wakeup, to allow host
- * access to device-internal resources. Host must wait for
- * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
- * device registers.
- * 2: INIT_DONE
- * Host sets this to put device into fully operational D0 power mode.
- * Host resets this after SW_RESET to put device into low power mode.
- * 0: MAC_CLOCK_READY
- * Indicates MAC (ucode processor, etc.) is powered up and can run.
- * Internal resources are accessible.
- * NOTE: This does not indicate that the processor is actually running.
- * NOTE: This does not indicate that device has completed
- * init or post-power-down restore of internal SRAM memory.
- * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
- * SRAM is restored and uCode is in normal operation mode.
- * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
- * do not need to save/restore it.
- * NOTE: After device reset, this bit remains "0" until host sets
- * INIT_DONE
*/
-#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
-#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
-#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400)
-#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
-
#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
#define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000)
#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index aa2d5c14e202..c59ce4f8a5ed 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -179,6 +179,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
kfree(drv->fw.dbg_trigger_tlv[i]);
kfree(drv->fw.dbg_mem_tlv);
+ kfree(drv->fw.iml);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
@@ -1126,6 +1127,13 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
pieces->n_dbg_mem_tlv++;
break;
}
+ case IWL_UCODE_TLV_IML: {
+ drv->fw.iml_len = tlv_len;
+ drv->fw.iml = kmemdup(tlv_data, tlv_len, GFP_KERNEL);
+ if (!drv->fw.iml)
+ return -ENOMEM;
+ break;
+ }
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
@@ -1842,3 +1850,9 @@ MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)");
module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444);
MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");
+
+module_param_named(remove_when_gone,
+ iwlwifi_mod_params.remove_when_gone, bool,
+ 0444);
+MODULE_PARM_DESC(remove_when_gone,
+ "Remove dev from PCIe bus if it is deemed inaccessible (default: false)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 3199d345b427..777f5df8a0c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -899,8 +899,8 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
EEPROM_SKU_CAP);
data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE;
data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE;
- data->sku_cap_band_24GHz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
- data->sku_cap_band_52GHz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
+ data->sku_cap_band_24ghz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
+ data->sku_cap_band_52ghz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE;
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
data->sku_cap_11n_enable = false;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
index b33888991b94..8be50ed12300 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -81,10 +81,11 @@ struct iwl_nvm_data {
__le16 kelvin_voltage;
__le16 xtal_calib[2];
- bool sku_cap_band_24GHz_enable;
- bool sku_cap_band_52GHz_enable;
+ bool sku_cap_band_24ghz_enable;
+ bool sku_cap_band_52ghz_enable;
bool sku_cap_11n_enable;
bool sku_cap_11ac_enable;
+ bool sku_cap_11ax_enable;
bool sku_cap_amt_enable;
bool sku_cap_ipan_enable;
bool sku_cap_mimo_disabled;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
index f2cea1c7befc..ac965c34a2f8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -199,12 +201,12 @@ static int iwl_init_otp_access(struct iwl_trans *trans)
/* Enable 40MHz radio clock */
iwl_write32(trans, CSR_GP_CNTRL,
iwl_read32(trans, CSR_GP_CNTRL) |
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ BIT(trans->cfg->csr->flag_init_done));
/* wait for clock to be ready */
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
25000);
if (ret < 0) {
IWL_ERR(trans, "Time out access OTP\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index a41c46e63eb1..a7dd8a8cddf9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -122,6 +122,7 @@ enum iwl_uapsd_disable {
* @lar_disable: disable LAR (regulatory), default = 0
* @fw_monitor: allow to use firmware monitor
* @disable_11ac: disable VHT capabilities, default = false.
+ * @remove_when_gone: remove an inaccessible device from the PCIe bus.
*/
struct iwl_mod_params {
int swcrypto;
@@ -143,6 +144,7 @@ struct iwl_mod_params {
bool lar_disable;
bool fw_monitor;
bool disable_11ac;
+ bool remove_when_gone;
};
#endif /* #__iwl_modparams_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 8928613e033e..0f9d56420c42 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -8,6 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,6 +70,7 @@
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
+#include <linux/firmware.h>
#include "iwl-drv.h"
#include "iwl-modparams.h"
@@ -76,6 +79,10 @@
#include "iwl-io.h"
#include "iwl-csr.h"
#include "fw/acpi.h"
+#include "fw/api/nvm-reg.h"
+#include "fw/api/commands.h"
+#include "fw/api/cmdhdr.h"
+#include "fw/img.h"
/* NVM offsets (in words) definitions */
enum nvm_offsets {
@@ -146,8 +153,8 @@ static const u8 iwl_ext_nvm_channels[] = {
149, 153, 157, 161, 165, 169, 173, 177, 181
};
-#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
-#define IWL_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels)
+#define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
+#define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels)
#define NUM_2GHZ_CHANNELS 14
#define NUM_2GHZ_CHANNELS_EXT 14
#define FIRST_2GHZ_HT_MINUS 5
@@ -291,7 +298,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 * const nvm_ch_flags,
- bool lar_supported, bool no_wide_in_5ghz)
+ u32 sbands_flags)
{
int ch_idx;
int n_channels = 0;
@@ -301,11 +308,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
const u8 *nvm_chan;
if (cfg->nvm_type != IWL_NVM_EXT) {
- num_of_ch = IWL_NUM_CHANNELS;
+ num_of_ch = IWL_NVM_NUM_CHANNELS;
nvm_chan = &iwl_nvm_channels[0];
num_2ghz_channels = NUM_2GHZ_CHANNELS;
} else {
- num_of_ch = IWL_NUM_CHANNELS_EXT;
+ num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
nvm_chan = &iwl_ext_nvm_channels[0];
num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
}
@@ -315,11 +322,12 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
- if (is_5ghz && !data->sku_cap_band_52GHz_enable)
+ if (is_5ghz && !data->sku_cap_band_52ghz_enable)
continue;
/* workaround to disable wide channels in 5GHz */
- if (no_wide_in_5ghz && is_5ghz) {
+ if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) &&
+ is_5ghz) {
ch_flags &= ~(NVM_CHANNEL_40MHZ |
NVM_CHANNEL_80MHZ |
NVM_CHANNEL_160MHZ);
@@ -328,7 +336,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
if (ch_flags & NVM_CHANNEL_160MHZ)
data->vht160_supported = true;
- if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
+ if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) &&
+ !(ch_flags & NVM_CHANNEL_VALID)) {
/*
* Channels might become valid later if lar is
* supported, hence we still want to add them to
@@ -358,7 +367,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
/* don't put limitations in case we're using LAR */
- if (!lar_supported)
+ if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
ch_idx, is_5ghz,
ch_flags, cfg);
@@ -454,17 +463,17 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
-void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
- u8 tx_chains, u8 rx_chains, bool lar_supported,
- bool no_wide_in_5ghz)
+static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ const __le16 *nvm_ch_flags, u8 tx_chains,
+ u8 rx_chains, u32 sbands_flags)
{
int n_channels;
int n_used = 0;
struct ieee80211_supported_band *sband;
n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
- lar_supported, no_wide_in_5ghz);
+ sbands_flags);
sband = &data->bands[NL80211_BAND_2GHZ];
sband->band = NL80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
@@ -490,7 +499,6 @@ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
n_used, n_channels);
}
-IWL_EXPORT_SYMBOL(iwl_init_sbands);
static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
@@ -568,11 +576,15 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
dest[5] = hw_addr[0];
}
-void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
- struct iwl_nvm_data *data)
+static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
+ struct iwl_nvm_data *data)
{
- __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
- __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
+ __le32 mac_addr0 =
+ cpu_to_le32(iwl_read32(trans,
+ trans->cfg->csr->mac_addr0_strap));
+ __le32 mac_addr1 =
+ cpu_to_le32(iwl_read32(trans,
+ trans->cfg->csr->mac_addr1_strap));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
/*
@@ -582,12 +594,13 @@ void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
if (is_valid_ether_addr(data->hw_addr))
return;
- mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
- mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
+ mac_addr0 = cpu_to_le32(iwl_read32(trans,
+ trans->cfg->csr->mac_addr0_otp));
+ mac_addr1 = cpu_to_le32(iwl_read32(trans,
+ trans->cfg->csr->mac_addr1_otp));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}
-IWL_EXPORT_SYMBOL(iwl_set_hw_address_from_csr);
static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
const struct iwl_cfg *cfg,
@@ -712,20 +725,20 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct device *dev = trans->dev;
struct iwl_nvm_data *data;
bool lar_enabled;
- bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw);
u32 sku, radio_cfg;
+ u32 sbands_flags = 0;
u16 lar_config;
const __le16 *ch_section;
if (cfg->nvm_type != IWL_NVM_EXT)
data = kzalloc(sizeof(*data) +
sizeof(struct ieee80211_channel) *
- IWL_NUM_CHANNELS,
+ IWL_NVM_NUM_CHANNELS,
GFP_KERNEL);
else
data = kzalloc(sizeof(*data) +
sizeof(struct ieee80211_channel) *
- IWL_NUM_CHANNELS_EXT,
+ IWL_NVM_NUM_CHANNELS_EXT,
GFP_KERNEL);
if (!data)
return NULL;
@@ -740,8 +753,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
rx_chains &= data->valid_rx_ant;
sku = iwl_get_sku(cfg, nvm_sw, phy_sku);
- data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
- data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
+ data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
+ data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
data->sku_cap_11n_enable = false;
@@ -786,8 +799,14 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return NULL;
}
+ if (lar_fw_supported && lar_enabled)
+ sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
+
+ if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw))
+ sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
+
iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
- lar_fw_supported && lar_enabled, no_wide_in_5ghz);
+ sbands_flags);
data->calib_version = 255;
return data;
@@ -842,24 +861,34 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
return flags;
}
+struct regdb_ptrs {
+ struct ieee80211_wmm_rule *rule;
+ u32 token;
+};
+
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
- int num_of_ch, __le32 *channels, u16 fw_mcc)
+ int num_of_ch, __le32 *channels, u16 fw_mcc,
+ u16 geo_info)
{
int ch_idx;
u16 ch_flags;
u32 reg_rule_flags, prev_reg_rule_flags = 0;
const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
iwl_ext_nvm_channels : iwl_nvm_channels;
- struct ieee80211_regdomain *regd;
- int size_of_regd;
+ struct ieee80211_regdomain *regd, *copy_rd;
+ int size_of_regd, regd_to_copy, wmms_to_copy;
+ int size_of_wmms = 0;
struct ieee80211_reg_rule *rule;
+ struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
+ struct regdb_ptrs *regdb_ptrs;
enum nl80211_band band;
int center_freq, prev_center_freq = 0;
- int valid_rules = 0;
+ int valid_rules = 0, n_wmms = 0;
+ int i;
bool new_rule;
int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
- IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS;
+ IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
return ERR_PTR(-EINVAL);
@@ -875,10 +904,26 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
sizeof(struct ieee80211_regdomain) +
num_of_ch * sizeof(struct ieee80211_reg_rule);
- regd = kzalloc(size_of_regd, GFP_KERNEL);
+ if (geo_info & GEO_WMM_ETSI_5GHZ_INFO)
+ size_of_wmms =
+ num_of_ch * sizeof(struct ieee80211_wmm_rule);
+
+ regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);
+ regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL);
+ if (!regdb_ptrs) {
+ copy_rd = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ /* set alpha2 from FW. */
+ regd->alpha2[0] = fw_mcc >> 8;
+ regd->alpha2[1] = fw_mcc & 0xff;
+
+ wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
+
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
band = (ch_idx < NUM_2GHZ_CHANNELS) ?
@@ -927,14 +972,357 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
nvm_chan[ch_idx], ch_flags);
+
+ if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) ||
+ band == NL80211_BAND_2GHZ)
+ continue;
+
+ if (!reg_query_regdb_wmm(regd->alpha2, center_freq,
+ &regdb_ptrs[n_wmms].token, wmm_rule)) {
+ /* Add only new rules */
+ for (i = 0; i < n_wmms; i++) {
+ if (regdb_ptrs[i].token ==
+ regdb_ptrs[n_wmms].token) {
+ rule->wmm_rule = regdb_ptrs[i].rule;
+ break;
+ }
+ }
+ if (i == n_wmms) {
+ rule->wmm_rule = wmm_rule;
+ regdb_ptrs[n_wmms++].rule = wmm_rule;
+ wmm_rule++;
+ }
+ }
}
regd->n_reg_rules = valid_rules;
+ regd->n_wmm_rules = n_wmms;
- /* set alpha2 from FW. */
- regd->alpha2[0] = fw_mcc >> 8;
- regd->alpha2[1] = fw_mcc & 0xff;
+ /*
+ * Copy the regdomain into a buffer sized for only the valid rules, so
+ * that there is no gap between the reg rules and the wmm rules.
+ */
+ regd_to_copy = sizeof(struct ieee80211_regdomain) +
+ valid_rules * sizeof(struct ieee80211_reg_rule);
+
+ wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms;
+
+ copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
+ if (!copy_rd) {
+ copy_rd = ERR_PTR(-ENOMEM);
+ goto out;
+ }
- return regd;
+ memcpy(copy_rd, regd, regd_to_copy);
+ memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
+ wmms_to_copy);
+
+ d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
+ s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
+
+ for (i = 0; i < regd->n_reg_rules; i++) {
+ if (!regd->reg_rules[i].wmm_rule)
+ continue;
+
+ copy_rd->reg_rules[i].wmm_rule = d_wmm +
+ (regd->reg_rules[i].wmm_rule - s_wmm);
+ }
+
+out:
+ kfree(regdb_ptrs);
+ kfree(regd);
+ return copy_rd;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
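When the regdomain is shrunk into copy_rd, the wmm_rule pointers that were set up inside the original buffer must be re-targeted at the copied WMM array. Since C pointer subtraction between two struct ieee80211_wmm_rule pointers already yields an element count, the translation is simply base plus offset, with no scaling by sizeof(). A tiny standalone illustration with hypothetical names:

/*
 * Hypothetical illustration: relocate a pointer that points into
 * old_base[] so that it points at the matching element of new_base[].
 * Pointer subtraction already counts elements, so no sizeof scaling.
 */
struct ex_rule { int dummy; };

static struct ex_rule *ex_relocate(struct ex_rule *ptr,
				   struct ex_rule *old_base,
				   struct ex_rule *new_base)
{
	return new_base + (ptr - old_base);
}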
+
+#define IWL_MAX_NVM_SECTION_SIZE 0x1b58
+#define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc
+#define MAX_NVM_FILE_LEN 16384
+
+void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
+ unsigned int len)
+{
+#define IWL_4165_DEVICE_ID 0x5501
+#define NVM_SKU_CAP_MIMO_DISABLE BIT(5)
+
+ if (section == NVM_SECTION_TYPE_PHY_SKU &&
+ hw_id == IWL_4165_DEVICE_ID && data && len >= 5 &&
+ (data[4] & NVM_SKU_CAP_MIMO_DISABLE))
+ /* OTP 0x52 bug work around: it's a 1x1 device */
+ data[3] = ANT_B | (ANT_B << 4);
+}
+IWL_EXPORT_SYMBOL(iwl_nvm_fixups);
+
+/*
+ * Reads external NVM from a file into mvm->nvm_sections
+ *
+ * HOW TO CREATE THE NVM FILE FORMAT:
+ * ------------------------------
+ * 1. create hex file, format:
+ * 3800 -> header
+ * 0000 -> header
+ * 5a40 -> data
+ *
+ * rev - 6 bit (word1)
+ * len - 10 bit (word1)
+ * id - 4 bit (word2)
+ * rsv - 12 bit (word2)
+ *
+ * 2. flip 8bits with 8 bits per line to get the right NVM file format
+ *
+ * 3. create binary file from the hex file
+ *
+ * 4. save as "iNVM_xxx.bin" under /lib/firmware
+ */
+int iwl_read_external_nvm(struct iwl_trans *trans,
+ const char *nvm_file_name,
+ struct iwl_nvm_section *nvm_sections)
+{
+ int ret, section_size;
+ u16 section_id;
+ const struct firmware *fw_entry;
+ const struct {
+ __le16 word1;
+ __le16 word2;
+ u8 data[];
+ } *file_sec;
+ const u8 *eof;
+ u8 *temp;
+ int max_section_size;
+ const __le32 *dword_buff;
+
+#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
+#define NVM_WORD2_ID(x) (x >> 12)
+#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8))
+#define EXT_NVM_WORD1_ID(x) ((x) >> 4)
+#define NVM_HEADER_0 (0x2A504C54)
+#define NVM_HEADER_1 (0x4E564D2A)
+#define NVM_HEADER_SIZE (4 * sizeof(u32))
+
+ IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n");
+
+ /* Maximal size depends on NVM version */
+ if (trans->cfg->nvm_type != IWL_NVM_EXT)
+ max_section_size = IWL_MAX_NVM_SECTION_SIZE;
+ else
+ max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
+
+ /*
+ * Obtain NVM image via request_firmware. Since we already used
+ * request_firmware_nowait() for the firmware binary load and only
+ * get here after that we assume the NVM request can be satisfied
+ * synchronously.
+ */
+ ret = request_firmware(&fw_entry, nvm_file_name, trans->dev);
+ if (ret) {
+ IWL_ERR(trans, "ERROR: %s isn't available %d\n",
+ nvm_file_name, ret);
+ return ret;
+ }
+
+ IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n",
+ nvm_file_name, fw_entry->size);
+
+ if (fw_entry->size > MAX_NVM_FILE_LEN) {
+ IWL_ERR(trans, "NVM file too large\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ eof = fw_entry->data + fw_entry->size;
+ dword_buff = (__le32 *)fw_entry->data;
+
+ /* Some NVM files contain a header.
+ * The header is identified by the following two dwords:
+ * dword[0] = 0x2A504C54
+ * dword[1] = 0x4E564D2A
+ *
+ * This header must be skipped when providing the NVM data to the FW.
+ */
+ if (fw_entry->size > NVM_HEADER_SIZE &&
+ dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
+ dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
+ file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
+ IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
+ IWL_INFO(trans, "NVM Manufacturing date %08X\n",
+ le32_to_cpu(dword_buff[3]));
+
+ /* nvm file validation, dword_buff[2] holds the file version */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+ CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP &&
+ le32_to_cpu(dword_buff[2]) < 0xE4A) {
+ ret = -EFAULT;
+ goto out;
+ }
+ } else {
+ file_sec = (void *)fw_entry->data;
+ }
+
+ while (true) {
+ if (file_sec->data > eof) {
+ IWL_ERR(trans,
+ "ERROR - NVM file too short for section header\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* check for EOF marker */
+ if (!file_sec->word1 && !file_sec->word2) {
+ ret = 0;
+ break;
+ }
+
+ if (trans->cfg->nvm_type != IWL_NVM_EXT) {
+ section_size =
+ 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
+ section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
+ } else {
+ section_size = 2 * EXT_NVM_WORD2_LEN(
+ le16_to_cpu(file_sec->word2));
+ section_id = EXT_NVM_WORD1_ID(
+ le16_to_cpu(file_sec->word1));
+ }
+
+ if (section_size > max_section_size) {
+ IWL_ERR(trans, "ERROR - section too large (%d)\n",
+ section_size);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!section_size) {
+ IWL_ERR(trans, "ERROR - section empty\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (file_sec->data + section_size > eof) {
+ IWL_ERR(trans,
+ "ERROR - NVM file too short for section (%d bytes)\n",
+ section_size);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
+ "Invalid NVM section ID %d\n", section_id)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
+ if (!temp) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size);
+
+ kfree(nvm_sections[section_id].data);
+ nvm_sections[section_id].data = temp;
+ nvm_sections[section_id].length = section_size;
+
+ /* advance to the next section */
+ file_sec = (void *)(file_sec->data + section_size);
+ }
+out:
+ release_firmware(fw_entry);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_read_external_nvm);
+
+struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
+ const struct iwl_fw *fw)
+{
+ struct iwl_nvm_get_info cmd = {};
+ struct iwl_nvm_get_info_rsp *rsp;
+ struct iwl_nvm_data *nvm;
+ struct iwl_host_cmd hcmd = {
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &cmd, },
+ .len = { sizeof(cmd) },
+ .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
+ };
+ int ret;
+ bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
+ fw_has_capa(&fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+ u32 mac_flags;
+ u32 sbands_flags = 0;
+
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
+ "Invalid payload len in NVM response from FW %d",
+ iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
+ IWL_INFO(trans, "OTP is empty\n");
+
+ nvm = kzalloc(sizeof(*nvm) +
+ sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
+ GFP_KERNEL);
+ if (!nvm) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ iwl_set_hw_address_from_csr(trans, nvm);
+ /* TODO: if platform NVM has MAC address - override it here */
+
+ if (!is_valid_ether_addr(nvm->hw_addr)) {
+ IWL_ERR(trans, "no valid mac address was found\n");
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr);
+
+ /* Initialize general data */
+ nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
+
+ /* Initialize MAC sku data */
+ mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
+ nvm->sku_cap_11ac_enable =
+ !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
+ nvm->sku_cap_11n_enable =
+ !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
+ nvm->sku_cap_band_24ghz_enable =
+ !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
+ nvm->sku_cap_band_52ghz_enable =
+ !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
+ nvm->sku_cap_mimo_disabled =
+ !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
+
+ /* Initialize PHY sku data */
+ nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
+ nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
+
+ if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
+ nvm->lar_enabled = true;
+ sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
+ }
+
+ iwl_init_sbands(trans->dev, trans->cfg, nvm,
+ rsp->regulatory.channel_profile,
+ nvm->valid_tx_ant & fw->valid_tx_ant,
+ nvm->valid_rx_ant & fw->valid_rx_ant,
+ sbands_flags);
+
+ iwl_free_resp(&hcmd);
+ return nvm;
+
+err_free:
+ kfree(nvm);
+out:
+ iwl_free_resp(&hcmd);
+ return ERR_PTR(ret);
+}
+IWL_EXPORT_SYMBOL(iwl_get_nvm);
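Each section header that iwl_read_external_nvm() walks is a pair of little-endian 16-bit words, and the legacy and extended NVM layouts split length and id between those words differently, which is what the NVM_WORD*/EXT_NVM_WORD* macros encode. A small self-contained example of decoding one header with the same macros; the helper is invented for illustration and the macros are repeated here so the snippet stands alone.

/*
 * Example only: decode one external-NVM section header the same way
 * iwl_read_external_nvm() does.
 */
#define NVM_WORD1_LEN(x)	(8 * (x & 0x03FF))
#define NVM_WORD2_ID(x)		(x >> 12)
#define EXT_NVM_WORD2_LEN(x)	(2 * (((x) & 0xFF) << 8 | (x) >> 8))
#define EXT_NVM_WORD1_ID(x)	((x) >> 4)

static void ex_decode(u16 word1, u16 word2, bool ext_nvm,
		      int *section_size, u16 *section_id)
{
	if (!ext_nvm) {
		/* legacy layout: length in word1, id in word2 */
		*section_size = 2 * NVM_WORD1_LEN(word1);
		*section_id = NVM_WORD2_ID(word2);
	} else {
		/* extended layout: byte-swapped length in word2, id in word1 */
		*section_size = 2 * EXT_NVM_WORD2_LEN(word2);
		*section_id = EXT_NVM_WORD1_ID(word1);
	}
}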
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 306736c7a042..234d1009a9de 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,6 +70,17 @@
#include "iwl-eeprom-parse.h"
/**
+ * enum iwl_nvm_sbands_flags - modification flags for the channel profiles
+ *
+ * @IWL_NVM_SBANDS_FLAGS_LAR: LAR is enabled
+ * @IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ: disallow 40, 80 and 160MHz on 5GHz
+ */
+enum iwl_nvm_sbands_flags {
+ IWL_NVM_SBANDS_FLAGS_LAR = BIT(0),
+ IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1),
+};
+
+/**
* iwl_parse_nvm_data - parse NVM data and return values
*
* This function parses all NVM values we need and then
@@ -83,30 +96,47 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
/**
- * iwl_set_hw_address_from_csr - sets HW address for 9000 devices and on
- */
-void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
- struct iwl_nvm_data *data);
-
-/**
- * iwl_init_sbands - parse and set all channel profiles
- */
-void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
- u8 tx_chains, u8 rx_chains, bool lar_supported,
- bool no_wide_in_5ghz);
-
-/**
* iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
*
* This function parses the regulatory channel data received as a
* MCC_UPDATE_CMD command. It returns a newly allocated regulatory domain,
- * to be fed into the regulatory core. An ERR_PTR is returned on error.
+ * to be fed into the regulatory core. If geo_info is set, ETSI WMM rules
+ * are attached from the regulatory database. An ERR_PTR is returned on error.
* If not given to the regulatory core, the user is responsible for freeing
* the regdomain returned here with kfree.
*/
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
- int num_of_ch, __le32 *channels, u16 fw_mcc);
+ int num_of_ch, __le32 *channels, u16 fw_mcc,
+ u16 geo_info);
+/**
+ * struct iwl_nvm_section - describes an NVM section in memory.
+ *
+ * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD,
+ * and saved for later use by the driver. Not all NVM sections are saved
+ * this way, only the needed ones.
+ */
+struct iwl_nvm_section {
+ u16 length;
+ const u8 *data;
+};
+
+/**
+ * iwl_read_external_nvm - Reads external NVM from a file into nvm_sections
+ */
+int iwl_read_external_nvm(struct iwl_trans *trans,
+ const char *nvm_file_name,
+ struct iwl_nvm_section *nvm_sections);
+void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
+ unsigned int len);
+
+/**
+ * iwl_get_nvm - retrieve NVM data from firmware
+ *
+ * Allocates a new iwl_nvm_data structure, fills it with
+ * NVM data, and returns it to caller.
+ */
+struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
+ const struct iwl_fw *fw);
#endif /* __iwl_nvm_parse_h__ */
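As the comment on iwl_parse_nvm_mcc_info() notes, the returned regdomain is either handed to the regulatory core or must be freed by the caller. Below is a sketch of that consumer flow; the wrapper name is illustrative, regulatory_set_wiphy_regd() is the cfg80211 entry point, and freeing the local copy afterwards assumes cfg80211 duplicates the regdomain internally, as the mvm code relies on.

/*
 * Sketch with illustrative error handling: hand the parsed regdomain
 * to cfg80211, then drop the driver-local copy.
 */
static int ex_apply_mcc(struct wiphy *wiphy, struct device *dev,
			const struct iwl_cfg *cfg, int num_of_ch,
			__le32 *channels, u16 mcc, u16 geo_info)
{
	struct ieee80211_regdomain *regd;
	int ret;

	regd = iwl_parse_nvm_mcc_info(dev, cfg, num_of_ch, channels,
				      mcc, geo_info);
	if (IS_ERR(regd))
		return PTR_ERR(regd);

	ret = regulatory_set_wiphy_regd(wiphy, regd);
	kfree(regd);	/* assumed safe: cfg80211 keeps its own copy */
	return ret;
}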
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index c25ed1a0bbb0..1b9c627ee34d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -554,7 +554,7 @@ struct iwl_trans_ops {
/* 22000 functions */
int (*txq_alloc)(struct iwl_trans *trans,
struct iwl_tx_queue_cfg_cmd *cmd,
- int cmd_id,
+ int cmd_id, int size,
unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
@@ -691,6 +691,8 @@ enum iwl_plat_pm_mode {
* @wide_cmd_header: true when ucode supports wide command header format
* @num_rx_queues: number of RX queues allocated by the transport;
* the transport must set this before calling iwl_drv_start()
+ * @iml_len: the length of the image loader
+ * @iml: a pointer to the image loader itself
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
* @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
@@ -735,6 +737,9 @@ struct iwl_trans {
u8 num_rx_queues;
+ size_t iml_len;
+ u8 *iml;
+
/* The following fields are internal only */
struct kmem_cache *dev_cmd_pool;
char dev_cmd_pool_name[50];
@@ -952,8 +957,8 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue)
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
struct iwl_tx_queue_cfg_cmd *cmd,
- int cmd_id,
- unsigned int queue_wdg_timeout)
+ int cmd_id, int size,
+ unsigned int wdg_timeout)
{
might_sleep();
@@ -965,7 +970,7 @@ iwl_trans_txq_alloc(struct iwl_trans *trans,
return -EIO;
}
- return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
+ return trans->ops->txq_alloc(trans, cmd, cmd_id, size, wdg_timeout);
}
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 890dbfff3a06..016e03a5034f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -279,6 +279,8 @@ struct iwl_bt_iterator_data {
struct ieee80211_chanctx_conf *primary;
struct ieee80211_chanctx_conf *secondary;
bool primary_ll;
+ u8 primary_load;
+ u8 secondary_load;
};
static inline
@@ -295,6 +297,30 @@ void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
}
+#define MVM_COEX_TCM_PERIOD (HZ * 10)
+
+static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
+ struct iwl_bt_iterator_data *data)
+{
+ unsigned long now = jiffies;
+
+ if (!time_after(now, mvm->bt_coex_last_tcm_ts + MVM_COEX_TCM_PERIOD))
+ return;
+
+ mvm->bt_coex_last_tcm_ts = now;
+
+ /* We assume here that we don't have more than 2 vifs on 2.4GHz */
+
+ /* if the primary is low latency, it will stay primary */
+ if (data->primary_ll)
+ return;
+
+ if (data->primary_load >= data->secondary_load)
+ return;
+
+ swap(data->primary, data->secondary);
+}
+
/* must be called under rcu_read_lock */
static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
@@ -385,6 +411,11 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
/* there is low latency vif - we will be secondary */
data->secondary = chanctx_conf;
}
+
+ if (data->primary == chanctx_conf)
+ data->primary_load = mvm->tcm.result.load[mvmvif->id];
+ else if (data->secondary == chanctx_conf)
+ data->secondary_load = mvm->tcm.result.load[mvmvif->id];
return;
}
@@ -398,6 +429,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
/* if secondary is not NULL, it might be a GO */
data->secondary = chanctx_conf;
+ if (data->primary == chanctx_conf)
+ data->primary_load = mvm->tcm.result.load[mvmvif->id];
+ else if (data->secondary == chanctx_conf)
+ data->secondary_load = mvm->tcm.result.load[mvmvif->id];
/*
* don't reduce the Tx power if one of these is true:
* we are in LOOSE
@@ -449,6 +484,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
+ iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
+
if (data.primary) {
struct ieee80211_chanctx_conf *chan = data.primary;
if (WARN_ON(!chan->def.chan)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 96b52a275ee3..d61ff66ce07b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -69,6 +69,8 @@
#include <linux/ieee80211.h>
+#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
+
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
@@ -112,6 +114,11 @@
#define IWL_MVM_PARSE_NVM 0
#define IWL_MVM_ADWELL_ENABLE 1
#define IWL_MVM_ADWELL_MAX_BUDGET 0
+#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */
+#define IWL_MVM_TCM_LOAD_HIGH_THRESH 50 /* percentage */
+#define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH 100 /* packets/10 seconds */
+#define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */
+#define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 2efe9b099556..3fcf489f3120 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -35,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -693,6 +690,14 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
IWL_WOWLAN_WAKEUP_LINK_CHANGE);
}
+ if (wowlan->any) {
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+ IWL_WOWLAN_WAKEUP_RX_FRAME |
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+ }
+
return 0;
}
@@ -1097,6 +1102,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
/* make sure the d0i3 exit work is not pending */
flush_work(&mvm->d0i3_exit_work);
+ iwl_mvm_pause_tcm(mvm, true);
iwl_fw_runtime_suspend(&mvm->fwrt);
@@ -2014,6 +2020,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ iwl_mvm_resume_tcm(mvm);
+
iwl_fw_runtime_resume(&mvm->fwrt);
return ret;
@@ -2042,6 +2050,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+ iwl_mvm_pause_tcm(mvm, true);
+
iwl_fw_runtime_suspend(&mvm->fwrt);
/* start pseudo D3 */
@@ -2104,6 +2114,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
__iwl_mvm_resume(mvm, true);
rtnl_unlock();
+ iwl_mvm_resume_tcm(mvm);
+
iwl_fw_runtime_resume(&mvm->fwrt);
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index f7fcf700196b..798605c4f122 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -269,6 +269,8 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
mvmvif->id, mvmvif->color);
pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
vif->bss_conf.bssid);
+ pos += scnprintf(buf+pos, bufsz-pos, "Load: %d\n",
+ mvm->tcm.result.load[mvmvif->id]);
pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
pos += scnprintf(buf+pos, bufsz-pos,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 0e6401cd7ccc..1c4178f20441 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1728,6 +1728,27 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
+static ssize_t
+iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ u8 buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1];
+ unsigned int pos = 0;
+ size_t bufsz = sizeof(buf);
+ int i;
+
+ mutex_lock(&mvm->mutex);
+
+ for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++)
+ pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
+ mvm->uapsd_noagg_bssids[i].addr);
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
/* Device wide debugfs entries */
@@ -1762,6 +1783,8 @@ MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
+MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
+
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
@@ -1972,6 +1995,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
mvm->debugfs_dir, &mvm->drop_bcn_ap_mode))
goto err;
+ MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
+
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
bcast_dir = debugfs_create_dir("bcast_filtering",
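
Note on the new uapsd_noagg_bssids read handler above: the stack buffer is sized from the number of stored addresses, since each "%pM\n" line is 17 characters of MAC address plus one newline, i.e. ETH_ALEN * 3 = 18 bytes per entry:

	bufsz = IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1

which leaves exactly one byte for the terminating NUL. This sizing holds only as long as IWL_MVM_UAPSD_NOAGG_LIST_LEN (the loop bound) does not exceed IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM (the array and buffer bound); neither constant is defined in this hunk, so that relationship is assumed here rather than shown.
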
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 3c59109bea20..866c91c923be 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -79,6 +81,8 @@
#include "mvm.h"
#include "fw/dbg.h"
#include "iwl-phy-db.h"
+#include "iwl-modparams.h"
+#include "iwl-nvm-parse.h"
#define MVM_UCODE_ALIVE_TIMEOUT HZ
#define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
@@ -381,7 +385,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
/* Load NVM to NIC if needed */
if (mvm->nvm_file_name) {
- iwl_mvm_read_external_nvm(mvm);
+ iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
+ mvm->nvm_sections);
iwl_mvm_load_nvm_to_nic(mvm);
}
@@ -410,7 +415,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
/* Read the NVM only at driver load time, no need to do this twice */
if (!IWL_MVM_PARSE_NVM && read_nvm) {
- mvm->nvm_data = iwl_fw_get_nvm(&mvm->fwrt);
+ mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
if (IS_ERR(mvm->nvm_data)) {
ret = PTR_ERR(mvm->nvm_data);
mvm->nvm_data = NULL;
@@ -1093,6 +1098,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
+ mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
ret = iwl_mvm_config_scan(mvm);
if (ret)
goto error;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 51b30424575b..5f0701c992a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -311,7 +311,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
__le32_to_cpu(resp->n_channels),
resp->channels,
- __le16_to_cpu(resp->mcc));
+ __le16_to_cpu(resp->mcc),
+ __le16_to_cpu(resp->geo_info));
/* Store the return source id */
src_id = resp->source_id;
kfree(resp);
@@ -952,6 +953,16 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
+ if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id ==
+ iwl_mvm_sta_from_mac80211(sta)->sta_id) {
+ struct iwl_mvm_vif *mvmvif;
+ u16 macid = iwl_mvm_vif_from_mac80211(vif)->id;
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid];
+
+ mdata->opened_rx_ba_sessions = true;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
+ }
if (!iwl_enable_rx_ampdu(mvm->cfg)) {
ret = -EINVAL;
break;
@@ -1435,6 +1446,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvm->p2p_device_vif = vif;
}
+ iwl_mvm_tcm_add_vif(mvm, vif);
+
if (vif->type == NL80211_IFTYPE_MONITOR)
mvm->monitor_on = true;
@@ -1486,6 +1499,10 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
iwl_mvm_prepare_mac_removal(mvm, vif);
+ if (!(vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC))
+ iwl_mvm_tcm_rm_vif(mvm, vif);
+
mutex_lock(&mvm->mutex);
if (mvm->bf_allowed_vif == mvmvif) {
@@ -2535,6 +2552,16 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
const u8 *bssid)
{
+ int i;
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ struct iwl_mvm_tcm_mac *mdata;
+
+ mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id];
+ ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
+ mdata->opened_rx_ba_sessions = false;
+ }
+
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
return;
@@ -2549,6 +2576,13 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return;
}
+ for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) {
+ if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) {
+ vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+ return;
+ }
+ }
+
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index d2cf751db68d..6a4ba160c59e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -90,7 +92,9 @@
#include "fw/runtime.h"
#include "fw/dbg.h"
#include "fw/acpi.h"
-#include "fw/debugfs.h"
+#include "iwl-nvm-parse.h"
+
+#include <linux/average.h>
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
@@ -444,6 +448,8 @@ struct iwl_mvm_vif {
/* FW identified misbehaving AP */
u8 uapsd_misbehaving_bssid[ETH_ALEN];
+ struct delayed_work uapsd_nonagg_detected_wk;
+
/* Indicates that CSA countdown may be started */
bool csa_countdown;
bool csa_failed;
@@ -503,18 +509,6 @@ enum iwl_mvm_sched_scan_pass_all_states {
};
/**
- * struct iwl_nvm_section - describes an NVM section in memory.
- *
- * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD,
- * and saved for later use by the driver. Not all NVM sections are saved
- * this way, only the needed ones.
- */
-struct iwl_nvm_section {
- u16 length;
- const u8 *data;
-};
-
-/**
* struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
* @ct_kill_exit: worker to exit thermal kill
* @dynamic_smps: Is thermal throttling enabled dynamic_smps?
@@ -595,6 +589,53 @@ enum iwl_mvm_tdls_cs_state {
IWL_MVM_TDLS_SW_ACTIVE,
};
+enum iwl_mvm_traffic_load {
+ IWL_MVM_TRAFFIC_LOW,
+ IWL_MVM_TRAFFIC_MEDIUM,
+ IWL_MVM_TRAFFIC_HIGH,
+};
+
+DECLARE_EWMA(rate, 16, 16)
+
+struct iwl_mvm_tcm_mac {
+ struct {
+ u32 pkts[IEEE80211_NUM_ACS];
+ u32 airtime;
+ } tx;
+ struct {
+ u32 pkts[IEEE80211_NUM_ACS];
+ u32 airtime;
+ u32 last_ampdu_ref;
+ } rx;
+ struct {
+ /* track AP's transfer in client mode */
+ u64 rx_bytes;
+ struct ewma_rate rate;
+ bool detected;
+ } uapsd_nonagg_detect;
+ bool opened_rx_ba_sessions;
+};
+
+struct iwl_mvm_tcm {
+ struct delayed_work work;
+ spinlock_t lock; /* used when time elapsed */
+ unsigned long ts; /* timestamp when period ends */
+ unsigned long ll_ts;
+ unsigned long uapsd_nonagg_ts;
+ bool paused;
+ struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER];
+ struct {
+ u32 elapsed; /* milliseconds for this TCM period */
+ u32 airtime[NUM_MAC_INDEX_DRIVER];
+ enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER];
+ enum iwl_mvm_traffic_load band_load[NUM_NL80211_BANDS];
+ enum iwl_mvm_traffic_load global_load;
+ bool low_latency[NUM_MAC_INDEX_DRIVER];
+ bool change[NUM_MAC_INDEX_DRIVER];
+ bool global_change;
+ } result;
+};
+
/**
* struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
* @head_sn: reorder window head sn
@@ -829,7 +870,10 @@ struct iwl_mvm {
unsigned int scan_status;
void *scan_cmd;
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
+ /* For CDB this is low band scan type, for non-CDB - type. */
enum iwl_mvm_scan_type scan_type;
+ enum iwl_mvm_scan_type hb_scan_type;
+
enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
struct delayed_work scan_timeout_dwork;
@@ -978,6 +1022,13 @@ struct iwl_mvm {
*/
bool temperature_test; /* Debug test temperature is enabled */
+ unsigned long bt_coex_last_tcm_ts;
+ struct iwl_mvm_tcm tcm;
+
+ u8 uapsd_noagg_bssid_write_idx;
+ struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM]
+ __aligned(2);
+
struct iwl_time_quota_cmd last_quota_cmd;
#ifdef CONFIG_NL80211_TESTMODE
@@ -1293,6 +1344,16 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
}
+static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm)
+{
+ /*
+ * TODO: should this be the same as iwl_mvm_is_cdb_supported()?
+ * but then there's a little bit of code in scan that won't make
+ * any sense...
+ */
+ return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
+}
+
static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
@@ -1438,7 +1499,6 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
/* NVM */
int iwl_nvm_init(struct iwl_mvm *mvm);
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
-int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm);
static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
{
@@ -1771,6 +1831,8 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_low_latency_cause cause);
/* get SystemLowLatencyMode - only needed for beacon threshold? */
bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
+bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band);
+
/* get VMACLowLatencyMode */
static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
{
@@ -1906,6 +1968,17 @@ bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
+#define MVM_TCM_PERIOD_MSEC 500
+#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
+#define MVM_LL_PERIOD (10 * HZ)
+void iwl_mvm_tcm_work(struct work_struct *work);
+void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm);
+void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel);
+void iwl_mvm_resume_tcm(struct iwl_mvm *mvm);
+void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed);
+
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
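
Note: the TCM additions to mvm.h above only declare the bookkeeping — per-MAC tx/rx packet and airtime counters, a 500 ms accounting period, and a helper that turns accumulated airtime into a percentage of the elapsed period. A minimal sketch of how those pieces could fit together; the percentage formula is the straightforward reading of the iwl_mvm_tcm_load_percentage() declaration, while the function names and the LOW/MEDIUM/HIGH cut-offs below are placeholders, not values taken from the driver:

	/* sketch only: names and thresholds are assumptions for illustration */
	static u8 example_load_percentage(u32 airtime, u32 elapsed)
	{
		if (!elapsed)
			return 0;
		return (100 * airtime) / elapsed;
	}

	static enum iwl_mvm_traffic_load example_classify(u8 pct)
	{
		if (pct > 50)		/* assumed HIGH threshold */
			return IWL_MVM_TRAFFIC_HIGH;
		if (pct > 10)		/* assumed MEDIUM threshold */
			return IWL_MVM_TRAFFIC_MEDIUM;
		return IWL_MVM_TRAFFIC_LOW;
	}
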
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 5bfe5306524c..cf48517944ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,9 +78,7 @@
#include "fw/acpi.h"
/* Default NVM size to read */
-#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
-#define IWL_MAX_NVM_SECTION_SIZE 0x1b58
-#define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc
+#define IWL_NVM_DEFAULT_CHUNK_SIZE (2 * 1024)
#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0
@@ -229,19 +229,6 @@ static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
return 0;
}
-static void iwl_mvm_nvm_fixups(struct iwl_mvm *mvm, unsigned int section,
- u8 *data, unsigned int len)
-{
-#define IWL_4165_DEVICE_ID 0x5501
-#define NVM_SKU_CAP_MIMO_DISABLE BIT(5)
-
- if (section == NVM_SECTION_TYPE_PHY_SKU &&
- mvm->trans->hw_id == IWL_4165_DEVICE_ID && data && len >= 5 &&
- (data[4] & NVM_SKU_CAP_MIMO_DISABLE))
- /* OTP 0x52 bug work around: it's a 1x1 device */
- data[3] = ANT_B | (ANT_B << 4);
-}
-
/*
* Reads an NVM section completely.
* NICs prior to 7000 family doesn't have a real NVM, but just read
@@ -282,7 +269,7 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
offset += ret;
}
- iwl_mvm_nvm_fixups(mvm, section, data, offset);
+ iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset);
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM section %d read completed\n", section);
@@ -355,184 +342,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
lar_enabled);
}
-#define MAX_NVM_FILE_LEN 16384
-
-/*
- * Reads external NVM from a file into mvm->nvm_sections
- *
- * HOW TO CREATE THE NVM FILE FORMAT:
- * ------------------------------
- * 1. create hex file, format:
- * 3800 -> header
- * 0000 -> header
- * 5a40 -> data
- *
- * rev - 6 bit (word1)
- * len - 10 bit (word1)
- * id - 4 bit (word2)
- * rsv - 12 bit (word2)
- *
- * 2. flip 8bits with 8 bits per line to get the right NVM file format
- *
- * 3. create binary file from the hex file
- *
- * 4. save as "iNVM_xxx.bin" under /lib/firmware
- */
-int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
-{
- int ret, section_size;
- u16 section_id;
- const struct firmware *fw_entry;
- const struct {
- __le16 word1;
- __le16 word2;
- u8 data[];
- } *file_sec;
- const u8 *eof;
- u8 *temp;
- int max_section_size;
- const __le32 *dword_buff;
-
-#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
-#define NVM_WORD2_ID(x) (x >> 12)
-#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8))
-#define EXT_NVM_WORD1_ID(x) ((x) >> 4)
-#define NVM_HEADER_0 (0x2A504C54)
-#define NVM_HEADER_1 (0x4E564D2A)
-#define NVM_HEADER_SIZE (4 * sizeof(u32))
-
- IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
-
- /* Maximal size depends on NVM version */
- if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT)
- max_section_size = IWL_MAX_NVM_SECTION_SIZE;
- else
- max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
-
- /*
- * Obtain NVM image via request_firmware. Since we already used
- * request_firmware_nowait() for the firmware binary load and only
- * get here after that we assume the NVM request can be satisfied
- * synchronously.
- */
- ret = request_firmware(&fw_entry, mvm->nvm_file_name,
- mvm->trans->dev);
- if (ret) {
- IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
- mvm->nvm_file_name, ret);
- return ret;
- }
-
- IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
- mvm->nvm_file_name, fw_entry->size);
-
- if (fw_entry->size > MAX_NVM_FILE_LEN) {
- IWL_ERR(mvm, "NVM file too large\n");
- ret = -EINVAL;
- goto out;
- }
-
- eof = fw_entry->data + fw_entry->size;
- dword_buff = (__le32 *)fw_entry->data;
-
- /* some NVM file will contain a header.
- * The header is identified by 2 dwords header as follow:
- * dword[0] = 0x2A504C54
- * dword[1] = 0x4E564D2A
- *
- * This header must be skipped when providing the NVM data to the FW.
- */
- if (fw_entry->size > NVM_HEADER_SIZE &&
- dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
- dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
- file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
- IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
- IWL_INFO(mvm, "NVM Manufacturing date %08X\n",
- le32_to_cpu(dword_buff[3]));
-
- /* nvm file validation, dword_buff[2] holds the file version */
- if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
- CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP &&
- le32_to_cpu(dword_buff[2]) < 0xE4A) {
- ret = -EFAULT;
- goto out;
- }
- } else {
- file_sec = (void *)fw_entry->data;
- }
-
- while (true) {
- if (file_sec->data > eof) {
- IWL_ERR(mvm,
- "ERROR - NVM file too short for section header\n");
- ret = -EINVAL;
- break;
- }
-
- /* check for EOF marker */
- if (!file_sec->word1 && !file_sec->word2) {
- ret = 0;
- break;
- }
-
- if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
- section_size =
- 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
- section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
- } else {
- section_size = 2 * EXT_NVM_WORD2_LEN(
- le16_to_cpu(file_sec->word2));
- section_id = EXT_NVM_WORD1_ID(
- le16_to_cpu(file_sec->word1));
- }
-
- if (section_size > max_section_size) {
- IWL_ERR(mvm, "ERROR - section too large (%d)\n",
- section_size);
- ret = -EINVAL;
- break;
- }
-
- if (!section_size) {
- IWL_ERR(mvm, "ERROR - section empty\n");
- ret = -EINVAL;
- break;
- }
-
- if (file_sec->data + section_size > eof) {
- IWL_ERR(mvm,
- "ERROR - NVM file too short for section (%d bytes)\n",
- section_size);
- ret = -EINVAL;
- break;
- }
-
- if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
- "Invalid NVM section ID %d\n", section_id)) {
- ret = -EINVAL;
- break;
- }
-
- temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
- if (!temp) {
- ret = -ENOMEM;
- break;
- }
-
- iwl_mvm_nvm_fixups(mvm, section_id, temp, section_size);
-
- kfree(mvm->nvm_sections[section_id].data);
- mvm->nvm_sections[section_id].data = temp;
- mvm->nvm_sections[section_id].length = section_size;
-
- /* advance to the next section */
- file_sec = (void *)(file_sec->data + section_size);
- }
-out:
- release_firmware(fw_entry);
- return ret;
-}
-
/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
{
@@ -585,7 +394,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
break;
}
- iwl_mvm_nvm_fixups(mvm, section, temp, ret);
+ iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret);
mvm->nvm_sections[section].data = temp;
mvm->nvm_sections[section].length = ret;
@@ -624,14 +433,17 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
/* Only if PNVM selected in the mod param - load external NVM */
if (mvm->nvm_file_name) {
/* read External NVM file from the mod param */
- ret = iwl_mvm_read_external_nvm(mvm);
+ ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
+ mvm->nvm_sections);
if (ret) {
mvm->nvm_file_name = nvm_file_C;
if ((ret == -EFAULT || ret == -ENOENT) &&
mvm->nvm_file_name) {
/* in case nvm file was failed try again */
- ret = iwl_mvm_read_external_nvm(mvm);
+ ret = iwl_read_external_nvm(mvm->trans,
+ mvm->nvm_file_name,
+ mvm->nvm_sections);
if (ret)
return ret;
} else {
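
Note: the parser removed above (its role is now filled by the shared iwl_read_external_nvm() helper) documents the section-header encoding in its macros. Decoding one hypothetical legacy-format header with those macros, purely to illustrate the layout — the word values below are made up:

	word1 = 0x0038:  NVM_WORD1_LEN = 8 * (0x0038 & 0x03FF) = 448
	                 section_size  = 2 * 448 = 896 bytes
	word2 = 0x1000:  section_id    = NVM_WORD2_ID = 0x1000 >> 12 = 1

Extended-NVM files swap the roles of the two words (EXT_NVM_WORD1_ID() / EXT_NVM_WORD2_LEN()), which is why the parser keys the decode on cfg->nvm_type.
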
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 224bfa1bcf53..ff1e518096c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -250,6 +250,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
+ RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
+ iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC),
+
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
@@ -667,6 +670,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
+ spin_lock_init(&mvm->tcm.lock);
+ INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
+ mvm->tcm.ts = jiffies;
+ mvm->tcm.ll_ts = jiffies;
+ mvm->tcm.uapsd_nonagg_ts = jiffies;
+
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
/*
@@ -730,6 +739,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
sizeof(trans->dbg_conf_tlv));
trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
+ trans->iml = mvm->fw->iml;
+ trans->iml_len = mvm->fw->iml_len;
+
/* set up notification wait support */
iwl_notification_wait_init(&mvm->notif_wait);
@@ -859,6 +871,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
+ cancel_delayed_work_sync(&mvm->tcm.work);
+
iwl_mvm_tof_clean(mvm);
mutex_destroy(&mvm->mutex);
@@ -1026,8 +1040,6 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_queue_notif(mvm, rxb, 0);
else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
- else if (cmd == WIDE_ID(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF))
- iwl_mvm_tlc_update_notif(mvm, pkt);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
}
@@ -1432,6 +1444,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
mvm->d0i3_offloading = false;
}
+ iwl_mvm_pause_tcm(mvm, true);
/* make sure we have no running tx while configuring the seqno */
synchronize_net();
@@ -1615,6 +1628,7 @@ out:
/* the FW might have updated the regdomain */
iwl_mvm_update_changed_regdom(mvm);
+ iwl_mvm_resume_tcm(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
mutex_unlock(&mvm->mutex);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index fb5745660509..b8b2b819e8e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -26,6 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -65,14 +67,14 @@ static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
{
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
- return IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ;
+ return IWL_TLC_MNG_CH_WIDTH_160MHZ;
case IEEE80211_STA_RX_BW_80:
- return IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ;
+ return IWL_TLC_MNG_CH_WIDTH_80MHZ;
case IEEE80211_STA_RX_BW_40:
- return IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ;
+ return IWL_TLC_MNG_CH_WIDTH_40MHZ;
case IEEE80211_STA_RX_BW_20:
default:
- return IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ;
+ return IWL_TLC_MNG_CH_WIDTH_20MHZ;
}
}
@@ -85,7 +87,9 @@ static u8 rs_fw_set_active_chains(u8 chains)
if (chains & ANT_B)
fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
if (chains & ANT_C)
- fw_chains |= IWL_TLC_MNG_CHAIN_C_MSK;
+ WARN(false,
+ "tlc offload doesn't support antenna C. chains: 0x%x\n",
+ chains);
return fw_chains;
}
@@ -97,13 +101,13 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
u8 supp = 0;
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
- supp |= IWL_TLC_MNG_SGI_20MHZ_MSK;
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
- supp |= IWL_TLC_MNG_SGI_40MHZ_MSK;
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ);
if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
- supp |= IWL_TLC_MNG_SGI_80MHZ_MSK;
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
- supp |= IWL_TLC_MNG_SGI_160MHZ_MSK;
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ);
return supp;
}
@@ -114,9 +118,7 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
bool vht_ena = vht_cap && vht_cap->vht_supported;
- u16 flags = IWL_TLC_MNG_CFG_FLAGS_CCK_MSK |
- IWL_TLC_MNG_CFG_FLAGS_DCM_MSK |
- IWL_TLC_MNG_CFG_FLAGS_DD_MSK;
+ u16 flags = 0;
if (mvm->cfg->ht_params->stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
@@ -129,16 +131,11 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
(vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
- if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
- (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
- (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
- flags |= IWL_TLC_MNG_CFG_FLAGS_BF_MSK;
-
return flags;
}
static
-int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap,
int nss)
{
u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
@@ -160,15 +157,16 @@ int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
return 0;
}
-static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta,
- struct ieee80211_sta_vht_cap *vht_cap,
- struct iwl_tlc_config_cmd *cmd)
+static void
+rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
+ const struct ieee80211_sta_vht_cap *vht_cap,
+ struct iwl_tlc_config_cmd *cmd)
{
u16 supp;
int i, highest_mcs;
for (i = 0; i < sta->rx_nss; i++) {
- if (i == MAX_RS_ANT_NUM)
+ if (i == MAX_NSS)
break;
highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
@@ -179,7 +177,9 @@ static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta,
if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
- cmd->ht_supp_rates[i] = cpu_to_le16(supp);
+ cmd->ht_rates[i][0] = cpu_to_le16(supp);
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ cmd->ht_rates[i][1] = cmd->ht_rates[i][0];
}
}
@@ -190,8 +190,8 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
int i;
unsigned long tmp;
unsigned long supp; /* must be unsigned long for for_each_set_bit */
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
/* non HT rates */
supp = 0;
@@ -199,45 +199,40 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
for_each_set_bit(i, &tmp, BITS_PER_LONG)
supp |= BIT(sband->bitrates[i].hw_value);
- cmd->non_ht_supp_rates = cpu_to_le16(supp);
+ cmd->non_ht_rates = cpu_to_le16(supp);
cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
- /* HT/VHT rates */
if (vht_cap && vht_cap->vht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_VHT;
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
} else if (ht_cap && ht_cap->ht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_HT;
- cmd->ht_supp_rates[0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
- cmd->ht_supp_rates[1] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+ cmd->ht_rates[0][0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+ cmd->ht_rates[1][0] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
}
}
-static void rs_fw_tlc_mng_notif_req_config(struct iwl_mvm *mvm, u8 sta_id)
-{
- u32 cmd_id = iwl_cmd_id(TLC_MNG_NOTIF_REQ_CMD, DATA_PATH_GROUP, 0);
- struct iwl_tlc_notif_req_config_cmd cfg_cmd = {
- .sta_id = sta_id,
- .flags = cpu_to_le16(IWL_TLC_NOTIF_INIT_RATE_MSK),
- .interval = cpu_to_le16(IWL_TLC_NOTIF_REQ_INTERVAL),
- };
- int ret;
-
- ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
- if (ret)
- IWL_ERR(mvm, "Failed to send TLC notif request (%d)\n", ret);
-}
-
-void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_tlc_update_notif *notif;
+ struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
struct iwl_lq_sta_rs_fw *lq_sta;
+ u32 flags;
rcu_read_lock();
notif = (void *)pkt->data;
- mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id);
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
+ if (IS_ERR_OR_NULL(sta)) {
+ IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
+ notif->sta_id);
+ goto out;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (!mvmsta) {
IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
@@ -245,14 +240,30 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
goto out;
}
+ flags = le32_to_cpu(notif->flags);
+
lq_sta = &mvmsta->lq_sta.rs_fw;
- if (le16_to_cpu(notif->flags) & IWL_TLC_NOTIF_INIT_RATE_MSK) {
- lq_sta->last_rate_n_flags =
- le32_to_cpu(notif->values[IWL_TLC_NOTIF_INIT_RATE_POS]);
+ if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
+ lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
lq_sta->last_rate_n_flags);
}
+
+ if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) {
+ u16 size = le32_to_cpu(notif->amsdu_size);
+
+ if (WARN_ON(sta->max_amsdu_len < size))
+ goto out;
+
+ mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
+ mvmsta->max_amsdu_len = size;
+
+ IWL_DEBUG_RATE(mvm,
+ "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
+ le32_to_cpu(notif->amsdu_size), size,
+ mvmsta->amsdu_enabled);
+ }
out:
rcu_read_unlock();
}
@@ -267,12 +278,12 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband;
struct iwl_tlc_config_cmd cfg_cmd = {
.sta_id = mvmsta->sta_id,
- .max_supp_ch_width = rs_fw_bw_from_sta_bw(sta),
+ .max_ch_width = rs_fw_bw_from_sta_bw(sta),
.flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
- .max_supp_ss = sta->rx_nss,
- .max_ampdu_cnt = cpu_to_le32(mvmsta->max_agg_bufsize),
+ .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
+ .amsdu = iwl_mvm_is_csum_supported(mvm),
};
int ret;
@@ -287,8 +298,6 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
-
- rs_fw_tlc_mng_notif_req_config(mvm, cfg_cmd.sta_id);
}
int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 5d776ec1840f..af7bc3b7b0c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1715,12 +1715,18 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ /*
+ * In case TLC offload is not active amsdu_enabled is either 0xFFFF
+ * or 0, since there is no per-TID alg.
+ */
if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) ||
tbl->rate.index < IWL_RATE_MCS_5_INDEX ||
scale_action == RS_ACTION_DOWNSCALE)
- mvmsta->tlc_amsdu = false;
+ mvmsta->amsdu_enabled = 0;
else
- mvmsta->tlc_amsdu = true;
+ mvmsta->amsdu_enabled = 0xFFFF;
+
+ mvmsta->max_amsdu_len = sta->max_amsdu_len;
}
/*
@@ -3134,7 +3140,8 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
sband = hw->wiphy->bands[band];
lq_sta->lq.sta_id = mvmsta->sta_id;
- mvmsta->tlc_amsdu = false;
+ mvmsta->amsdu_enabled = 0;
+ mvmsta->max_amsdu_len = sta->max_amsdu_len;
for (j = 0; j < LQ_SIZE; j++)
rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
@@ -3744,7 +3751,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(rate->sgi) ? "SGI" : "NGI",
(rate->ldpc) ? "LDPC" : "BCC",
(lq_sta->is_agg) ? "AGG on" : "",
- (mvmsta->tlc_amsdu) ? "AMSDU on" : "");
+ (mvmsta->amsdu_enabled) ? "AMSDU on" : "");
}
desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index fb18cb8c233d..5e89141656c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -454,5 +454,6 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band);
int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable);
-void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt);
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
#endif /* __rs__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index d26833c5ce1f..bfb163419c67 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -254,6 +254,74 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
return 0;
}
+static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr, u32 len,
+ struct iwl_rx_phy_info *phy_info,
+ u32 rate_n_flags)
+{
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_tcm_mac *mdata;
+ int mac;
+ int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */
+ struct iwl_mvm_vif *mvmvif;
+ /* expected throughput in 100Kbps, single stream, 20 MHz */
+ static const u8 thresh_tpt[] = {
+ 9, 18, 30, 42, 60, 78, 90, 96, 120, 135,
+ };
+ u16 thr;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)];
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+
+ if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+ schedule_delayed_work(&mvm->tcm.work, 0);
+ mdata = &mvm->tcm.data[mac];
+ mdata->rx.pkts[ac]++;
+
+ /* count the airtime only once for each ampdu */
+ if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) {
+ mdata->rx.last_ampdu_ref = mvm->ampdu_ref;
+ mdata->rx.airtime += le16_to_cpu(phy_info->frame_time);
+ }
+
+ if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK)))
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ if (mdata->opened_rx_ba_sessions ||
+ mdata->uapsd_nonagg_detect.detected ||
+ (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
+ !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
+ !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
+ !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) ||
+ mvmsta->sta_id != mvmvif->ap_sta_id)
+ return;
+
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK];
+ thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
+ RATE_HT_MCS_NSS_POS);
+ } else {
+ if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >=
+ ARRAY_SIZE(thresh_tpt)))
+ return;
+ thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK];
+ thr *= 1 + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS);
+ }
+
+ thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) >>
+ RATE_MCS_CHAN_WIDTH_POS);
+
+ mdata->uapsd_nonagg_detect.rx_bytes += len;
+ ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, thr);
+}
+
static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
struct sk_buff *skb,
u32 status)
@@ -408,6 +476,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
NULL);
}
+ if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ ieee80211_is_data(hdr->frame_control))
+ iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info,
+ rate_n_flags);
+
if (ieee80211_is_data(hdr->frame_control))
iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
}
@@ -654,7 +728,8 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
int expected_size;
int i;
u8 *energy;
- __le32 *bytes, *air_time;
+ __le32 *bytes;
+ __le32 *air_time;
__le32 flags;
if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
@@ -752,6 +827,32 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
sta->avg_energy = energy[i];
}
rcu_read_unlock();
+
+ /*
+ * Don't update in case the statistics are not cleared, since
+ * we will end up counting twice the same airtime, once in TCM
+ * request and once in statistics notification.
+ */
+ if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR))
+ return;
+
+ spin_lock(&mvm->tcm.lock);
+ for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i];
+ u32 airtime = le32_to_cpu(air_time[i]);
+ u32 rx_bytes = le32_to_cpu(bytes[i]);
+
+ mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes;
+ if (airtime) {
+ /* re-init every time to store rate from FW */
+ ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
+ ewma_rate_add(&mdata->uapsd_nonagg_detect.rate,
+ rx_bytes * 8 / airtime);
+ }
+
+ mdata->rx.airtime += airtime;
+ }
+ spin_unlock(&mvm->tcm.lock);
}
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
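
Note: iwl_mvm_rx_handle_tcm() above estimates the expected throughput of the received rate before feeding the EWMA: it looks the rate code up in thresh_tpt (units of 100 kbps, single stream, 20 MHz), multiplies by the number of spatial streams, then doubles once per channel-width step. Working one HT example through that code:

	rate code 7, NSS field 1 (two streams), 40 MHz:
	thr  = thresh_tpt[7]              =  96   (9.6 Mbps)
	thr *= 1 + 1  /* second stream */ = 192
	thr <<= 1     /* 40 MHz */        = 384   (~38.4 Mbps)

The statistics path in iwl_mvm_handle_rx_statistics() feeds the same EWMA with rx_bytes * 8 / airtime, i.e. a rate derived from firmware-reported counters rather than from the MCS table.
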
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 4a4ccfd11e5b..bb63e75a9b7f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -112,7 +112,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
return -1;
if (ieee80211_is_data_qos(hdr->frame_control))
- tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ tid = ieee80211_get_tid(hdr);
else
tid = 0;
@@ -151,17 +151,9 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+ len -= 2;
pad_len = 2;
-
- /*
- * If the device inserted padding it means that (it thought)
- * the 802.11 header wasn't a multiple of 4 bytes long. In
- * this case, reserve two bytes at the start of the SKB to
- * align the payload properly in case we end up copying it.
- */
- skb_reserve(skb, pad_len);
}
- len -= pad_len;
/* If frame is small enough to fit in skb->head, pull it completely.
* If not, only pull ieee80211_hdr (including crypto if present, and
@@ -347,8 +339,7 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
if (ieee80211_is_data_qos(hdr->frame_control))
/* frame has qos control */
- tid = *ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_TID_MASK;
+ tid = ieee80211_get_tid(hdr);
else
tid = IWL_MAX_TID_COUNT;
@@ -628,7 +619,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
bool last_subframe =
desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
- u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ u8 tid = ieee80211_get_tid(hdr);
u8 sub_frame_idx = desc->amsdu_info &
IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
struct iwl_mvm_reorder_buf_entry *entries;
@@ -867,6 +858,16 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
}
+ if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+ /*
+ * If the device inserted padding it means that (it thought)
+ * the 802.11 header wasn't a multiple of 4 bytes long. In
+ * this case, reserve two bytes at the start of the SKB to
+ * align the payload properly in case we end up copying it.
+ */
+ skb_reserve(skb, 2);
+ }
+
rx_status = IEEE80211_SKB_RXCB(skb);
if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc,
@@ -941,6 +942,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT);
+ if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ ieee80211_is_data(hdr->frame_control) &&
+ time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+ schedule_delayed_work(&mvm->tcm.work, 0);
+
/*
* We have tx blocked stations (with CS bit). If we heard
* frames from a blocked station on a new channel we can
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index b31f0ffbbbf0..4b3753d78d03 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,9 +20,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
+ * along with this program
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
@@ -76,12 +75,6 @@
#define IWL_DENSE_EBS_SCAN_RATIO 5
#define IWL_SPARSE_EBS_SCAN_RATIO 1
-enum iwl_mvm_traffic_load {
- IWL_MVM_TRAFFIC_LOW,
- IWL_MVM_TRAFFIC_MEDIUM,
- IWL_MVM_TRAFFIC_HIGH,
-};
-
#define IWL_SCAN_DWELL_ACTIVE 10
#define IWL_SCAN_DWELL_PASSIVE 110
#define IWL_SCAN_DWELL_FRAGMENTED 44
@@ -123,7 +116,9 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = {
};
struct iwl_mvm_scan_params {
+ /* For CDB this is low band scan type, for non-CDB - type. */
enum iwl_mvm_scan_type type;
+ enum iwl_mvm_scan_type hb_type;
u32 n_channels;
u16 delay;
int n_ssids;
@@ -152,7 +147,7 @@ static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
if (iwl_mvm_is_adaptive_dwell_supported(mvm))
return (void *)&cmd->v7.data;
- if (iwl_mvm_has_new_tx_api(mvm))
+ if (iwl_mvm_cdb_scan_api(mvm))
return (void *)&cmd->v6.data;
return (void *)&cmd->v1.data;
@@ -169,7 +164,7 @@ iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm)
if (iwl_mvm_is_adaptive_dwell_supported(mvm))
return &cmd->v7.channel;
- if (iwl_mvm_has_new_tx_api(mvm))
+ if (iwl_mvm_cdb_scan_api(mvm))
return &cmd->v6.channel;
return &cmd->v1.channel;
@@ -234,15 +229,21 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
{
- return IWL_MVM_TRAFFIC_LOW;
+ return mvm->tcm.result.global_load;
+}
+
+static enum iwl_mvm_traffic_load
+iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
+{
+ return mvm->tcm.result.band_load[band];
}
static enum
-iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
+iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
+ enum iwl_mvm_traffic_load load,
+ bool low_latency)
{
int global_cnt = 0;
- enum iwl_mvm_traffic_load load;
- bool low_latency;
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
@@ -251,9 +252,6 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
if (!global_cnt)
return IWL_SCAN_TYPE_UNASSOC;
- load = iwl_mvm_get_traffic_load(mvm);
- low_latency = iwl_mvm_low_latency(mvm);
-
if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device &&
fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
return IWL_SCAN_TYPE_FRAGMENTED;
@@ -264,25 +262,57 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
return IWL_SCAN_TYPE_WILD;
}
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
+{
+ enum iwl_mvm_traffic_load load;
+ bool low_latency;
+
+ load = iwl_mvm_get_traffic_load(mvm);
+ low_latency = iwl_mvm_low_latency(mvm);
+
+ return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+}
+
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
+ bool p2p_device,
+ enum nl80211_band band)
+{
+ enum iwl_mvm_traffic_load load;
+ bool low_latency;
+
+ load = iwl_mvm_get_traffic_load_band(mvm, band);
+ low_latency = iwl_mvm_low_latency_band(mvm, band);
+
+ return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+}
+
static int
iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm,
struct cfg80211_scan_request *req,
struct iwl_mvm_scan_params *params)
{
+ u32 duration = scan_timing[params->type].max_out_time;
+
if (!req->duration)
return 0;
- if (req->duration_mandatory &&
- req->duration > scan_timing[params->type].max_out_time) {
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ u32 hb_time = scan_timing[params->hb_type].max_out_time;
+
+ duration = min_t(u32, duration, hb_time);
+ }
+
+ if (req->duration_mandatory && req->duration > duration) {
IWL_DEBUG_SCAN(mvm,
"Measurement scan - too long dwell %hu (max out time %u)\n",
req->duration,
- scan_timing[params->type].max_out_time);
+ duration);
return -EOPNOTSUPP;
}
- return min_t(u32, (u32)req->duration,
- scan_timing[params->type].max_out_time);
+ return min_t(u32, (u32)req->duration, duration);
}
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
@@ -437,6 +467,7 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
ieee80211_scan_completed(mvm->hw, &info);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
cancel_delayed_work(&mvm->scan_timeout_dwork);
+ iwl_mvm_resume_tcm(mvm);
} else {
IWL_ERR(mvm,
"got scan complete notification but no scan is running\n");
@@ -1030,22 +1061,38 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
- enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
struct iwl_scan_config *cfg = config;
cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
- cfg->out_of_channel_time[0] =
- cpu_to_le32(scan_timing[type].max_out_time);
- cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
- cfg->suspend_time[1] =
- cpu_to_le32(scan_timing[type].suspend_time);
- cfg->out_of_channel_time[1] =
+ enum iwl_mvm_scan_type lb_type, hb_type;
+
+ lb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ NL80211_BAND_2GHZ);
+ hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ NL80211_BAND_5GHZ);
+
+ cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[lb_type].max_out_time);
+ cfg->suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[lb_type].suspend_time);
+
+ cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[hb_type].max_out_time);
+ cfg->suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[hb_type].suspend_time);
+ } else {
+ enum iwl_mvm_scan_type type =
+ iwl_mvm_get_scan_type(mvm, false);
+
+ cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(scan_timing[type].max_out_time);
+ cfg->suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[type].suspend_time);
}
iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
@@ -1065,7 +1112,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
};
- enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+ enum iwl_mvm_scan_type type;
+ enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
int num_channels =
mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
@@ -1075,10 +1123,20 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS;
- if (type == mvm->scan_type)
- return 0;
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ type = iwl_mvm_get_scan_type_band(mvm, false,
+ NL80211_BAND_2GHZ);
+ hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ NL80211_BAND_5GHZ);
+ if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
+ return 0;
+ } else {
+ type = iwl_mvm_get_scan_type(mvm, false);
+ if (type == mvm->scan_type)
+ return 0;
+ }
- if (iwl_mvm_has_new_tx_api(mvm))
+ if (iwl_mvm_cdb_scan_api(mvm))
cmd_size = sizeof(struct iwl_scan_config);
else
cmd_size = sizeof(struct iwl_scan_config_v1);
@@ -1107,10 +1165,15 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
IWL_CHANNEL_FLAG_EBS_ADD |
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
- if (iwl_mvm_has_new_tx_api(mvm)) {
- flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ?
- SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
- SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
+ /*
+ * Check for fragmented scan on LMAC2 - high band.
+ * LMAC1 - low band is checked above.
+ */
+ if (iwl_mvm_cdb_scan_api(mvm)) {
+ if (iwl_mvm_is_cdb_supported(mvm))
+ flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ?
+ SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
+ SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
} else {
iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
@@ -1123,8 +1186,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
ret = iwl_mvm_send_cmd(mvm, &cmd);
- if (!ret)
+ if (!ret) {
mvm->scan_type = type;
+ mvm->hb_scan_type = hb_type;
+ }
kfree(cfg);
return ret;
@@ -1178,7 +1243,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cpu_to_le32(timing->suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
- hb_timing = &scan_timing[params->type];
+ hb_timing = &scan_timing[params->hb_type];
cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(hb_timing->max_out_time);
@@ -1208,7 +1273,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
if (iwl_mvm_is_cdb_supported(mvm)) {
- hb_timing = &scan_timing[params->type];
+ hb_timing = &scan_timing[params->hb_type];
cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(hb_timing->max_out_time);
@@ -1216,7 +1281,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cpu_to_le32(hb_timing->suspend_time);
}
- if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (iwl_mvm_cdb_scan_api(mvm)) {
cmd->v6.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
@@ -1232,6 +1297,11 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cpu_to_le32(timing->suspend_time);
}
}
+
+ if (iwl_mvm_is_regular_scan(params))
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ else
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
static void
@@ -1262,11 +1332,12 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
- if (params->type == IWL_SCAN_TYPE_FRAGMENTED) {
+ if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
- if (iwl_mvm_is_cdb_supported(mvm))
- flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
- }
+
+ if (iwl_mvm_is_cdb_supported(mvm) &&
+ params->hb_type == IWL_SCAN_TYPE_FRAGMENTED)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm) &&
fw_has_capa(&mvm->fw->ucode_capa,
@@ -1497,6 +1568,21 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work)
iwl_force_nmi(mvm->trans);
}
+static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ bool p2p)
+{
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ params->type =
+ iwl_mvm_get_scan_type_band(mvm, p2p,
+ NL80211_BAND_2GHZ);
+ params->hb_type =
+ iwl_mvm_get_scan_type_band(mvm, p2p,
+ NL80211_BAND_5GHZ);
+ } else {
+ params->type = iwl_mvm_get_scan_type(mvm, p2p);
+ }
+}
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@@ -1544,9 +1630,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.scan_plans = &scan_plan;
params.n_scan_plans = 1;
- params.type =
- iwl_mvm_get_scan_type(mvm,
- vif->type == NL80211_IFTYPE_P2P_DEVICE);
+ iwl_mvm_fill_scan_type(mvm, &params,
+ vif->type == NL80211_IFTYPE_P2P_DEVICE);
ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
if (ret < 0)
@@ -1568,6 +1653,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
return ret;
+ iwl_mvm_pause_tcm(mvm, false);
+
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (ret) {
/* If the scan failed, it usually means that the FW was unable
@@ -1575,6 +1662,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* should try to send the command again with different params.
*/
IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+ iwl_mvm_resume_tcm(mvm);
return ret;
}
@@ -1638,9 +1726,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.n_scan_plans = req->n_scan_plans;
params.scan_plans = req->scan_plans;
- params.type =
- iwl_mvm_get_scan_type(mvm,
- vif->type == NL80211_IFTYPE_P2P_DEVICE);
+ iwl_mvm_fill_scan_type(mvm, &params,
+ vif->type == NL80211_IFTYPE_P2P_DEVICE);
/* In theory, LMAC scans can handle a 32-bit delay, but since
* waiting for over 18 hours to start the scan is a bit silly
@@ -1711,6 +1798,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
mvm->scan_vif = NULL;
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
cancel_delayed_work(&mvm->scan_timeout_dwork);
+ iwl_mvm_resume_tcm(mvm);
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
@@ -1827,7 +1915,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
else if (iwl_mvm_is_adaptive_dwell_supported(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
- else if (iwl_mvm_has_new_tx_api(mvm))
+ else if (iwl_mvm_cdb_scan_api(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
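
Note on the measurement-dwell change above: with CDB the requested duration now has to satisfy both LMACs, so the limit becomes the smaller of the two bands' max_out_time. With hypothetical timings of 70 for the low band and 120 for the high band (the real scan_timing values are not part of this hunk), a mandatory req->duration of 80 is rejected with -EOPNOTSUPP because 80 > min(70, 120), while a non-mandatory 80 is simply clipped to 70 by the final min_t().
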
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 80067eb9ea05..4ddd2c427b83 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -2466,6 +2468,15 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
+ if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
+ iwl_mvm_has_new_tx_api(mvm)) {
+ u8 ac = tid_to_mac80211_ac[tid];
+
+ ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+ if (ret)
+ return ret;
+ }
+
spin_lock_bh(&mvmsta->lock);
/* possible race condition - we entered D0i3 while starting agg */
@@ -2887,7 +2898,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
u32 sta_id,
struct ieee80211_key_conf *key, bool mcast,
u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
- u8 key_offset)
+ u8 key_offset, bool mfp)
{
union {
struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
@@ -2960,6 +2971,8 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+ if (mfp)
+ key_flags |= cpu_to_le16(STA_KEY_MFP);
u.cmd.common.key_offset = key_offset;
u.cmd.common.key_flags = key_flags;
@@ -3101,11 +3114,13 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
struct ieee80211_key_seq seq;
u16 p1k[5];
u32 sta_id;
+ bool mfp = false;
if (sta) {
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
sta_id = mvm_sta->sta_id;
+ mfp = sta->mfp;
} else if (vif->type == NL80211_IFTYPE_AP &&
!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -3127,7 +3142,8 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
- seq.tkip.iv32, p1k, 0, key_offset);
+ seq.tkip.iv32, p1k, 0, key_offset,
+ mfp);
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_WEP40:
@@ -3135,11 +3151,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
- 0, NULL, 0, key_offset);
+ 0, NULL, 0, key_offset, mfp);
break;
default:
ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
- 0, NULL, 0, key_offset);
+ 0, NULL, 0, key_offset, mfp);
}
return ret;
@@ -3366,6 +3382,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvm_sta;
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ bool mfp = sta ? sta->mfp : false;
rcu_read_lock();
@@ -3373,7 +3390,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
if (WARN_ON_ONCE(!mvm_sta))
goto unlock;
iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
- iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
+ iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
+ mfp);
unlock:
rcu_read_unlock();
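
The sta.c hunks above thread a new mfp argument into iwl_mvm_send_sta_key() so the key flags can tell the firmware the peer uses management frame protection. A minimal sketch of that flag-building pattern; the bit positions below are invented for the example and are not the firmware API's STA_KEY_* values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical bit positions, chosen only for this example. */
    #define KEY_FLG_MULTICAST  (1u << 0)
    #define KEY_FLG_MFP        (1u << 1)

    static uint16_t build_key_flags(bool mcast, bool mfp)
    {
        uint16_t flags = 0;

        if (mcast)
            flags |= KEY_FLG_MULTICAST;
        if (mfp)                    /* peer negotiated 802.11w protection */
            flags |= KEY_FLG_MFP;

        return flags;
    }

    int main(void)
    {
        /* pairwise key for an MFP-capable station */
        printf("key flags = 0x%04x\n", build_key_flags(false, true));
        return 0;
    }
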
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 5ffd6adbc383..60502c81dfce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -391,7 +391,9 @@ struct iwl_mvm_rxq_dup_data {
* @tx_protection: reference counter for controlling the Tx protection.
 * @tt_tx_protection: has thermal throttling enabled Tx protection?
* @disable_tx: is tx to this STA disabled?
- * @tlc_amsdu: true if A-MSDU is allowed
+ * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs.
+ * In case TLC offload is not active it is either 0xFFFF or 0.
+ * @max_amsdu_len: max AMSDU length
* @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
* @sleep_tx_count: the number of frames that we told the firmware to let out
* even when that station is asleep. This is useful in case the queue
@@ -436,7 +438,8 @@ struct iwl_mvm_sta {
bool tt_tx_protection;
bool disable_tx;
- bool tlc_amsdu;
+ u16 amsdu_enabled;
+ u16 max_amsdu_len;
bool sleeping;
bool associated;
u8 agg_tids;
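
amsdu_enabled replaces the old tlc_amsdu boolean with a per-TID bitmap (all-ones or zero when TLC offload is not active). A standalone sketch of how such a bitmap is consulted on the TX path; the helper name is hypothetical:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* amsdu_enabled-style bitmap: one bit per TID (0..15). */
    static bool amsdu_allowed(uint16_t amsdu_enabled, unsigned int tid)
    {
        return amsdu_enabled & BIT(tid);
    }

    int main(void)
    {
        uint16_t no_offload  = 0xffff;           /* TLC offload inactive: all TIDs */
        uint16_t fw_selected = BIT(0) | BIT(6);  /* offload picked two TIDs */

        printf("tid 6, no offload:  %d\n", amsdu_allowed(no_offload, 6));
        printf("tid 3, fw selected: %d\n", amsdu_allowed(fw_selected, 3));
        return 0;
    }
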
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 795065974d78..df4c60496f72 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -767,16 +767,16 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 snap_ip_tcp, pad;
unsigned int dbg_max_amsdu_len;
netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
- u8 *qc, tid, txf;
+ u8 tid, txf;
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
tcp_hdrlen(skb);
dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
- if (!sta->max_amsdu_len ||
+ if (!mvmsta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
- (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len))
+ (!mvmsta->amsdu_enabled && !dbg_max_amsdu_len))
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
/*
@@ -790,8 +790,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
}
- qc = ieee80211_get_qos_ctl(hdr);
- tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+ tid = ieee80211_get_tid(hdr);
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
@@ -803,7 +802,11 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
!mvmsta->tid_data[tid].amsdu_in_ampdu_allowed)
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
- max_amsdu_len = sta->max_amsdu_len;
+ if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) ||
+ !(mvmsta->amsdu_enabled & BIT(tid)))
+ return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+
+ max_amsdu_len = mvmsta->max_amsdu_len;
/* the Tx FIFO to which this A-MSDU will be routed */
txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]);
@@ -841,7 +844,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
*/
num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
if (num_subframes > 1)
- *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
tcp_hdrlen(skb) + skb->data_len;
@@ -930,6 +933,32 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
return false;
}
+static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ int airtime)
+{
+ int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+ if (mvm->tcm.paused)
+ return;
+
+ if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+ schedule_delayed_work(&mvm->tcm.work, 0);
+
+ mdata->tx.airtime += airtime;
+}
+
+static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int tid)
+{
+ u32 ac = tid_to_mac80211_ac[tid];
+ int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+ mdata->tx.pkts[ac]++;
+}
+
/*
* Sets the fields in the Tx cmd that are crypto related
*/
@@ -976,9 +1005,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
* assignment of MGMT TID
*/
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
- u8 *qc = NULL;
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ tid = ieee80211_get_tid(hdr);
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
goto drop_unlock_sta;
@@ -1067,6 +1094,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
+ iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);
+
return 0;
drop_unlock_sta:
@@ -1469,6 +1498,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
if (!IS_ERR(sta)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le16_to_cpu(tx_resp->wireless_media_time));
+
if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
@@ -1610,6 +1642,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
le16_to_cpu(tx_resp->wireless_media_time);
mvmsta->tid_data[tid].lq_color =
TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le16_to_cpu(tx_resp->wireless_media_time));
}
rcu_read_unlock();
@@ -1800,6 +1834,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
le32_to_cpu(ba_res->tx_rate));
}
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le32_to_cpu(ba_res->wireless_time));
out_unlock:
rcu_read_unlock();
out:
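
The TX hunks above feed two counters into the traffic-condition monitor: queued packets per access category and firmware-reported airtime per MAC. A rough user-space sketch of that bookkeeping, with invented structure names and without the locking or work scheduling of the real driver:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_MACS 4
    #define NUM_ACS  4

    /* Per-MAC counters, reset at the end of every monitoring window. */
    struct tcm_mac {
        uint32_t tx_airtime;        /* usec reported by TX responses / BA notifs */
        uint32_t tx_pkts[NUM_ACS];  /* frames queued, bucketed by access category */
    };

    static struct tcm_mac tcm[NUM_MACS];

    static void account_tx_airtime(int mac, uint32_t airtime_usec)
    {
        tcm[mac].tx_airtime += airtime_usec;
    }

    static void account_tx_pkt(int mac, int ac)
    {
        tcm[mac].tx_pkts[ac]++;
    }

    int main(void)
    {
        account_tx_pkt(0, 2);           /* one best-effort frame queued */
        account_tx_airtime(0, 1500);    /* two TX responses report airtime */
        account_tx_airtime(0, 900);

        printf("mac 0: %u usec airtime, %u AC2 packets\n",
               (unsigned)tcm[0].tx_airtime, (unsigned)tcm[0].tx_pkts[2]);
        return 0;
    }
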
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index d99d9ea78e4c..b002a7afb5f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -278,8 +280,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
u8 ind = last_idx;
int i;
- for (i = 0; i < MAX_RS_ANT_NUM; i++) {
- ind = (ind + 1) % MAX_RS_ANT_NUM;
+ for (i = 0; i < MAX_ANT_NUM; i++) {
+ ind = (ind + 1) % MAX_ANT_NUM;
if (valid & BIT(ind))
return ind;
}
@@ -520,15 +522,15 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
/* set INIT_DONE flag */
iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ BIT(trans->cfg->csr->flag_init_done));
/* and wait for clock stabilization */
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
err = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
25000);
if (err < 0) {
IWL_DEBUG_INFO(trans,
@@ -728,12 +730,14 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
.sta_id = sta_id,
.tid = tid,
};
- int queue;
+ int queue, size = IWL_DEFAULT_QUEUE_SIZE;
- if (cmd.tid == IWL_MAX_TID_COUNT)
+ if (cmd.tid == IWL_MAX_TID_COUNT) {
cmd.tid = IWL_MGMT_TID;
+ size = IWL_MGMT_QUEUE_SIZE;
+ }
queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
- SCD_QUEUE_CFG, timeout);
+ SCD_QUEUE_CFG, size, timeout);
if (queue < 0) {
IWL_DEBUG_TX_QUEUES(mvm,
@@ -1074,23 +1078,48 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_power_update_mac(mvm);
}
+struct iwl_mvm_low_latency_iter {
+ bool result;
+ bool result_per_band[NUM_NL80211_BANDS];
+};
+
static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
- bool *result = _data;
+ struct iwl_mvm_low_latency_iter *result = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ enum nl80211_band band;
- if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
- *result = true;
+ if (iwl_mvm_vif_low_latency(mvmvif)) {
+ result->result = true;
+
+ if (!mvmvif->phy_ctxt)
+ return;
+
+ band = mvmvif->phy_ctxt->channel->band;
+ result->result_per_band[band] = true;
+ }
}
bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
{
- bool result = false;
+ struct iwl_mvm_low_latency_iter data = {};
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_ll_iter, &result);
+ iwl_mvm_ll_iter, &data);
- return result;
+ return data.result;
+}
+
+bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
+{
+ struct iwl_mvm_low_latency_iter data = {};
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_ll_iter, &data);
+
+ return data.result_per_band[band];
}
struct iwl_bss_iter_data {
@@ -1429,6 +1458,387 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
sta->addr, tid);
}
+u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
+{
+ if (!elapsed)
+ return 0;
+
+ return (100 * airtime / elapsed) / USEC_PER_MSEC;
+}
+
+static enum iwl_mvm_traffic_load
+iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
+{
+ u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
+
+ if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
+ return IWL_MVM_TRAFFIC_HIGH;
+ if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
+ return IWL_MVM_TRAFFIC_MEDIUM;
+
+ return IWL_MVM_TRAFFIC_LOW;
+}
+
+struct iwl_mvm_tcm_iter_data {
+ struct iwl_mvm *mvm;
+ bool any_sent;
+};
+
+static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_tcm_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
+
+ if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
+ return;
+
+ low_latency = mvm->tcm.result.low_latency[mvmvif->id];
+
+ if (!mvm->tcm.result.change[mvmvif->id] &&
+ prev == low_latency) {
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ return;
+ }
+
+ if (prev != low_latency) {
+ /* this sends traffic load and updates quota as well */
+ iwl_mvm_update_low_latency(mvm, vif, low_latency,
+ LOW_LATENCY_TRAFFIC);
+ } else {
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ }
+
+ data->any_sent = true;
+}
+
+static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_tcm_iter_data data = {
+ .mvm = mvm,
+ .any_sent = false,
+ };
+
+ mutex_lock(&mvm->mutex);
+
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tcm_iter, &data);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ iwl_mvm_config_scan(mvm);
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm;
+ struct iwl_mvm_vif *mvmvif;
+ struct ieee80211_vif *vif;
+
+ mvmvif = container_of(wk, struct iwl_mvm_vif,
+ uapsd_nonagg_detected_wk.work);
+ vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+ mvm = mvmvif->mvm;
+
+ if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
+ return;
+
+ /* remember that this AP is broken */
+ memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
+ vif->bss_conf.bssid, ETH_ALEN);
+ mvm->uapsd_noagg_bssid_write_idx++;
+ if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
+ mvm->uapsd_noagg_bssid_write_idx = 0;
+
+ iwl_mvm_connection_loss(mvm, vif,
+ "AP isn't using AMPDU with uAPSD enabled");
+}
+
+static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ int *mac_id = data;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (mvmvif->id != *mac_id)
+ return;
+
+ if (!vif->bss_conf.assoc)
+ return;
+
+ if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
+ !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
+ !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
+ !mvmvif->queue_params[IEEE80211_AC_BK].uapsd)
+ return;
+
+ if (mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected)
+ return;
+
+ mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected = true;
+ IWL_INFO(mvm,
+ "detected AP should do aggregation but isn't, likely due to U-APSD\n");
+ schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ);
+}
+
+static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
+ unsigned int elapsed,
+ int mac)
+{
+ u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
+ u64 tpt;
+ unsigned long rate;
+
+ rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
+
+ if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
+ mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
+ return;
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ tpt = 8 * bytes; /* kbps */
+ do_div(tpt, elapsed);
+ rate *= 1000; /* kbps */
+ if (tpt < 22 * rate / 100)
+ return;
+ } else {
+ /*
+ * the rate here is actually the threshold, in 100Kbps units,
+ * so do the needed conversion from bytes to 100Kbps:
+ * 100kb = bits / (100 * 1000),
+ * 100kbps = 100kb / (msecs / 1000) ==
+ * (bits / (100 * 1000)) / (msecs / 1000) ==
+ * bits / (100 * msecs)
+ */
+ tpt = (8 * bytes);
+ do_div(tpt, elapsed * 100);
+ if (tpt < rate)
+ return;
+ }
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_uapsd_agg_disconnect_iter, &mac);
+}
+
+static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 *band = _data;
+
+ if (!mvmvif->phy_ctxt)
+ return;
+
+ band[mvmvif->id] = mvmvif->phy_ctxt->channel->band;
+}
+
+static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
+ unsigned long ts,
+ bool handle_uapsd)
+{
+ unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
+ unsigned int uapsd_elapsed =
+ jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
+ u32 total_airtime = 0;
+ u32 band_airtime[NUM_NL80211_BANDS] = {0};
+ u32 band[NUM_MAC_INDEX_DRIVER] = {0};
+ int ac, mac, i;
+ bool low_latency = false;
+ enum iwl_mvm_traffic_load load, band_load;
+ bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
+
+ if (handle_ll)
+ mvm->tcm.ll_ts = ts;
+ if (handle_uapsd)
+ mvm->tcm.uapsd_nonagg_ts = ts;
+
+ mvm->tcm.result.elapsed = elapsed;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tcm_iterator,
+ &band);
+
+ for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+ u32 vo_vi_pkts = 0;
+ u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
+
+ total_airtime += airtime;
+ band_airtime[band[mac]] += airtime;
+
+ load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
+ mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
+ mvm->tcm.result.load[mac] = load;
+ mvm->tcm.result.airtime[mac] = airtime;
+
+ for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
+ vo_vi_pkts += mdata->rx.pkts[ac] +
+ mdata->tx.pkts[ac];
+
+ /* enable immediately with enough packets but defer disabling */
+ if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
+ mvm->tcm.result.low_latency[mac] = true;
+ else if (handle_ll)
+ mvm->tcm.result.low_latency[mac] = false;
+
+ if (handle_ll) {
+ /* clear old data */
+ memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
+ memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
+ }
+ low_latency |= mvm->tcm.result.low_latency[mac];
+
+ if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
+ iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
+ mac);
+ /* clear old data */
+ if (handle_uapsd)
+ mdata->uapsd_nonagg_detect.rx_bytes = 0;
+ memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
+ memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
+ }
+
+ load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
+ mvm->tcm.result.global_change = load != mvm->tcm.result.global_load;
+ mvm->tcm.result.global_load = load;
+
+ for (i = 0; i < NUM_NL80211_BANDS; i++) {
+ band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
+ mvm->tcm.result.band_load[i] = band_load;
+ }
+
+ /*
+ * If the current load isn't low we need to force re-evaluation
+ * in the TCM period, so that we can return to low load if there
+ * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
+ * triggered by traffic).
+ */
+ if (load != IWL_MVM_TRAFFIC_LOW)
+ return MVM_TCM_PERIOD;
+ /*
+ * If low-latency is active we need to force re-evaluation after
+ * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
+ * when there's no traffic at all.
+ */
+ if (low_latency)
+ return MVM_LL_PERIOD;
+ /*
+ * Otherwise, we don't need to run the work struct because we're
+ * in the default "idle" state - traffic indication is low (which
+ * also covers the "no traffic" case) and low-latency is disabled
+ * so there's no state that may need to be disabled when there's
+ * no traffic at all.
+ *
+ * Note that this has no impact on the regular scheduling of the
+ * updates triggered by traffic - those happen whenever one of the
+ * two timeouts expire (if there's traffic at all.)
+ */
+ return 0;
+}
+
+void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
+{
+ unsigned long ts = jiffies;
+ bool handle_uapsd =
+ time_after(ts, mvm->tcm.uapsd_nonagg_ts +
+ msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));
+
+ spin_lock(&mvm->tcm.lock);
+ if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
+ spin_unlock(&mvm->tcm.lock);
+ return;
+ }
+ spin_unlock(&mvm->tcm.lock);
+
+ if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
+ mutex_lock(&mvm->mutex);
+ if (iwl_mvm_request_statistics(mvm, true))
+ handle_uapsd = false;
+ mutex_unlock(&mvm->mutex);
+ }
+
+ spin_lock(&mvm->tcm.lock);
+ /* re-check if somebody else won the recheck race */
+ if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
+ /* calculate statistics */
+ unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
+ handle_uapsd);
+
+ /* the memset needs to be visible before the timestamp */
+ smp_mb();
+ mvm->tcm.ts = ts;
+ if (work_delay)
+ schedule_delayed_work(&mvm->tcm.work, work_delay);
+ }
+ spin_unlock(&mvm->tcm.lock);
+
+ iwl_mvm_tcm_results(mvm);
+}
+
+void iwl_mvm_tcm_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
+ tcm.work);
+
+ iwl_mvm_recalc_tcm(mvm);
+}
+
+void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
+{
+ spin_lock_bh(&mvm->tcm.lock);
+ mvm->tcm.paused = true;
+ spin_unlock_bh(&mvm->tcm.lock);
+ if (with_cancel)
+ cancel_delayed_work_sync(&mvm->tcm.work);
+}
+
+void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
+{
+ int mac;
+
+ spin_lock_bh(&mvm->tcm.lock);
+ mvm->tcm.ts = jiffies;
+ mvm->tcm.ll_ts = jiffies;
+ for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+ memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
+ memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
+ memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
+ memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
+ }
+ /* The TCM data needs to be reset before "paused" flag changes */
+ smp_mb();
+ mvm->tcm.paused = false;
+ spin_unlock_bh(&mvm->tcm.lock);
+}
+
+void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
+ iwl_mvm_tcm_uapsd_nonagg_detected_wk);
+}
+
+void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
+}
+
+
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
{
bool ps_disabled;
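
iwl_mvm_tcm_load_percentage() above turns microseconds of airtime, measured over a window counted in milliseconds, into a percentage. A self-contained version of the same arithmetic with a worked example (1.2 s of airtime inside a 2 s window gives 60%):

    #include <stdint.h>
    #include <stdio.h>

    #define USEC_PER_MSEC 1000u

    /* airtime is in microseconds and elapsed in milliseconds, so
     * 100 * airtime / elapsed is "usec per msec, times 100";
     * dividing by USEC_PER_MSEC turns that into a percentage. */
    static uint8_t tcm_load_percentage(uint32_t airtime_usec, uint32_t elapsed_msec)
    {
        if (!elapsed_msec)
            return 0;

        return (100 * airtime_usec / elapsed_msec) / USEC_PER_MSEC;
    }

    int main(void)
    {
        /* 1.2 s of airtime inside a 2 s window -> 60% load */
        printf("load = %u%%\n", (unsigned)tcm_load_percentage(1200000, 2000));
        return 0;
    }
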
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 5ef216f3a60b..3fc4343581ee 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -244,7 +244,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
ctxt_info->hcmd_cfg.cmd_queue_addr =
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
- TFD_QUEUE_CB_SIZE(trans_pcie->tx_cmd_queue_size);
+ TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index ca3b64ff4dad..45ea32796cda 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -383,7 +383,8 @@ struct iwl_self_init_dram {
* @hw_init_mask: initial unmasked hw causes
* @fh_mask: current unmasked fh causes
* @hw_mask: current unmasked hw causes
- * @tx_cmd_queue_size: the size of the tx command queue
+ * @in_rescan: true if we have triggered a device rescan
+ * @scheduled_for_removal: true if we have scheduled a device removal
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -466,6 +467,8 @@ struct iwl_trans_pcie {
u32 hw_mask;
cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
u16 tx_cmd_queue_size;
+ bool in_rescan;
+ bool scheduled_for_removal;
};
static inline struct iwl_trans_pcie *
@@ -537,7 +540,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans);
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
u8 idx)
@@ -822,7 +824,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
struct iwl_tx_queue_cfg_cmd *cmd,
- int cmd_id,
+ int cmd_id, int size,
unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index f25ce3a1ea50..f772d70a65e4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -3,6 +3,7 @@
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -201,7 +202,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ BIT(trans->cfg->csr->flag_mac_access_req));
rxq->need_update = true;
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index cb4012541f45..b8e8dac2895d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,7 +94,8 @@ static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
- iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ BIT(trans->cfg->csr->flag_init_done));
/*
* Wait for clock stabilization; once stabilized, access to
@@ -100,8 +103,9 @@ static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
* and accesses to uCode SRAM.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ 25000);
if (ret < 0) {
IWL_DEBUG_INFO(trans, "Failed to init the card\n");
return ret;
@@ -143,7 +147,8 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ BIT(trans->cfg->csr->flag_init_done));
}
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
@@ -187,7 +192,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ BIT(trans->cfg->csr->flag_mac_access_req));
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
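
These hunks replace fixed CSR_GP_CNTRL_* masks with BIT(trans->cfg->csr->flag_*), so the bit positions come from the per-family configuration rather than compile-time constants. A small sketch of that pattern; the layouts and bit numbers below are invented, not real register maps:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* Store bit *positions* per hardware family instead of hard-coded masks. */
    struct csr_layout {
        uint8_t flag_init_done;
        uint8_t flag_mac_clock_ready;
    };

    static const struct csr_layout family_a = {
        .flag_init_done       = 2,
        .flag_mac_clock_ready = 0,
    };

    static const struct csr_layout family_b = {
        .flag_init_done       = 6,
        .flag_mac_clock_ready = 5,
    };

    /* The code path stays identical; only the config pointer differs. */
    static uint32_t apm_init_bit(const struct csr_layout *csr)
    {
        return BIT(csr->flag_init_done);
    }

    int main(void)
    {
        printf("family A init-done mask: 0x%08x\n", apm_init_bit(&family_a));
        printf("family B init-done mask: 0x%08x\n", apm_init_bit(&family_b));
        return 0;
    }
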
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f8a0234d332c..6e9a9ecfb11c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -8,6 +8,7 @@
* Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
* Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,6 +75,7 @@
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>
+#include <linux/module.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
@@ -179,7 +182,8 @@ out:
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset,
+ BIT(trans->cfg->csr->flag_sw_reset));
usleep_range(5000, 6000);
}
@@ -372,7 +376,8 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
- iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ BIT(trans->cfg->csr->flag_init_done));
/*
* Wait for clock stabilization; once stabilized, access to
@@ -380,8 +385,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
* and accesses to uCode SRAM.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ 25000);
if (ret < 0) {
IWL_ERR(trans, "Failed to init the card\n");
return ret;
@@ -459,15 +465,16 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
- iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ BIT(trans->cfg->csr->flag_init_done));
/*
* Wait for clock stabilization; once stabilized, access to
* device-internal resources is possible.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
25000);
if (WARN_ON(ret < 0)) {
IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
@@ -519,7 +526,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ BIT(trans->cfg->csr->flag_init_done));
/* Activates XTAL resources monitor */
__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
@@ -541,11 +548,12 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
int ret;
/* stop device's busmaster DMA activity */
- iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+ iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset,
+ BIT(trans->cfg->csr->flag_stop_master));
- ret = iwl_poll_bit(trans, CSR_RESET,
- CSR_RESET_REG_FLAG_MASTER_DISABLED,
- CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ ret = iwl_poll_bit(trans, trans->cfg->csr->addr_sw_reset,
+ BIT(trans->cfg->csr->flag_master_dis),
+ BIT(trans->cfg->csr->flag_master_dis), 100);
if (ret < 0)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
@@ -594,7 +602,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ BIT(trans->cfg->csr->flag_init_done));
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
@@ -1267,7 +1275,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ BIT(trans->cfg->csr->flag_mac_access_req));
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
@@ -1497,9 +1505,9 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
iwl_pcie_synchronize_irqs(trans);
iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ BIT(trans->cfg->csr->flag_mac_access_req));
iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ BIT(trans->cfg->csr->flag_init_done));
iwl_pcie_enable_rx_wake(trans, false);
@@ -1543,15 +1551,17 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_pcie_reset_ict(trans);
iwl_enable_interrupts(trans);
- iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ BIT(trans->cfg->csr->flag_mac_access_req));
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ BIT(trans->cfg->csr->flag_init_done));
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
25000);
if (ret < 0) {
IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
@@ -1562,7 +1572,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
if (!reset) {
iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ BIT(trans->cfg->csr->flag_mac_access_req));
} else {
iwl_trans_pcie_tx_reset(trans);
@@ -1926,6 +1936,29 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
+struct iwl_trans_pcie_removal {
+ struct pci_dev *pdev;
+ struct work_struct work;
+};
+
+static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
+{
+ struct iwl_trans_pcie_removal *removal =
+ container_of(wk, struct iwl_trans_pcie_removal, work);
+ struct pci_dev *pdev = removal->pdev;
+ char *prop[] = {"EVENT=INACCESSIBLE", NULL};
+
+ dev_err(&pdev->dev, "Device gone - attempting removal\n");
+ kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
+ pci_lock_rescan_remove();
+ pci_dev_put(pdev);
+ pci_stop_and_remove_bus_device(pdev);
+ pci_unlock_rescan_remove();
+
+ kfree(removal);
+ module_put(THIS_MODULE);
+}
+
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
unsigned long *flags)
{
@@ -1939,7 +1972,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ BIT(trans->cfg->csr->flag_mac_access_req));
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
@@ -1964,15 +1997,59 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
* and do not save/restore SRAM when power cycling.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
- (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ BIT(trans->cfg->csr->flag_val_mac_access_en),
+ (BIT(trans->cfg->csr->flag_mac_clock_ready) |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (unlikely(ret < 0)) {
- iwl_trans_pcie_dump_regs(trans);
- iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+ u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
+
WARN_ONCE(1,
"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
- iwl_read32(trans, CSR_GP_CNTRL));
+ cntrl);
+
+ iwl_trans_pcie_dump_regs(trans);
+
+ if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
+ struct iwl_trans_pcie_removal *removal;
+
+ if (trans_pcie->scheduled_for_removal)
+ goto err;
+
+ IWL_ERR(trans, "Device gone - scheduling removal!\n");
+
+ /*
+ * get a module reference to avoid doing this
+ * while unloading anyway and to avoid
+ * scheduling a work with code that's being
+ * removed.
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ IWL_ERR(trans,
+ "Module is being unloaded - abort\n");
+ goto err;
+ }
+
+ removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
+ if (!removal) {
+ module_put(THIS_MODULE);
+ goto err;
+ }
+ /*
+ * we don't need to clear this flag, because
+ * the trans will be freed and reallocated.
+ */
+ trans_pcie->scheduled_for_removal = true;
+
+ removal->pdev = to_pci_dev(trans->dev);
+ INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
+ pci_dev_get(removal->pdev);
+ schedule_work(&removal->work);
+ } else {
+ iwl_write32(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_FORCE_NMI);
+ }
+
+err:
spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
return false;
}
@@ -2003,7 +2080,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
goto out;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ BIT(trans->cfg->csr->flag_mac_access_req));
/*
* Above we read the CSR_GP_CNTRL register, which will flush
* any previous writes, but we need the write that clears the
@@ -3232,12 +3309,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* id located at the AUX bus MISC address space.
*/
iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ BIT(trans->cfg->csr->flag_init_done));
udelay(2);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
+ BIT(trans->cfg->csr->flag_mac_clock_ready),
25000);
if (ret < 0) {
IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
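
The grab_nic_access() error path above treats an all-ones read of CSR_GP_CNTRL as the signature of a device that has fallen off the PCI bus and, with the remove_when_gone module parameter set, schedules the removal work instead of forcing an NMI. A minimal sketch of the detection itself; the PCI teardown and module refcounting are left out:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* MMIO reads from a PCI device that has dropped off the bus return
     * all-ones, so that exact value is used as the "device gone" signature
     * before deciding between forcing an NMI and scheduling removal. */
    static bool device_gone(uint32_t csr_gp_cntrl)
    {
        return csr_gp_cntrl == ~0u;
    }

    int main(void)
    {
        printf("0x00400000 -> gone? %d\n", device_gone(0x00400000u));
        printf("0xffffffff -> gone? %d\n", device_gone(0xffffffffu));
        return 0;
    }
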
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index fabae0f60683..48890a1c825f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -488,6 +488,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
+ if (iwl_queue_space(txq) < txq->high_mark) {
+ iwl_stop_queue(trans, txq);
+
+ /* don't put the packet on the ring, if there is no room */
+ if (unlikely(iwl_queue_space(txq) < 3)) {
+ struct iwl_device_cmd **dev_cmd_ptr;
+
+ dev_cmd_ptr = (void *)((u8 *)skb->cb +
+ trans_pcie->dev_cmd_offs);
+
+ *dev_cmd_ptr = dev_cmd;
+ __skb_queue_tail(&txq->overflow_q, skb);
+ spin_unlock(&txq->lock);
+ return 0;
+ }
+ }
+
idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
/* Set up driver data for this TFD */
@@ -523,9 +540,6 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* Tell device the write index *just past* this latest filled TFD */
txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
- if (iwl_queue_space(txq) < txq->high_mark)
- iwl_stop_queue(trans, txq);
-
/*
* At this point the frame is "transmitted" successfully
* and we will get a TX status notification eventually.
@@ -555,15 +569,13 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
unsigned long flags;
void *dup_buf = NULL;
dma_addr_t phys_addr;
- int i, cmd_pos, idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+ int i, cmd_pos, idx;
u16 copy_size, cmd_size, tb0_size;
bool had_nocopy = false;
u8 group_id = iwl_cmd_groupid(cmd->id);
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
- struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
-
- memset(tfd, 0, sizeof(*tfd));
+ struct iwl_tfh_tfd *tfd;
copy_size = sizeof(struct iwl_cmd_header_wide);
cmd_size = sizeof(struct iwl_cmd_header_wide);
@@ -634,6 +646,10 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
+ idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+ tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
+ memset(tfd, 0, sizeof(*tfd));
+
if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
@@ -957,6 +973,13 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
}
+
+ while (!skb_queue_empty(&txq->overflow_q)) {
+ struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ }
+
spin_unlock_bh(&txq->lock);
/* just in case - this queue may have been stopped */
@@ -972,7 +995,7 @@ static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+ trans_pcie->tfd_size * txq->n_window,
txq->tfds, txq->dma_addr);
dma_free_coherent(dev,
sizeof(*txq->first_tb_bufs) * txq->n_window,
@@ -1020,7 +1043,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
struct iwl_tx_queue_cfg_cmd *cmd,
- int cmd_id,
+ int cmd_id, int size,
unsigned int timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1046,12 +1069,12 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
return -ENOMEM;
}
- ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false);
+ ret = iwl_pcie_txq_alloc(trans, txq, size, false);
if (ret) {
IWL_ERR(trans, "Tx queue alloc failed\n");
goto error;
}
- ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false);
+ ret = iwl_pcie_txq_init(trans, txq, size, false);
if (ret) {
IWL_ERR(trans, "Tx queue init failed\n");
goto error;
@@ -1061,7 +1084,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_TX_CMD_SLOTS));
+ cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
ret = iwl_trans_send_cmd(trans, &hcmd);
if (ret)
@@ -1152,8 +1175,6 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
struct iwl_txq *cmd_queue;
int txq_id = trans_pcie->cmd_queue, ret;
- iwl_pcie_set_tx_cmd_queue_size(trans);
-
/* alloc and init the command queue */
if (!trans_pcie->txq[txq_id]) {
cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
@@ -1162,8 +1183,7 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
return -ENOMEM;
}
trans_pcie->txq[txq_id] = cmd_queue;
- ret = iwl_pcie_txq_alloc(trans, cmd_queue,
- trans_pcie->tx_cmd_queue_size, true);
+ ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
@@ -1172,8 +1192,7 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
cmd_queue = trans_pcie->txq[txq_id];
}
- ret = iwl_pcie_txq_init(trans, cmd_queue,
- trans_pcie->tx_cmd_queue_size, true);
+ ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
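
iwl_trans_pcie_gen2_tx() now checks queue space before writing the TFD: below the high mark the mac80211 queue is stopped, and with almost no space left the frame is parked on overflow_q instead of the ring. A toy model of that policy with invented sizes and thresholds:

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented sizes; the point is the ordering of the checks: stop the
     * queue below the high mark, and only park the frame when there is
     * almost no room left on the ring. */
    #define RING_SIZE 8
    #define HIGH_MARK 4
    #define MIN_SPACE 3

    struct txq {
        int used;       /* slots occupied on the ring */
        int overflow;   /* frames parked on the overflow list instead */
        bool stopped;   /* would map to stopping the mac80211 queue */
    };

    static void txq_enqueue(struct txq *q)
    {
        int space = RING_SIZE - q->used;

        if (space < HIGH_MARK) {
            q->stopped = true;
            if (space < MIN_SPACE) {
                q->overflow++;      /* keep the frame, replay it on reclaim */
                return;
            }
        }
        q->used++;
    }

    int main(void)
    {
        struct txq q = { 0, 0, false };
        int i;

        for (i = 0; i < 10; i++)
            txq_enqueue(&q);

        printf("ring: %d, overflow: %d, stopped: %d\n",
               q.used, q.overflow, q.stopped);
        return 0;
    }
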
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 1a566287993d..473fe7ccb07c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -3,6 +3,7 @@
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -273,7 +274,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
txq_id, reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ BIT(trans->cfg->csr->flag_mac_access_req));
txq->need_update = true;
return;
}
@@ -495,6 +496,9 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
if (WARN_ON(txq->entries || txq->tfds))
return -EINVAL;
+ if (trans->cfg->use_tfh)
+ tfd_sz = trans_pcie->tfd_size * slots_num;
+
timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
txq->trans_pcie = trans_pcie;
@@ -608,7 +612,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
trans_pcie->cmd_hold_nic_awake = false;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ BIT(trans->cfg->csr->flag_mac_access_req));
}
/*
@@ -950,8 +954,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
txq_id++) {
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
- slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size :
- TFD_TX_CMD_SLOTS;
+ slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
@@ -970,21 +973,6 @@ error:
return ret;
}
-void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int queue_size = TFD_CMD_SLOTS;
-
- if (trans->cfg->tx_cmd_queue_size)
- queue_size = trans->cfg->tx_cmd_queue_size;
-
- if (WARN_ON(!(is_power_of_2(queue_size) &&
- TFD_QUEUE_CB_SIZE(queue_size) > 0)))
- trans_pcie->tx_cmd_queue_size = TFD_CMD_SLOTS;
- else
- trans_pcie->tx_cmd_queue_size = queue_size;
-}
-
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -992,8 +980,6 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
int txq_id, slots_num;
bool alloc = false;
- iwl_pcie_set_tx_cmd_queue_size(trans);
-
if (!trans_pcie->txq_memory) {
ret = iwl_pcie_tx_alloc(trans);
if (ret)
@@ -1017,8 +1003,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
txq_id++) {
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
- slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size :
- TFD_TX_CMD_SLOTS;
+ slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
if (ret) {
@@ -1166,7 +1151,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
* In that case, iwl_queue_space will be small again
* and we won't wake mac80211's queue.
*/
- iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id);
+ iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
}
spin_lock_bh(&txq->lock);
@@ -1187,6 +1172,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
const struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ const struct iwl_cfg *cfg = trans->cfg;
int ret;
lockdep_assert_held(&trans_pcie->reg_lock);
@@ -1204,19 +1190,19 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
* returned. This needs to be done only on NICs that have
* apmg_wake_up_wa set.
*/
- if (trans->cfg->base_params->apmg_wake_up_wa &&
+ if (cfg->base_params->apmg_wake_up_wa &&
!trans_pcie->cmd_hold_nic_awake) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ BIT(cfg->csr->flag_mac_access_req));
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
- (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ BIT(cfg->csr->flag_val_mac_access_en),
+ (BIT(cfg->csr->flag_mac_clock_ready) |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
15000);
if (ret < 0) {
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ BIT(cfg->csr->flag_mac_access_req));
IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
return -EIO;
}
@@ -2411,7 +2397,13 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
- if (amsdu) {
+ /*
+ * If gso_size wasn't set, don't give the frame "amsdu treatment"
+ * (adding subframes, etc.).
+ * This can happen in some testing flows when the amsdu was already
+ * pre-built, and we just need to send the resulting skb.
+ */
+ if (amsdu && skb_shinfo(skb)->gso_size) {
if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
out_meta, dev_cmd,
tb1_len)))
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7bfbc5916e9a..c26469b54ac9 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3237,6 +3237,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
GENL_SET_ERR_MSG(info,"MAC is no valid source addr");
NL_SET_BAD_ATTR(info->extack,
info->attrs[HWSIM_ATTR_PERM_ADDR]);
+ kfree(hwname);
return -EINVAL;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 7f7e9de2db1c..54a2297010d2 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -929,7 +929,7 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
adapter->rx_locked = false;
spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
- mwifiex_set_mac_address(priv, dev);
+ mwifiex_set_mac_address(priv, dev, false, NULL);
return 0;
}
@@ -1979,7 +1979,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
bss_cfg->bcast_ssid_ctl = 0;
break;
case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
- /* firmware doesn't support this type of hidden SSID */
+ bss_cfg->bcast_ssid_ctl = 2;
+ break;
default:
kfree(bss_cfg);
return -EINVAL;
@@ -2978,7 +2979,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->netdev = dev;
if (!adapter->mfg_mode) {
- mwifiex_set_mac_address(priv, dev);
+ mwifiex_set_mac_address(priv, dev, false, NULL);
ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
HostCmd_ACT_GEN_SET, 0, NULL, true);
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 7014f440e6f8..9cfcdf6bec52 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -25,7 +25,6 @@
#include "main.h"
#include "wmm.h"
#include "11n.h"
-#include "11ac.h"
static void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter);
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 922e3d69fd84..b10baacb51c9 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -349,6 +349,7 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
case WLAN_EID_SUPP_RATES:
case WLAN_EID_COUNTRY:
case WLAN_EID_PWR_CONSTRAINT:
+ case WLAN_EID_ERP_INFO:
case WLAN_EID_EXT_SUPP_RATES:
case WLAN_EID_HT_CAPABILITY:
case WLAN_EID_HT_OPERATION:
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index b6484582845a..510f6b8e717d 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -858,7 +858,7 @@ mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv,
/*
* CFG802.11 network device handler for data transmission.
*/
-static int
+static netdev_tx_t
mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
@@ -940,28 +940,32 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
int mwifiex_set_mac_address(struct mwifiex_private *priv,
- struct net_device *dev)
+ struct net_device *dev, bool external,
+ u8 *new_mac)
{
int ret;
u64 mac_addr, old_mac_addr;
- if (priv->bss_type == MWIFIEX_BSS_TYPE_ANY)
- return -ENOTSUPP;
+ old_mac_addr = ether_addr_to_u64(priv->curr_addr);
- mac_addr = ether_addr_to_u64(priv->curr_addr);
- old_mac_addr = mac_addr;
+ if (external) {
+ mac_addr = ether_addr_to_u64(new_mac);
+ } else {
+ /* Internal mac address change */
+ if (priv->bss_type == MWIFIEX_BSS_TYPE_ANY)
+ return -ENOTSUPP;
- if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P)
- mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT);
+ mac_addr = old_mac_addr;
- if (mwifiex_get_intf_num(priv->adapter, priv->bss_type) > 1) {
- /* Set mac address based on bss_type/bss_num */
- mac_addr ^= BIT_ULL(priv->bss_type + 8);
- mac_addr += priv->bss_num;
- }
+ if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P)
+ mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT);
- if (mac_addr == old_mac_addr)
- goto done;
+ if (mwifiex_get_intf_num(priv->adapter, priv->bss_type) > 1) {
+ /* Set mac address based on bss_type/bss_num */
+ mac_addr ^= BIT_ULL(priv->bss_type + 8);
+ mac_addr += priv->bss_num;
+ }
+ }
u64_to_ether_addr(mac_addr, priv->curr_addr);
@@ -976,7 +980,6 @@ int mwifiex_set_mac_address(struct mwifiex_private *priv,
return ret;
}
-done:
ether_addr_copy(dev->dev_addr, priv->curr_addr);
return 0;
}
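
The refactored mwifiex_set_mac_address() above distinguishes externally supplied addresses from internally derived ones; the internal case sets the locally-administered bit for P2P and makes per-interface addresses unique by XORing a bit chosen from bss_type and adding bss_num. A standalone sketch of that derivation on a 64-bit address value; the bit index used for the locally-administered flag is an assumption of this sketch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n) (1ULL << (n))

    /* Assumed position of the locally-administered bit when the MAC address
     * sits in the low 48 bits of a u64 (an assumption for this sketch). */
    #define LOCAL_ADMIN_BIT 41

    static uint64_t derive_mac(uint64_t base, int bss_type, int bss_num,
                               int intfs_of_type, bool p2p)
    {
        uint64_t mac = base;

        if (p2p)
            mac |= BIT_ULL(LOCAL_ADMIN_BIT);

        if (intfs_of_type > 1) {
            mac ^= BIT_ULL(bss_type + 8);   /* one flipped bit per BSS type */
            mac += bss_num;                 /* and an offset per instance */
        }

        return mac;
    }

    int main(void)
    {
        uint64_t base = 0x001122334455ULL;

        printf("derived: %012llx\n",
               (unsigned long long)derive_mac(base, 1, 1, 2, false));
        return 0;
    }
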
@@ -989,8 +992,7 @@ mwifiex_ndo_set_mac_address(struct net_device *dev, void *addr)
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct sockaddr *hw_addr = addr;
- memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN);
- return mwifiex_set_mac_address(priv, dev);
+ return mwifiex_set_mac_address(priv, dev, true, hw_addr->sa_data);
}
/*
@@ -1331,7 +1333,10 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
priv->assocresp_idx = MWIFIEX_AUTO_IDX_MASK;
priv->gen_idx = MWIFIEX_AUTO_IDX_MASK;
priv->num_tx_timeout = 0;
- ether_addr_copy(priv->curr_addr, priv->adapter->perm_addr);
+ if (is_valid_ether_addr(dev->dev_addr))
+ ether_addr_copy(priv->curr_addr, dev->dev_addr);
+ else
+ ether_addr_copy(priv->curr_addr, priv->adapter->perm_addr);
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 9bde181700dc..8ae74ed78805 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -84,8 +84,8 @@ enum {
#define MWIFIEX_TIMER_10S 10000
#define MWIFIEX_TIMER_1S 1000
-#define MAX_TX_PENDING 100
-#define LOW_TX_PENDING 80
+#define MAX_TX_PENDING 400
+#define LOW_TX_PENDING 380
#define HIGH_RX_PENDING 50
#define LOW_RX_PENDING 20
@@ -1709,7 +1709,8 @@ void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
struct sk_buff *event_skb);
void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter);
int mwifiex_set_mac_address(struct mwifiex_private *priv,
- struct net_device *dev);
+ struct net_device *dev,
+ bool external, u8 *new_mac);
void mwifiex_devdump_tmo_func(unsigned long function_context);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 97a6199692ab..7538543d46fa 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -1881,7 +1881,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, EVENT,
"info: Event length: %d\n", evt_len);
- if ((evt_len > 0) && (evt_len < MAX_EVENT_SIZE))
+ if (evt_len > MWIFIEX_EVENT_HEADER_LEN &&
+ evt_len < MAX_EVENT_SIZE)
memcpy(adapter->event_body, skb_cmd->data +
MWIFIEX_EVENT_HEADER_LEN, evt_len -
MWIFIEX_EVENT_HEADER_LEN);
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index e8c8728db15a..e86217a6b9ca 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -108,7 +108,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
struct mwifiex_adapter *adapter = priv->adapter;
int len, i;
u32 eventcause = adapter->event_cause;
- struct station_info sinfo;
+ struct station_info *sinfo;
struct mwifiex_assoc_event *event;
struct mwifiex_sta_node *node;
u8 *deauth_mac;
@@ -117,7 +117,10 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
switch (eventcause) {
case EVENT_UAP_STA_ASSOC:
- memset(&sinfo, 0, sizeof(sinfo));
+ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+ if (!sinfo)
+ return -ENOMEM;
+
event = (struct mwifiex_assoc_event *)
(adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
@@ -132,28 +135,31 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
len = ETH_ALEN;
if (len != -1) {
- sinfo.assoc_req_ies = &event->data[len];
- len = (u8 *)sinfo.assoc_req_ies -
+ sinfo->assoc_req_ies = &event->data[len];
+ len = (u8 *)sinfo->assoc_req_ies -
(u8 *)&event->frame_control;
- sinfo.assoc_req_ies_len =
+ sinfo->assoc_req_ies_len =
le16_to_cpu(event->len) - (u16)len;
}
}
- cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
+ cfg80211_new_sta(priv->netdev, event->sta_addr, sinfo,
GFP_KERNEL);
node = mwifiex_add_sta_entry(priv, event->sta_addr);
if (!node) {
mwifiex_dbg(adapter, ERROR,
"could not create station entry!\n");
+ kfree(sinfo);
return -1;
}
- if (!priv->ap_11n_enabled)
+ if (!priv->ap_11n_enabled) {
+ kfree(sinfo);
break;
+ }
- mwifiex_set_sta_ht_cap(priv, sinfo.assoc_req_ies,
- sinfo.assoc_req_ies_len, node);
+ mwifiex_set_sta_ht_cap(priv, sinfo->assoc_req_ies,
+ sinfo->assoc_req_ies_len, node);
for (i = 0; i < MAX_NUM_TID; i++) {
if (node->is_11n_enabled)
@@ -163,6 +169,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
}
memset(node->rx_seq, 0xff, sizeof(node->rx_seq));
+ kfree(sinfo);
break;
case EVENT_UAP_STA_DEAUTH:
deauth_mac = adapter->event_body +
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index fcb208d1f276..b67acc6189bf 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -103,6 +103,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
__skb_queue_head_init(&frames);
local_bh_disable();
+ rcu_read_lock();
spin_lock(&tid->lock);
mt76_rx_aggr_check_release(tid, &frames);
@@ -114,6 +115,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
REORDER_TIMEOUT);
mt76_rx_complete(dev, &frames, -1);
+ rcu_read_unlock();
local_bh_enable();
}
@@ -147,12 +149,13 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct mt76_wcid *wcid = status->wcid;
struct ieee80211_sta *sta;
struct mt76_rx_tid *tid;
bool sn_less;
u16 seqno, head, size;
- u8 idx;
+ u8 ackp, idx;
__skb_queue_tail(frames, skb);
@@ -165,10 +168,17 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
return;
}
+ /* not part of a BA session */
+ ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
+ if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
+ ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+ return;
+
tid = rcu_dereference(wcid->aggr[status->tid]);
if (!tid)
return;
+ status->flag |= RX_FLAG_DUP_VALIDATED;
spin_lock_bh(&tid->lock);
if (tid->stopped)
@@ -258,6 +268,8 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
u8 size = tid->size;
int i;
+ cancel_delayed_work(&tid->reorder_work);
+
spin_lock_bh(&tid->lock);
tid->stopped = true;
@@ -272,8 +284,6 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
}
spin_unlock_bh(&tid->lock);
-
- cancel_delayed_work_sync(&tid->reorder_work);
}
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 4f30cdcd2b53..915e61733131 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -213,7 +213,8 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
vht_cap->vht_supported = true;
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_SHORT_GI_80;
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ (3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
return 0;
}
@@ -541,15 +542,13 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
return;
- if (ps) {
+ if (ps)
set_bit(MT_WCID_FLAG_PS, &wcid->flags);
- mt76_stop_tx_queues(dev, sta, true);
- } else {
+ else
clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
- }
- ieee80211_sta_ps_transition(sta, ps);
dev->drv->sta_ps(dev, sta, ps);
+ ieee80211_sta_ps_transition(sta, ps);
}
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
@@ -562,6 +561,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
if (queue >= 0)
napi = &dev->napi[queue];
+ spin_lock(&dev->rx_lock);
while ((skb = __skb_dequeue(frames)) != NULL) {
if (mt76_check_ccmp_pn(skb)) {
dev_kfree_skb(skb);
@@ -571,6 +571,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
sta = mt76_rx_convert(skb);
ieee80211_rx_napi(dev->hw, sta, skb, napi);
}
+ spin_unlock(&dev->rx_lock);
}
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 065ff78059c3..a74e6eef51e9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -241,6 +241,7 @@ struct mt76_dev {
struct device *dev;
struct net_device napi_dev;
+ spinlock_t rx_lock;
struct napi_struct napi[__MT_RXQ_MAX];
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
index 783b8122ec3c..a5d1255e4b9c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h
@@ -117,7 +117,6 @@ struct mt76x2_dev {
u8 beacon_mask;
u8 beacon_data_mask;
- u32 rev;
u32 rxfilter;
u16 chainmask;
@@ -151,7 +150,7 @@ struct mt76x2_sta {
static inline bool is_mt7612(struct mt76x2_dev *dev)
{
- return (dev->rev >> 16) == 0x7612;
+ return mt76_chip(&dev->mt76) == 0x7612;
}
void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
index 47f79d83fcb4..e9d426bbf91a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
@@ -19,7 +19,7 @@
#include "dma.h"
-#define MT_TXD_INFO_LEN GENMASK(13, 0)
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
#define MT_TXD_INFO_NEXT_VLD BIT(16)
#define MT_TXD_INFO_TX_BURST BIT(17)
#define MT_TXD_INFO_80211 BIT(19)
@@ -27,9 +27,8 @@
#define MT_TXD_INFO_CSO BIT(21)
#define MT_TXD_INFO_WIV BIT(24)
#define MT_TXD_INFO_QSEL GENMASK(26, 25)
-#define MT_TXD_INFO_TCO BIT(29)
-#define MT_TXD_INFO_UCO BIT(30)
-#define MT_TXD_INFO_ICO BIT(31)
+#define MT_TXD_INFO_DPORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
index 5bb50027c1e8..95d5f7d888f0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
@@ -609,17 +609,13 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
memset(t, 0, sizeof(*t));
- val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
- if (!(val & MT_EE_NIC_CONF_1_TEMP_TX_ALC))
+ if (!mt76x2_temp_tx_alc_enabled(dev))
return -EINVAL;
if (!mt76x2_ext_pa_enabled(dev, band))
return -EINVAL;
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
- if (!(val & BIT(7)))
- return -EINVAL;
-
t->temp_25_ref = val & 0x7f;
if (band == NL80211_BAND_5GHZ) {
slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
index d79122728dca..aa0b0c040375 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
@@ -159,6 +159,12 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
static inline bool
mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
{
+ u16 val;
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+ if (!(val & BIT(15)))
+ return false;
+
return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
MT_EE_NIC_CONF_1_TEMP_TX_ALC;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
index 934c331d995e..dd4c1127797e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
@@ -228,7 +228,7 @@ mt76x2_init_beacon_offsets(struct mt76x2_dev *dev)
mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
}
-int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
+static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
{
static const u8 null_addr[ETH_ALEN] = {};
const u8 *macaddr = dev->mt76.macaddr;
@@ -370,12 +370,12 @@ void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
/* Wait for MAC to become idle */
for (i = 0; i < 300; i++) {
- if (mt76_rr(dev, MT_MAC_STATUS) &
- (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX))
- continue;
-
- if (mt76_rr(dev, MT_BBP(IBI, 12)))
+ if ((mt76_rr(dev, MT_MAC_STATUS) &
+ (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
+ mt76_rr(dev, MT_BBP(IBI, 12))) {
+ usleep_range(10, 20);
continue;
+ }
stopped = true;
break;
@@ -645,6 +645,7 @@ struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
dev->mt76.drv = &drv_ops;
mutex_init(&dev->mutex);
spin_lock_init(&dev->irq_lock);
+ spin_lock_init(&dev->mt76.rx_lock);
return dev;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
index d18315652583..dab713756004 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
@@ -410,7 +410,6 @@ mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
break;
default:
return -EINVAL;
- break;
}
if (rate & MT_RXWI_RATE_SGI)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
index 8a8a25e32d5f..c048cd06df6b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
@@ -157,7 +157,6 @@ mt76x2_skb_tx_info(struct sk_buff *skb)
return (void *) info->status.status_driver_data;
}
-int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard);
int mt76x2_mac_start(struct mt76x2_dev *dev);
void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force);
void mt76x2_mac_resume(struct mt76x2_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
index 73c127f92613..81c58f865c64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
@@ -321,6 +321,7 @@ mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
int idx = msta->wcid.idx;
+ mt76_stop_tx_queues(&dev->mt76, sta, true);
mt76x2_mac_wcid_set_drop(dev, idx, ps);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
index fcc37eb7ce0b..c1c38ca3330a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
@@ -73,16 +73,6 @@ int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
return rssi;
}
-static u8
-mt76x2_txpower_check(int value)
-{
- if (value < 0)
- return 0;
- if (value > 0x2f)
- return 0x2f;
- return value;
-}
-
static void
mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
{
@@ -102,6 +92,26 @@ mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
r->all[i] = limit;
}
+static int
+mt76x2_get_min_rate_power(struct mt76_rate_power *r)
+{
+ int i;
+ s8 ret = 0;
+
+ for (i = 0; i < sizeof(r->all); i++) {
+ if (!r->all[i])
+ continue;
+
+ if (ret)
+ ret = min(ret, r->all[i]);
+ else
+ ret = r->all[i];
+ }
+
+ return ret;
+}
+
+
void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
{
enum nl80211_chan_width width = dev->mt76.chandef.width;
@@ -109,6 +119,7 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
struct mt76x2_tx_power_info txp;
int txp_0, txp_1, delta = 0;
struct mt76_rate_power t = {};
+ int base_power, gain;
mt76x2_get_power_info(dev, &txp, chan);
@@ -117,26 +128,32 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
else if (width == NL80211_CHAN_WIDTH_80)
delta = txp.delta_bw80;
- if (txp.target_power > dev->txpower_conf)
- delta -= txp.target_power - dev->txpower_conf;
-
mt76x2_get_rate_power(dev, &t, chan);
- mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power +
- txp.chain[0].delta);
+ mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
mt76x2_limit_rate_power(&t, dev->txpower_conf);
dev->txpower_cur = mt76x2_get_max_rate_power(&t);
- mt76x2_add_rate_power_offset(&t, -(txp.chain[0].target_power +
- txp.chain[0].delta + delta));
- dev->target_power = txp.chain[0].target_power;
- dev->target_power_delta[0] = txp.chain[0].delta + delta;
- dev->target_power_delta[1] = txp.chain[1].delta + delta;
- dev->rate_power = t;
- txp_0 = mt76x2_txpower_check(txp.chain[0].target_power +
- txp.chain[0].delta + delta);
+ base_power = mt76x2_get_min_rate_power(&t);
+ delta += base_power - txp.chain[0].target_power;
+ txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
+ txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
+
+ gain = min(txp_0, txp_1);
+ if (gain < 0) {
+ base_power -= gain;
+ txp_0 -= gain;
+ txp_1 -= gain;
+ } else if (gain > 0x2f) {
+ base_power -= gain - 0x2f;
+ txp_0 = 0x2f;
+ txp_1 = 0x2f;
+ }
- txp_1 = mt76x2_txpower_check(txp.chain[1].target_power +
- txp.chain[1].delta + delta);
+ mt76x2_add_rate_power_offset(&t, -base_power);
+ dev->target_power = txp.chain[0].target_power;
+ dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
+ dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
+ dev->rate_power = t;
mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
@@ -180,7 +197,7 @@ mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
if (mt76x2_channel_silent(dev))
return false;
- if (chan->band == NL80211_BAND_2GHZ)
+ if (chan->band == NL80211_BAND_5GHZ)
flag |= BIT(0);
if (mt76x2_ext_pa_enabled(dev, chan->band))
@@ -257,7 +274,6 @@ mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band)
mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
}
- mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
if (mt76x2_ext_pa_enabled(dev, band))
pa_mode_adj = 0x04000000;
@@ -492,8 +508,10 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
u8 gain_delta;
int low_gain;
- dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 + (rssi0 << 8);
- dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 + (rssi1 << 8);
+ dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 +
+ (rssi0 << 8) / 16;
+ dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 +
+ (rssi1 << 8) / 16;
dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] +
dev->cal.avg_rssi[1]) / 512;
@@ -661,6 +679,14 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
sizeof(dev->cal.agc_gain_cur));
+ /* init default values for temp compensation */
+ if (mt76x2_tssi_enabled(dev)) {
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ 0x38);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+ 0x38);
+ }
+
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
MT_CALIBRATE_INTERVAL);
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 4eef69bd8a9e..7ecd2d7c5db4 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -385,6 +385,10 @@ restart:
bool empty = false;
int cur;
+ if (test_bit(MT76_SCANNING, &dev->state) ||
+ test_bit(MT76_RESET, &dev->state))
+ return -EBUSY;
+
mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
if (mtxq->send_bar && mtxq->aggr) {
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
@@ -422,12 +426,14 @@ void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
{
int len;
+ rcu_read_lock();
do {
if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
break;
len = mt76_txq_schedule_list(dev, hwq);
} while (len > 0);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
index d55d7040a56d..148c36d3d2e5 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mac.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -453,7 +453,7 @@ mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
{
dev->bcn_freq_off = rxwi->freq_off;
dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
- dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
+ ewma_rssi_add(&dev->avg_rssi, -rssi);
}
static int
@@ -503,7 +503,7 @@ u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
if (mt7601u_rx_is_our_beacon(dev, data))
mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi);
else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M))
- dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
+ ewma_rssi_add(&dev->avg_rssi, -rssi);
spin_unlock_bh(&dev->con_mon_lock);
return len;
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 3c9ea40d9584..7b21016012c3 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -288,6 +288,12 @@ mt7601u_sw_scan_complete(struct ieee80211_hw *hw,
mt7601u_agc_restore(dev);
clear_bit(MT7601U_STATE_SCANNING, &dev->state);
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ if (dev->freq_cal.enabled)
+ ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+ MT_FREQ_CAL_INIT_DELAY);
}
static int
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
index 9233744451a9..db317d8c1652 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -23,6 +23,7 @@
#include <linux/completion.h>
#include <net/mac80211.h>
#include <linux/debugfs.h>
+#include <linux/average.h>
#include "regs.h"
@@ -138,6 +139,8 @@ enum {
MT7601U_STATE_MORE_STATS,
};
+DECLARE_EWMA(rssi, 10, 4);
+
/**
* struct mt7601u_dev - adapter structure
* @lock: protects @wcid->tx_rate.
@@ -220,7 +223,7 @@ struct mt7601u_dev {
s8 bcn_freq_off;
u8 bcn_phy_mode;
- int avg_rssi; /* starts at 0 and converges */
+ struct ewma_rssi avg_rssi;
u8 agc_save;
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index ca09a5d4305e..9d2f9a776ef1 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -795,6 +795,7 @@ mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate)
switch (phy_mode) {
case MT_PHY_TYPE_OFDM:
tx_rate += 4;
+ /* fall through */
case MT_PHY_TYPE_CCK:
reg = dev->rf_pa_mode[0];
break;
@@ -974,6 +975,7 @@ void mt7601u_agc_restore(struct mt7601u_dev *dev)
static void mt7601u_agc_tune(struct mt7601u_dev *dev)
{
u8 val = mt7601u_agc_default(dev);
+ long avg_rssi;
if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
return;
@@ -983,9 +985,12 @@ static void mt7601u_agc_tune(struct mt7601u_dev *dev)
* Rssi updates are only on beacons and U2M so should work...
*/
spin_lock_bh(&dev->con_mon_lock);
- if (dev->avg_rssi <= -70)
+ avg_rssi = ewma_rssi_read(&dev->avg_rssi);
+ WARN_ON_ONCE(avg_rssi == 0);
+ avg_rssi = -avg_rssi;
+ if (avg_rssi <= -70)
val -= 0x20;
- else if (dev->avg_rssi <= -60)
+ else if (avg_rssi <= -60)
val -= 0x10;
spin_unlock_bh(&dev->con_mon_lock);
@@ -1101,7 +1106,7 @@ void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
/* Start/stop collecting beacon data */
spin_lock_bh(&dev->con_mon_lock);
ether_addr_copy(dev->ap_bssid, info->bssid);
- dev->avg_rssi = 0;
+ ewma_rssi_init(&dev->avg_rssi);
dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
spin_unlock_bh(&dev->con_mon_lock);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 0398bece5782..5122dc798064 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -813,6 +813,9 @@ static int qtnf_start_radar_detection(struct wiphy *wiphy,
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
int ret;
+ if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD))
+ return -ENOTSUPP;
+
ret = qtnf_cmd_start_cac(vif, chandef, cac_time_ms);
if (ret)
pr_err("%s: failed to start CAC ret=%d\n", ndev->name, ret);
@@ -909,6 +912,9 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
{
struct wiphy *wiphy;
+ if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
+ qtn_cfg80211_ops.start_radar_detection = NULL;
+
wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac));
if (!wiphy)
return NULL;
@@ -982,6 +988,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
+
wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index cf26c15a84f8..b3bfb4faa918 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -76,7 +76,7 @@ static int qtnf_netdev_close(struct net_device *ndev)
/* Netdev handler for data transmission.
*/
-static int
+static netdev_tx_t
qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct qtnf_vif *vif;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index bcd415f96412..16617c44f81b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -34,12 +34,13 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
{
const u8 *sta_addr;
u16 frame_control;
- struct station_info sinfo = { 0 };
+ struct station_info *sinfo;
size_t payload_len;
u16 tlv_type;
u16 tlv_value_len;
size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
+ int ret = 0;
if (unlikely(len < sizeof(*sta_assoc))) {
pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
@@ -53,6 +54,10 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
return -EPROTO;
}
+ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+ if (!sinfo)
+ return -ENOMEM;
+
sta_addr = sta_assoc->sta_addr;
frame_control = le16_to_cpu(sta_assoc->frame_control);
@@ -61,9 +66,9 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
qtnf_sta_list_add(vif, sta_addr);
- sinfo.assoc_req_ies = NULL;
- sinfo.assoc_req_ies_len = 0;
- sinfo.generation = vif->generation;
+ sinfo->assoc_req_ies = NULL;
+ sinfo->assoc_req_ies_len = 0;
+ sinfo->generation = vif->generation;
payload_len = len - sizeof(*sta_assoc);
tlv = (const struct qlink_tlv_hdr *)sta_assoc->ies;
@@ -73,23 +78,27 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
tlv_value_len = le16_to_cpu(tlv->len);
tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
- if (tlv_full_len > payload_len)
- return -EINVAL;
+ if (tlv_full_len > payload_len) {
+ ret = -EINVAL;
+ goto out;
+ }
if (tlv_type == QTN_TLV_ID_IE_SET) {
const struct qlink_tlv_ie_set *ie_set;
unsigned int ie_len;
- if (payload_len < sizeof(*ie_set))
- return -EINVAL;
+ if (payload_len < sizeof(*ie_set)) {
+ ret = -EINVAL;
+ goto out;
+ }
ie_set = (const struct qlink_tlv_ie_set *)tlv;
ie_len = tlv_value_len -
(sizeof(*ie_set) - sizeof(ie_set->hdr));
if (ie_set->type == QLINK_IE_SET_ASSOC_REQ && ie_len) {
- sinfo.assoc_req_ies = ie_set->ie_data;
- sinfo.assoc_req_ies_len = ie_len;
+ sinfo->assoc_req_ies = ie_set->ie_data;
+ sinfo->assoc_req_ies_len = ie_len;
}
}
@@ -97,13 +106,17 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (payload_len)
- return -EINVAL;
+ if (payload_len) {
+ ret = -EINVAL;
+ goto out;
+ }
- cfg80211_new_sta(vif->netdev, sta_assoc->sta_addr, &sinfo,
+ cfg80211_new_sta(vif->netdev, sta_assoc->sta_addr, sinfo,
GFP_KERNEL);
- return 0;
+out:
+ kfree(sinfo);
+ return ret;
}
static int
@@ -443,6 +456,17 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif,
cfg80211_cac_event(vif->netdev, &chandef,
NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
break;
+ case QLINK_RADAR_CAC_STARTED:
+ if (vif->wdev.cac_started)
+ break;
+
+ if (!wiphy_ext_feature_isset(wiphy,
+ NL80211_EXT_FEATURE_DFS_OFFLOAD))
+ break;
+
+ cfg80211_cac_event(vif->netdev, &chandef,
+ NL80211_RADAR_CAC_STARTED, GFP_KERNEL);
+ break;
default:
pr_warn("%s: unhandled radar event %u\n",
vif->netdev->name, ev->event);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
index f117904d9120..6c1e139bb8f7 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
@@ -1185,6 +1185,10 @@ static void qtnf_fw_work_handler(struct work_struct *work)
if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
QTN_FW_DL_TIMEOUT_MS)) {
pr_err("card is not ready\n");
+
+ if (!flashboot)
+ release_firmware(fw);
+
goto fw_load_fail;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 9bf3ae4d1b3b..9ab27e158023 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -68,10 +68,12 @@ struct qlink_msg_header {
* @QLINK_HW_CAPAB_STA_INACT_TIMEOUT: device implements a logic to kick-out
* associated STAs due to inactivity. Inactivity timeout period is taken
* from QLINK_CMD_START_AP parameters.
+ * @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality
*/
enum qlink_hw_capab {
- QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
- QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1),
+ QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
+ QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1),
+ QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2),
};
enum qlink_iface_type {
@@ -1031,6 +1033,7 @@ enum qlink_radar_event {
QLINK_RADAR_CAC_ABORTED,
QLINK_RADAR_NOP_FINISHED,
QLINK_RADAR_PRE_CAC_EXPIRED,
+ QLINK_RADAR_CAC_STARTED,
};
/**
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h
index 6a8c93fb6a43..b05ed2f3025a 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h
@@ -94,6 +94,7 @@
#define REV_RT3390E 0x0211
#define REV_RT3593E 0x0211
#define REV_RT5390F 0x0502
+#define REV_RT5370G 0x0503
#define REV_RT5390R 0x1502
#define REV_RT5592C 0x0221
@@ -1193,10 +1194,10 @@
#define TX_PWR_CFG_3_MCS13 FIELD32(0x000000f0)
#define TX_PWR_CFG_3_MCS14 FIELD32(0x00000f00)
#define TX_PWR_CFG_3_MCS15 FIELD32(0x0000f000)
-#define TX_PWR_CFG_3_UKNOWN1 FIELD32(0x000f0000)
-#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000)
-#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000)
-#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000)
+#define TX_PWR_CFG_3_UNKNOWN1 FIELD32(0x000f0000)
+#define TX_PWR_CFG_3_UNKNOWN2 FIELD32(0x00f00000)
+#define TX_PWR_CFG_3_UNKNOWN3 FIELD32(0x0f000000)
+#define TX_PWR_CFG_3_UNKNOWN4 FIELD32(0xf0000000)
/* bits for 3T devices */
#define TX_PWR_CFG_3_MCS12_CH0 FIELD32(0x0000000f)
#define TX_PWR_CFG_3_MCS12_CH1 FIELD32(0x000000f0)
@@ -1216,10 +1217,10 @@
* TX_PWR_CFG_4:
*/
#define TX_PWR_CFG_4 0x1324
-#define TX_PWR_CFG_4_UKNOWN5 FIELD32(0x0000000f)
-#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0)
-#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00)
-#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000)
+#define TX_PWR_CFG_4_UNKNOWN5 FIELD32(0x0000000f)
+#define TX_PWR_CFG_4_UNKNOWN6 FIELD32(0x000000f0)
+#define TX_PWR_CFG_4_UNKNOWN7 FIELD32(0x00000f00)
+#define TX_PWR_CFG_4_UNKNOWN8 FIELD32(0x0000f000)
/* bits for 3T devices */
#define TX_PWR_CFG_4_STBC4_CH0 FIELD32(0x0000000f)
#define TX_PWR_CFG_4_STBC4_CH1 FIELD32(0x000000f0)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 429d07b651dd..a567bc273ffc 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1557,12 +1557,13 @@ static void rt2800_set_max_psdu_len(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
}
-int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
+int rt2800_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- int wcid;
- struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
+ struct rt2x00_dev *rt2x00dev = hw->priv;
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
+ int wcid;
/*
* Limit global maximum TX AMPDU length to smallest value of all
@@ -1608,8 +1609,10 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(rt2800_sta_add);
-int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta)
+int rt2800_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
int wcid = sta_priv->wcid;
@@ -6220,8 +6223,9 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
}
- /* This chip has hardware antenna diversity*/
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
+ /* These chips have hardware RX antenna diversity */
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5370G)) {
rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */
rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */
rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */
@@ -8748,7 +8752,9 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00dev->default_ant.rx = ANTENNA_A;
}
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
+ /* These chips have hardware RX antenna diversity */
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5370G)) {
rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; /* Unused */
rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* Unused */
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 275e3969abdd..51d9c2a932cc 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -208,9 +208,10 @@ int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key);
-int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
+int rt2800_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta);
+int rt2800_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
const unsigned int filter_flags);
void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index 1123e2bed803..e1a7ed7e4892 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -600,6 +600,7 @@ void rt2800mmio_kick_queue(struct data_queue *queue)
case QID_AC_VI:
case QID_AC_BE:
case QID_AC_BK:
+ WARN_ON_ONCE(rt2x00queue_empty(queue));
entry = rt2x00queue_get_entry(queue, Q_INDEX);
rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
entry->entry_idx);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index 1172eefd1c1a..71b1affc3885 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -311,8 +311,8 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.get_key_seq = rt2800_get_key_seq,
.set_rts_threshold = rt2800_set_rts_threshold,
- .sta_add = rt2x00mac_sta_add,
- .sta_remove = rt2x00mac_sta_remove,
+ .sta_add = rt2800_sta_add,
+ .sta_remove = rt2800_sta_remove,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2800_conf_tx,
.get_tsf = rt2800_get_tsf,
@@ -377,8 +377,6 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.config_erp = rt2800_config_erp,
.config_ant = rt2800_config_ant,
.config = rt2800_config,
- .sta_add = rt2800_sta_add,
- .sta_remove = rt2800_sta_remove,
};
static const struct rt2x00_ops rt2800pci_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index 6848ebc83534..a502816214ab 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -150,8 +150,8 @@ static const struct ieee80211_ops rt2800soc_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.get_key_seq = rt2800_get_key_seq,
.set_rts_threshold = rt2800_set_rts_threshold,
- .sta_add = rt2x00mac_sta_add,
- .sta_remove = rt2x00mac_sta_remove,
+ .sta_add = rt2800_sta_add,
+ .sta_remove = rt2800_sta_remove,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2800_conf_tx,
.get_tsf = rt2800_get_tsf,
@@ -216,8 +216,6 @@ static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
.config_erp = rt2800_config_erp,
.config_ant = rt2800_config_ant,
.config = rt2800_config,
- .sta_add = rt2800_sta_add,
- .sta_remove = rt2800_sta_remove,
};
static const struct rt2x00_ops rt2800soc_ops = {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index d901a41d36e4..98a7313fea4a 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -797,8 +797,8 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.get_key_seq = rt2800_get_key_seq,
.set_rts_threshold = rt2800_set_rts_threshold,
- .sta_add = rt2x00mac_sta_add,
- .sta_remove = rt2x00mac_sta_remove,
+ .sta_add = rt2800_sta_add,
+ .sta_remove = rt2800_sta_remove,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2800_conf_tx,
.get_tsf = rt2800_get_tsf,
@@ -858,8 +858,6 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.config_erp = rt2800_config_erp,
.config_ant = rt2800_config_ant,
.config = rt2800_config,
- .sta_add = rt2800_sta_add,
- .sta_remove = rt2800_sta_remove,
};
static void rt2800usb_queue_init(struct data_queue *queue)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 1f38c338ca7a..a279a4363bc1 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -1457,10 +1457,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
#else
#define rt2x00mac_set_key NULL
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
-int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index a971bc7a6b63..c380c1f56ba6 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -739,8 +739,7 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return;
tx_queue_for_each(rt2x00dev, queue)
- if (!rt2x00queue_empty(queue))
- rt2x00queue_flush_queue(queue, drop);
+ rt2x00queue_flush_queue(queue, drop);
}
EXPORT_SYMBOL_GPL(rt2x00mac_flush);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index a6884e73d2ab..7c1f8f561d4a 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -1000,6 +1000,8 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
(queue->qid == QID_AC_BE) ||
(queue->qid == QID_AC_BK);
+ if (rt2x00queue_empty(queue))
+ return;
/*
* If we are not supposed to drop any pending
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 8fce371749d3..f22fec093f1d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -1783,7 +1783,7 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
bool scan = false, link = false, roam = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PsTdma type dismatch!!!, ");
+ "[BTCoex], PsTdma type mismatch!!!, ");
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"curPsTdma=%d, recordPsTdma=%d\n",
coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index 73ec31972944..279fe01bb55e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -2766,7 +2766,7 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
bool scan = false, link = false, roam = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+ "[BTCoex], PsTdma type mismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index 202597cf8915..b5d65872db84 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -1584,11 +1584,7 @@ static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist,
/* tdma and coex table */
btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
- if (BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
- wifi_status)
- btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
- else
- btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+ btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
}
static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist,
@@ -1991,16 +1987,9 @@ static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
wifi_rssi_state =
btc8821a1ant_wifi_rssi_state(btcoexist, 1, 2,
30, 0);
- if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
- (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- btc8821a1ant_limited_tx(btcoexist,
- NORMAL_EXEC, 1, 1,
- 0, 1);
- } else {
- btc8821a1ant_limited_tx(btcoexist,
- NORMAL_EXEC, 1, 1,
- 0, 1);
- }
+ btc8821a1ant_limited_tx(btcoexist,
+ NORMAL_EXEC, 1, 1,
+ 0, 1);
} else {
btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC,
0, 0, 0, 0);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 2202d5e18977..01a9d303603b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -2614,7 +2614,7 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
bool scan = false, link = false, roam = false;
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
+ "[BTCoex], PsTdma type mismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 8b6b07a936f5..b026e80940a4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -158,16 +158,6 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
{
- struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
-
- /* override ant_num / ant_path */
- if (mod_params->ant_sel) {
- rtlpriv->btcoexist.btc_info.ant_num =
- (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
-
- rtlpriv->btcoexist.btc_info.single_ant_path =
- (mod_params->ant_sel == 1 ? 0 : 1);
- }
return rtlpriv->btcoexist.btc_info.single_ant_path;
}
@@ -178,7 +168,6 @@ static u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
{
- struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
u8 num;
if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
@@ -186,10 +175,6 @@ static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
else
num = 1;
- /* override ant_num / ant_path */
- if (mod_params->ant_sel)
- num = (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1) + 1;
-
return num;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index fd7928fdbd1a..3f2295fdb02d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -2574,11 +2574,11 @@ void rtl92ee_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
rtlpriv->btcoexist.btc_info.btcoexist = 0;
rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8192E;
- rtlpriv->btcoexist.btc_info.ant_num = ANT_TOTAL_X2;
+ rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
} else {
rtlpriv->btcoexist.btc_info.btcoexist = 1;
rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8192E;
- rtlpriv->btcoexist.btc_info.ant_num = ANT_TOTAL_X1;
+ rtlpriv->btcoexist.btc_info.ant_num = ANT_X1;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index e7bbbc95cdb1..b4f3f91b590e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -848,6 +848,9 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
return false;
}
+ if (rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv);
+
bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL);
rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3));
@@ -2696,21 +2699,21 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
rtlpriv->btcoexist.btc_info.single_ant_path =
- (value & 0x40); /*0xc3[6]*/
+ (value & 0x40 ? ANT_AUX : ANT_MAIN); /*0xc3[6]*/
} else {
rtlpriv->btcoexist.btc_info.btcoexist = 0;
rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
- rtlpriv->btcoexist.btc_info.single_ant_path = 0;
+ rtlpriv->btcoexist.btc_info.single_ant_path = ANT_MAIN;
}
/* override ant_num / ant_path */
if (mod_params->ant_sel) {
rtlpriv->btcoexist.btc_info.ant_num =
- (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
+ (mod_params->ant_sel == 1 ? ANT_X1 : ANT_X2);
rtlpriv->btcoexist.btc_info.single_ant_path =
- (mod_params->ant_sel == 1 ? 0 : 1);
+ (mod_params->ant_sel == 1 ? ANT_AUX : ANT_MAIN);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index d27e33960e77..208010fcde21 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2823,6 +2823,11 @@ enum bt_ant_num {
ANT_X1 = 1,
};
+enum bt_ant_path {
+ ANT_MAIN = 0,
+ ANT_AUX = 1,
+};
+
enum bt_co_type {
BT_2WIRE = 0,
BT_ISSC_3WIRE = 1,
@@ -2837,11 +2842,6 @@ enum bt_co_type {
BT_RTL8812A = 11,
};
-enum bt_total_ant_num {
- ANT_TOTAL_X2 = 0,
- ANT_TOTAL_X1 = 1
-};
-
enum bt_cur_state {
BT_OFF = 0,
BT_ON = 1,
diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c
index d055099dadf1..c8ba148f8c6c 100644
--- a/drivers/net/wireless/rsi/rsi_91x_coex.c
+++ b/drivers/net/wireless/rsi/rsi_91x_coex.c
@@ -73,6 +73,7 @@ int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg)
switch (msg_type) {
case COMMON_CARD_READY_IND:
rsi_dbg(INFO_ZONE, "common card ready received\n");
+ common->hibernate_resume = false;
rsi_handle_card_ready(common, msg);
break;
case SLEEP_NOTIFY_IND:
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index 5dafd2e1306c..3644d7d99463 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -411,11 +411,30 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
if ((ieee80211_is_mgmt(wh->frame_control)) ||
(ieee80211_is_ctl(wh->frame_control)) ||
(ieee80211_is_qos_nullfunc(wh->frame_control))) {
+ if (ieee80211_is_assoc_req(wh->frame_control) ||
+ ieee80211_is_reassoc_req(wh->frame_control)) {
+ struct ieee80211_bss_conf *bss = &vif->bss_conf;
+
+ common->eapol4_confirm = false;
+ rsi_hal_send_sta_notify_frame(common,
+ RSI_IFTYPE_STATION,
+ STA_CONNECTED, bss->bssid,
+ bss->qos, bss->aid, 0,
+ vif);
+ }
+
q_num = MGMT_SOFT_Q;
skb->priority = q_num;
+
+ if (rsi_prepare_mgmt_desc(common, skb)) {
+ rsi_dbg(ERR_ZONE, "Failed to prepare desc\n");
+ goto xmit_fail;
+ }
} else {
if (ieee80211_is_data_qos(wh->frame_control)) {
- tid = (skb->data[24] & IEEE80211_QOS_TID);
+ u8 *qos = ieee80211_get_qos_ctl(wh);
+
+ tid = *qos & IEEE80211_QOS_CTL_TID_MASK;
skb->priority = TID_TO_WME_AC(tid);
} else {
tid = IEEE80211_NONQOS_TID;
@@ -433,6 +452,8 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
if (!rsta)
goto xmit_fail;
tx_params->sta_id = rsta->sta_id;
+ } else {
+ tx_params->sta_id = 0;
}
if (rsta) {
@@ -443,6 +464,14 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
tid, 0);
}
}
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+ q_num = MGMT_SOFT_Q;
+ skb->priority = q_num;
+ }
+ if (rsi_prepare_data_desc(common, skb)) {
+ rsi_dbg(ERR_ZONE, "Failed to prepare data desc\n");
+ goto xmit_fail;
+ }
}
if ((q_num < MGMT_SOFT_Q) &&
@@ -456,7 +485,7 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
}
rsi_core_queue_pkt(common, skb);
- rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thead <===\n", __func__);
+ rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
rsi_set_event(&common->tx_thread.event);
return;
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index de608ae365a4..0761e61591bd 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -45,7 +45,7 @@ int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb)
return status;
}
-static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
+int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
{
struct rsi_hw *adapter = common->priv;
struct ieee80211_hdr *wh = NULL;
@@ -55,7 +55,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
struct rsi_mgmt_desc *mgmt_desc;
struct skb_info *tx_params;
struct ieee80211_bss_conf *bss = NULL;
- struct xtended_desc *xtend_desc = NULL;
+ struct rsi_xtended_desc *xtend_desc = NULL;
u8 header_size;
u32 dword_align_bytes = 0;
@@ -69,7 +69,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
vif = tx_params->vif;
/* Update header size */
- header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc);
+ header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc);
if (header_size > skb_headroom(skb)) {
rsi_dbg(ERR_ZONE,
"%s: Failed to add extended descriptor\n",
@@ -92,7 +92,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
wh = (struct ieee80211_hdr *)&skb->data[header_size];
mgmt_desc = (struct rsi_mgmt_desc *)skb->data;
- xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ];
+ xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ];
rsi_set_len_qno(&mgmt_desc->len_qno, (skb->len - FRAME_DESC_SZ),
RSI_WIFI_MGMT_Q);
@@ -113,17 +113,6 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
if (conf_is_ht40(conf))
mgmt_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE);
- if (ieee80211_is_probe_req(wh->frame_control)) {
- if (!bss->assoc) {
- rsi_dbg(INFO_ZONE,
- "%s: blocking mgmt queue\n", __func__);
- mgmt_desc->misc_flags = RSI_DESC_REQUIRE_CFM_TO_HOST;
- xtend_desc->confirm_frame_type = PROBEREQ_CONFIRM;
- common->mgmt_q_block = true;
- rsi_dbg(INFO_ZONE, "Mgmt queue blocked\n");
- }
- }
-
if (ieee80211_is_probe_resp(wh->frame_control)) {
mgmt_desc->misc_flags |= (RSI_ADD_DELTA_TSF_VAP_ID |
RSI_FETCH_RETRY_CNT_FRM_HST);
@@ -149,7 +138,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
}
/* This function prepares descriptor for given data packet */
-static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
+int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
{
struct rsi_hw *adapter = common->priv;
struct ieee80211_vif *vif;
@@ -158,7 +147,7 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
struct skb_info *tx_params;
struct ieee80211_bss_conf *bss;
struct rsi_data_desc *data_desc;
- struct xtended_desc *xtend_desc;
+ struct rsi_xtended_desc *xtend_desc;
u8 ieee80211_size = MIN_802_11_HDR_LEN;
u8 header_size;
u8 vap_id = 0;
@@ -170,7 +159,7 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
bss = &vif->bss_conf;
tx_params = (struct skb_info *)info->driver_data;
- header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc);
+ header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc);
if (header_size > skb_headroom(skb)) {
rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
return -ENOSPC;
@@ -188,7 +177,7 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
data_desc = (struct rsi_data_desc *)skb->data;
memset(data_desc, 0, header_size);
- xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ];
+ xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ];
wh = (struct ieee80211_hdr *)&skb->data[header_size];
seq_num = IEEE80211_SEQ_TO_SN(le16_to_cpu(wh->seq_ctrl));
@@ -243,6 +232,18 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
data_desc->misc_flags |= RSI_FETCH_RETRY_CNT_FRM_HST;
#define EAPOL_RETRY_CNT 15
xtend_desc->retry_cnt = EAPOL_RETRY_CNT;
+
+ if (common->eapol4_confirm)
+ skb->priority = VO_Q;
+ else
+ rsi_set_len_qno(&data_desc->len_qno,
+ (skb->len - FRAME_DESC_SZ),
+ RSI_WIFI_MGMT_Q);
+ if ((skb->len - header_size) == EAPOL4_PACKET_LEN) {
+ data_desc->misc_flags |=
+ RSI_DESC_REQUIRE_CFM_TO_HOST;
+ xtend_desc->confirm_frame_type = EAPOL4_CONFIRM;
+ }
}
data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff);
@@ -282,8 +283,11 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
struct rsi_hw *adapter = common->priv;
struct ieee80211_vif *vif;
struct ieee80211_tx_info *info;
+ struct skb_info *tx_params;
struct ieee80211_bss_conf *bss;
+ struct ieee80211_hdr *wh;
int status = -EINVAL;
+ u8 header_size;
if (!skb)
return 0;
@@ -295,16 +299,15 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
goto err;
vif = info->control.vif;
bss = &vif->bss_conf;
+ tx_params = (struct skb_info *)info->driver_data;
+ header_size = tx_params->internal_hdr_size;
+ wh = (struct ieee80211_hdr *)&skb->data[header_size];
if (((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
(!bss->assoc))
goto err;
- status = rsi_prepare_data_desc(common, skb);
- if (status)
- goto err;
-
status = rsi_send_pkt_to_bus(common, skb);
if (status)
rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n", __func__);
@@ -327,12 +330,18 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
struct sk_buff *skb)
{
struct rsi_hw *adapter = common->priv;
+ struct ieee80211_bss_conf *bss;
+ struct ieee80211_hdr *wh;
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
+ struct rsi_mgmt_desc *mgmt_desc;
+ struct rsi_xtended_desc *xtend_desc;
int status = -E2BIG;
+ u8 header_size;
info = IEEE80211_SKB_CB(skb);
tx_params = (struct skb_info *)info->driver_data;
+ header_size = tx_params->internal_hdr_size;
if (tx_params->flags & INTERNAL_MGMT_PKT) {
status = adapter->host_intf_ops->write_pkt(common->priv,
@@ -346,15 +355,25 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
return status;
}
- if (FRAME_DESC_SZ > skb_headroom(skb))
- goto err;
+ bss = &info->control.vif->bss_conf;
+ wh = (struct ieee80211_hdr *)&skb->data[header_size];
+ mgmt_desc = (struct rsi_mgmt_desc *)skb->data;
+ xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ];
+
+ /* Indicate to firmware to give cfm for probe */
+ if (ieee80211_is_probe_req(wh->frame_control) && !bss->assoc) {
+ rsi_dbg(INFO_ZONE,
+ "%s: blocking mgmt queue\n", __func__);
+ mgmt_desc->misc_flags = RSI_DESC_REQUIRE_CFM_TO_HOST;
+ xtend_desc->confirm_frame_type = PROBEREQ_CONFIRM;
+ common->mgmt_q_block = true;
+ rsi_dbg(INFO_ZONE, "Mgmt queue blocked\n");
+ }
- rsi_prepare_mgmt_desc(common, skb);
status = rsi_send_pkt_to_bus(common, skb);
if (status)
rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__);
-err:
rsi_indicate_tx_status(common->priv, skb, status);
return status;
}
@@ -616,28 +635,32 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content,
u32 content_size)
{
struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops;
- struct bl_header bl_hdr;
+ struct bl_header *bl_hdr;
u32 write_addr, write_len;
int status;
- bl_hdr.flags = 0;
- bl_hdr.image_no = cpu_to_le32(adapter->priv->coex_mode);
- bl_hdr.check_sum = cpu_to_le32(
- *(u32 *)&flash_content[CHECK_SUM_OFFSET]);
- bl_hdr.flash_start_address = cpu_to_le32(
- *(u32 *)&flash_content[ADDR_OFFSET]);
- bl_hdr.flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]);
+ bl_hdr = kzalloc(sizeof(*bl_hdr), GFP_KERNEL);
+ if (!bl_hdr)
+ return -ENOMEM;
+
+ bl_hdr->flags = 0;
+ bl_hdr->image_no = cpu_to_le32(adapter->priv->coex_mode);
+ bl_hdr->check_sum =
+ cpu_to_le32(*(u32 *)&flash_content[CHECK_SUM_OFFSET]);
+ bl_hdr->flash_start_address =
+ cpu_to_le32(*(u32 *)&flash_content[ADDR_OFFSET]);
+ bl_hdr->flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]);
write_len = sizeof(struct bl_header);
if (adapter->rsi_host_intf == RSI_HOST_INTF_USB) {
write_addr = PING_BUFFER_ADDRESS;
status = hif_ops->write_reg_multiple(adapter, write_addr,
- (u8 *)&bl_hdr, write_len);
+ (u8 *)bl_hdr, write_len);
if (status < 0) {
rsi_dbg(ERR_ZONE,
"%s: Failed to load Version/CRC structure\n",
__func__);
- return status;
+ goto fail;
}
} else {
write_addr = PING_BUFFER_ADDRESS >> 16;
@@ -646,20 +669,23 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content,
rsi_dbg(ERR_ZONE,
"%s: Unable to set ms word to common reg\n",
__func__);
- return status;
+ goto fail;
}
write_addr = RSI_SD_REQUEST_MASTER |
(PING_BUFFER_ADDRESS & 0xFFFF);
status = hif_ops->write_reg_multiple(adapter, write_addr,
- (u8 *)&bl_hdr, write_len);
+ (u8 *)bl_hdr, write_len);
if (status < 0) {
rsi_dbg(ERR_ZONE,
"%s: Failed to load Version/CRC structure\n",
__func__);
- return status;
+ goto fail;
}
}
- return 0;
+ status = 0;
+fail:
+ kfree(bl_hdr);
+ return status;
}
static u32 read_flash_capacity(struct rsi_hw *adapter)
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 32f5cb46fd4f..3faa0449a5ef 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -614,7 +614,7 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
/* Power save parameters */
if (changed & IEEE80211_CONF_CHANGE_PS) {
- struct ieee80211_vif *vif;
+ struct ieee80211_vif *vif, *sta_vif = NULL;
unsigned long flags;
int i, set_ps = 1;
@@ -628,13 +628,17 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
set_ps = 0;
break;
}
+ if ((vif->type == NL80211_IFTYPE_STATION ||
+ vif->type == NL80211_IFTYPE_P2P_CLIENT) &&
+ (!sta_vif || vif->bss_conf.assoc))
+ sta_vif = vif;
}
- if (set_ps) {
+ if (set_ps && sta_vif) {
spin_lock_irqsave(&adapter->ps_lock, flags);
if (conf->flags & IEEE80211_CONF_PS)
- rsi_enable_ps(adapter, vif);
+ rsi_enable_ps(adapter, sta_vif);
else
- rsi_disable_ps(adapter, vif);
+ rsi_disable_ps(adapter, sta_vif);
spin_unlock_irqrestore(&adapter->ps_lock, flags);
}
}
@@ -737,7 +741,8 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid,
bss_conf->qos,
bss_conf->aid,
- NULL, 0, vif);
+ NULL, 0,
+ bss_conf->assoc_capability, vif);
adapter->ps_info.dtim_interval_duration = bss->dtim_period;
adapter->ps_info.listen_interval = conf->listen_interval;
@@ -906,14 +911,25 @@ static int rsi_hal_key_config(struct ieee80211_hw *hw,
}
}
- return rsi_hal_load_key(adapter->priv,
- key->key,
- key->keylen,
- key_type,
- key->keyidx,
- key->cipher,
- sta_id,
- vif);
+ status = rsi_hal_load_key(adapter->priv,
+ key->key,
+ key->keylen,
+ key_type,
+ key->keyidx,
+ key->cipher,
+ sta_id,
+ vif);
+ if (status)
+ return status;
+
+ if (vif->type == NL80211_IFTYPE_STATION && key->key &&
+ (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP40)) {
+ if (!rsi_send_block_unblock_frame(adapter->priv, false))
+ adapter->priv->hw_data_qs_blocked = false;
+ }
+
+ return 0;
}
/**
@@ -1391,7 +1407,7 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
rsi_dbg(INFO_ZONE, "Indicate bss status to device\n");
rsi_inform_bss_status(common, RSI_OPMODE_AP, 1,
sta->addr, sta->wme, sta->aid,
- sta, sta_idx, vif);
+ sta, sta_idx, 0, vif);
if (common->key) {
struct ieee80211_key_conf *key = common->key;
@@ -1469,7 +1485,7 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
rsi_inform_bss_status(common, RSI_OPMODE_AP, 0,
sta->addr, sta->wme,
sta->aid, sta, sta_idx,
- vif);
+ 0, vif);
rsta->sta = NULL;
rsta->sta_id = -1;
for (cnt = 0; cnt < IEEE80211_NUM_TIDS; cnt++)
@@ -1788,15 +1804,21 @@ int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan)
struct rsi_common *common = adapter->priv;
u16 triggers = 0;
u16 rx_filter_word = 0;
- struct ieee80211_bss_conf *bss = &adapter->vifs[0]->bss_conf;
+ struct ieee80211_bss_conf *bss = NULL;
rsi_dbg(INFO_ZONE, "Config WoWLAN to device\n");
+ if (!adapter->vifs[0])
+ return -EINVAL;
+
+ bss = &adapter->vifs[0]->bss_conf;
+
if (WARN_ON(!wowlan)) {
rsi_dbg(ERR_ZONE, "WoW triggers not enabled\n");
return -EINVAL;
}
+ common->wow_flags |= RSI_WOW_ENABLED;
triggers = rsi_wow_map_triggers(common, wowlan);
if (!triggers) {
rsi_dbg(ERR_ZONE, "%s:No valid WoW triggers\n", __func__);
@@ -1819,7 +1841,6 @@ int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan)
rx_filter_word = (ALLOW_DATA_ASSOC_PEER | DISALLOW_BEACONS);
rsi_send_rx_filter_frame(common, rx_filter_word);
- common->wow_flags |= RSI_WOW_ENABLED;
return 0;
}
@@ -1939,9 +1960,8 @@ int rsi_mac80211_attach(struct rsi_common *common)
hw->uapsd_queues = RSI_IEEE80211_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
- hw->max_tx_aggregation_subframes = 6;
- rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
- rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
+ hw->max_tx_aggregation_subframes = RSI_MAX_TX_AGGR_FRMS;
+ hw->max_rx_aggregation_subframes = RSI_MAX_RX_AGGR_FRMS;
hw->rate_control_algorithm = "AARF";
SET_IEEE80211_PERM_ADDR(hw, common->mac_addr);
@@ -1962,10 +1982,15 @@ int rsi_mac80211_attach(struct rsi_common *common)
wiphy->available_antennas_rx = 1;
wiphy->available_antennas_tx = 1;
+
+ rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
wiphy->bands[NL80211_BAND_2GHZ] =
&adapter->sbands[NL80211_BAND_2GHZ];
- wiphy->bands[NL80211_BAND_5GHZ] =
- &adapter->sbands[NL80211_BAND_5GHZ];
+ if (common->num_supp_bands > 1) {
+ rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
+ wiphy->bands[NL80211_BAND_5GHZ] =
+ &adapter->sbands[NL80211_BAND_5GHZ];
+ }
/* AP Parameters */
wiphy->max_ap_assoc_sta = rsi_max_ap_stas[common->oper_mode - 1];
@@ -1991,6 +2016,9 @@ int rsi_mac80211_attach(struct rsi_common *common)
wiphy->iface_combinations = rsi_iface_combinations;
wiphy->n_iface_combinations = ARRAY_SIZE(rsi_iface_combinations);
+ if (common->coex_mode > 1)
+ wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
status = ieee80211_register_hw(hw);
if (status)
return status;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index c21fca750fd4..0757adcb3f4a 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -325,8 +325,8 @@ static int rsi_load_radio_caps(struct rsi_common *common)
radio_caps->channel_num = common->channel;
radio_caps->rf_model = RSI_RF_TYPE;
+ radio_caps->radio_cfg_info = RSI_LMAC_CLOCK_80MHZ;
if (common->channel_width == BW_40MHZ) {
- radio_caps->radio_cfg_info = RSI_LMAC_CLOCK_80MHZ;
radio_caps->radio_cfg_info |= RSI_ENABLE_40MHZ;
if (common->fsm_state == FSM_MAC_INIT_DONE) {
@@ -454,14 +454,10 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common,
*
* Return: status: 0 on success, corresponding negative error code on failure.
*/
-static int rsi_hal_send_sta_notify_frame(struct rsi_common *common,
- enum opmode opmode,
- u8 notify_event,
- const unsigned char *bssid,
- u8 qos_enable,
- u16 aid,
- u16 sta_id,
- struct ieee80211_vif *vif)
+int rsi_hal_send_sta_notify_frame(struct rsi_common *common, enum opmode opmode,
+ u8 notify_event, const unsigned char *bssid,
+ u8 qos_enable, u16 aid, u16 sta_id,
+ struct ieee80211_vif *vif)
{
struct sk_buff *skb = NULL;
struct rsi_peer_notify *peer_notify;
@@ -1328,6 +1324,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
u16 aid,
struct ieee80211_sta *sta,
u16 sta_id,
+ u16 assoc_cap,
struct ieee80211_vif *vif)
{
if (status) {
@@ -1342,10 +1339,10 @@ void rsi_inform_bss_status(struct rsi_common *common,
vif);
if (common->min_rate == 0xffff)
rsi_send_auto_rate_request(common, sta, sta_id, vif);
- if (opmode == RSI_OPMODE_STA) {
- if (!rsi_send_block_unblock_frame(common, false))
- common->hw_data_qs_blocked = false;
- }
+ if (opmode == RSI_OPMODE_STA &&
+ !(assoc_cap & WLAN_CAPABILITY_PRIVACY) &&
+ !rsi_send_block_unblock_frame(common, false))
+ common->hw_data_qs_blocked = false;
} else {
if (opmode == RSI_OPMODE_STA)
common->hw_data_qs_blocked = true;
@@ -1850,10 +1847,19 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
__func__);
return rsi_handle_card_ready(common, msg);
case TX_STATUS_IND:
- if (msg[15] == PROBEREQ_CONFIRM) {
+ switch (msg[RSI_TX_STATUS_TYPE]) {
+ case PROBEREQ_CONFIRM:
common->mgmt_q_block = false;
rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n",
__func__);
+ break;
+ case EAPOL4_CONFIRM:
+ if (msg[RSI_TX_STATUS]) {
+ common->eapol4_confirm = true;
+ if (!rsi_send_block_unblock_frame(common,
+ false))
+ common->hw_data_qs_blocked = false;
+ }
}
break;
case BEACON_EVENT_IND:
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index d76e69c0beaa..416981d99229 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -170,7 +170,6 @@ static void rsi_reset_card(struct sdio_func *pfunction)
int err;
struct mmc_card *card = pfunction->card;
struct mmc_host *host = card->host;
- s32 bit = (fls(host->ocr_avail) - 1);
u8 cmd52_resp;
u32 clock, resp, i;
u16 rca;
@@ -190,7 +189,6 @@ static void rsi_reset_card(struct sdio_func *pfunction)
msleep(20);
/* Initialize the SDIO card */
- host->ios.vdd = bit;
host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
host->ios.power_mode = MMC_POWER_UP;
@@ -660,8 +658,6 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
if (!data)
return -ENOMEM;
- data = PTR_ALIGN(data, 8);
-
ms_addr = (addr >> 16);
status = rsi_sdio_master_access_msword(adapter, ms_addr);
if (status < 0) {
@@ -724,8 +720,6 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter,
if (!data_aligned)
return -ENOMEM;
- data_aligned = PTR_ALIGN(data_aligned, 8);
-
if (size == 2) {
*data_aligned = ((data << 16) | (data & 0xFFFF));
} else if (size == 1) {
@@ -1042,17 +1036,21 @@ static void ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data,
/*This function resets and re-initializes the chip.*/
static void rsi_reset_chip(struct rsi_hw *adapter)
{
- __le32 data;
+ u8 *data;
u8 sdio_interrupt_status = 0;
u8 request = 1;
int ret;
+ data = kzalloc(sizeof(u32), GFP_KERNEL);
+ if (!data)
+ return;
+
rsi_dbg(INFO_ZONE, "Writing disable to wakeup register\n");
ret = rsi_sdio_write_register(adapter, 0, SDIO_WAKEUP_REG, &request);
if (ret < 0) {
rsi_dbg(ERR_ZONE,
"%s: Failed to write SDIO wakeup register\n", __func__);
- return;
+ goto err;
}
msleep(20);
ret = rsi_sdio_read_register(adapter, RSI_FN1_INT_REGISTER,
@@ -1060,7 +1058,7 @@ static void rsi_reset_chip(struct rsi_hw *adapter)
if (ret < 0) {
rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n",
__func__);
- return;
+ goto err;
}
rsi_dbg(INFO_ZONE, "%s: Intr Status Register value = %d\n",
__func__, sdio_interrupt_status);
@@ -1070,17 +1068,17 @@ static void rsi_reset_chip(struct rsi_hw *adapter)
rsi_dbg(ERR_ZONE,
"%s: Unable to set ms word to common reg\n",
__func__);
- return;
+ goto err;
}
- data = TA_HOLD_THREAD_VALUE;
+ put_unaligned_le32(TA_HOLD_THREAD_VALUE, data);
if (rsi_sdio_write_register_multiple(adapter, TA_HOLD_THREAD_REG |
RSI_SD_REQUEST_MASTER,
- (u8 *)&data, 4)) {
+ data, 4)) {
rsi_dbg(ERR_ZONE,
"%s: Unable to hold Thread-Arch processor threads\n",
__func__);
- return;
+ goto err;
}
/* This msleep will ensure Thread-Arch processor to go to hold
@@ -1101,6 +1099,9 @@ static void rsi_reset_chip(struct rsi_hw *adapter)
* read write operations to complete for chip reset.
*/
msleep(500);
+err:
+ kfree(data);
+ return;
}
/**
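rsi_reset_chip() above follows the same rule and additionally uses put_unaligned_le32() to fill the heap buffer, rather than taking the address of an on-stack __le32, so the byte order of the value written to TA_HOLD_THREAD_REG stays explicit. Stripped down to the essentials (error handling omitted):

    /* Illustrative only: build the little-endian payload in a kzalloc()'d
     * buffer before the multi-byte register write. */
    u8 *data = kzalloc(sizeof(u32), GFP_KERNEL);

    if (data) {
    	put_unaligned_le32(TA_HOLD_THREAD_VALUE, data);
    	rsi_sdio_write_register_multiple(adapter,
    					 TA_HOLD_THREAD_REG | RSI_SD_REQUEST_MASTER,
    					 data, 4);
    	kfree(data);
    }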
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 7b8bae313aa9..6ce6b754df12 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -687,6 +687,14 @@ static int rsi_reset_card(struct rsi_hw *adapter)
*/
msleep(100);
+ ret = rsi_usb_master_reg_write(adapter, SWBL_REGOUT,
+ RSI_FW_WDT_DISABLE_REQ,
+ RSI_COMMON_REG_SIZE);
+ if (ret < 0) {
+ rsi_dbg(ERR_ZONE, "Disabling firmware watchdog timer failed\n");
+ goto fail;
+ }
+
ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1,
RSI_ULP_WRITE_2, 32);
if (ret < 0)
diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h
index 238ee96434ec..ad903b22440e 100644
--- a/drivers/net/wireless/rsi/rsi_boot_params.h
+++ b/drivers/net/wireless/rsi/rsi_boot_params.h
@@ -46,7 +46,8 @@
(((TA_PLL_M_VAL_20 + 1) * 40) / \
((TA_PLL_N_VAL_20 + 1) * (TA_PLL_P_VAL_20 + 1)))
#define VALID_20 \
- (WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | WIFI_SWITCH_CLK_CONFIGS)
+ (WIFI_TAPLL_CONFIGS | WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | \
+ WIFI_SWITCH_CLK_CONFIGS | BOOTUP_MODE_INFO | CRYSTAL_GOOD_TIME)
#define UMAC_CLK_40BW \
(((TA_PLL_M_VAL_40 + 1) * 40) / \
((TA_PLL_N_VAL_40 + 1) * (TA_PLL_P_VAL_40 + 1)))
diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h
index 786dccd0b732..327638cdd30b 100644
--- a/drivers/net/wireless/rsi/rsi_hal.h
+++ b/drivers/net/wireless/rsi/rsi_hal.h
@@ -115,6 +115,7 @@
#define FW_FLASH_OFFSET 0x820
#define LMAC_VER_OFFSET (FW_FLASH_OFFSET + 0x200)
#define MAX_DWORD_ALIGN_BYTES 64
+#define RSI_COMMON_REG_SIZE 2
struct bl_header {
__le32 flags;
@@ -167,6 +168,8 @@ struct rsi_bt_desc {
} __packed;
int rsi_hal_device_init(struct rsi_hw *adapter);
+int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb);
+int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb);
int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb);
int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb);
int rsi_send_bt_pkt(struct rsi_common *common, struct sk_buff *skb);
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index ef4fa323694b..a084f224bb03 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -190,12 +190,6 @@ struct cqm_info {
u32 rssi_hyst;
};
-struct xtended_desc {
- u8 confirm_frame_type;
- u8 retry_cnt;
- u16 reserved;
-};
-
enum rsi_dfs_regions {
RSI_REGION_FCC = 0,
RSI_REGION_ETSI,
@@ -293,6 +287,7 @@ struct rsi_common {
struct timer_list roc_timer;
struct ieee80211_vif *roc_vif;
+ bool eapol4_confirm;
void *bt_adapter;
};
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index cf6567ae5bbe..14620935c925 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -33,6 +33,7 @@
#define WMM_SHORT_SLOT_TIME 9
#define SIFS_DURATION 16
+#define EAPOL4_PACKET_LEN 0x85
#define KEY_TYPE_CLEAR 0
#define RSI_PAIRWISE_KEY 1
#define RSI_GROUP_KEY 2
@@ -62,9 +63,12 @@
#define RX_DOT11_MGMT 0x02
#define TX_STATUS_IND 0x04
#define BEACON_EVENT_IND 0x08
+#define EAPOL4_CONFIRM 1
#define PROBEREQ_CONFIRM 2
#define CARD_READY_IND 0x00
#define SLEEP_NOTIFY_IND 0x06
+#define RSI_TX_STATUS_TYPE 15
+#define RSI_TX_STATUS 12
#define RSI_DELETE_PEER 0x0
#define RSI_ADD_PEER 0x1
@@ -221,6 +225,9 @@
#define RSI_WOW_DISCONNECT BIT(5)
#endif
+#define RSI_MAX_TX_AGGR_FRMS 8
+#define RSI_MAX_RX_AGGR_FRMS 8
+
enum opmode {
RSI_OPMODE_UNSUPPORTED = -1,
RSI_OPMODE_AP = 0,
@@ -301,6 +308,12 @@ struct rsi_mac_frame {
#define ENCAP_MGMT_PKT BIT(7)
#define DESC_IMMEDIATE_WAKEUP BIT(15)
+struct rsi_xtended_desc {
+ u8 confirm_frame_type;
+ u8 retry_cnt;
+ u16 reserved;
+};
+
struct rsi_cmd_desc_dword0 {
__le16 len_qno;
u8 frame_type;
@@ -654,10 +667,14 @@ int rsi_set_channel(struct rsi_common *common,
struct ieee80211_channel *channel);
int rsi_send_vap_dynamic_update(struct rsi_common *common);
int rsi_send_block_unblock_frame(struct rsi_common *common, bool event);
+int rsi_hal_send_sta_notify_frame(struct rsi_common *common, enum opmode opmode,
+ u8 notify_event, const unsigned char *bssid,
+ u8 qos_enable, u16 aid, u16 sta_id,
+ struct ieee80211_vif *vif);
void rsi_inform_bss_status(struct rsi_common *common, enum opmode opmode,
u8 status, const u8 *addr, u8 qos_enable, u16 aid,
struct ieee80211_sta *sta, u16 sta_id,
- struct ieee80211_vif *vif);
+ u16 assoc_cap, struct ieee80211_vif *vif);
void rsi_indicate_pkt_to_os(struct rsi_common *common, struct sk_buff *skb);
int rsi_mac80211_attach(struct rsi_common *common);
void rsi_indicate_tx_status(struct rsi_hw *common, struct sk_buff *skb,
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index ead8e7c4df3a..353dbdf31e75 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -87,7 +87,7 @@ enum sdio_interrupt_type {
#define TA_SOFT_RST_CLR 0
#define TA_SOFT_RST_SET BIT(0)
#define TA_PC_ZERO 0
-#define TA_HOLD_THREAD_VALUE cpu_to_le32(0xF)
+#define TA_HOLD_THREAD_VALUE 0xF
#define TA_RELEASE_THREAD_VALUE cpu_to_le32(0xF)
#define TA_BASE_ADDR 0x2200
#define MISC_CFG_BASE_ADDR 0x4105
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
index a88d59295a98..b6fe79f0a513 100644
--- a/drivers/net/wireless/rsi/rsi_usb.h
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -26,6 +26,7 @@
#define RSI_USB_READY_MAGIC_NUM 0xab
#define FW_STATUS_REG 0x41050012
#define RSI_TA_HOLD_REG 0x22000844
+#define RSI_FW_WDT_DISABLE_REQ 0x69
#define USB_VENDOR_REGISTER_READ 0x15
#define USB_VENDOR_REGISTER_WRITE 0x16
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index e9050b41157a..f7b1b0062db3 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -1069,7 +1069,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
}
if (skb->len < sizeof(struct ieee80211_pspoll)) {
- wiphy_warn(priv->hw->wiphy, "Mailformed SDU rx'ed. Size is lesser than IEEE header.\n");
+ wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is lesser than IEEE header.\n");
goto drop;
}
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 1f727babbea0..6dbe61d47dc3 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -155,17 +155,11 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
struct mmc_card *card = func->card;
ret = pm_runtime_get_sync(&card->dev);
- if (ret) {
- /*
- * Runtime PM might be temporarily disabled, or the device
- * might have a positive reference counter. Make sure it is
- * really powered on.
- */
- ret = mmc_power_restore_host(card->host);
- if (ret < 0) {
- pm_runtime_put_sync(&card->dev);
- goto out;
- }
+ if (ret < 0) {
+ pm_runtime_put_noidle(&card->dev);
+ dev_err(glue->dev, "%s: failed to get_sync(%d)\n",
+ __func__, ret);
+ goto out;
}
sdio_claim_host(func);
@@ -178,7 +172,6 @@ out:
static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
{
- int ret;
struct sdio_func *func = dev_to_sdio_func(glue->dev);
struct mmc_card *card = func->card;
@@ -186,16 +179,8 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
sdio_disable_func(func);
sdio_release_host(func);
- /* Power off the card manually in case it wasn't powered off above */
- ret = mmc_power_save_host(card->host);
- if (ret < 0)
- goto out;
-
/* Let runtime PM know the card is powered off */
- pm_runtime_put_sync(&card->dev);
-
-out:
- return ret;
+ return pm_runtime_put_sync(&card->dev);
}
static int wl12xx_sdio_set_power(struct device *child, bool enable)
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 85997184e047..9d36473dc2a2 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -103,8 +103,7 @@ config NVDIMM_DAX
Select Y if unsure
config OF_PMEM
- # FIXME: make tristate once OF_NUMA dependency removed
- bool "Device-tree support for persistent memory regions"
+ tristate "Device-tree support for persistent memory regions"
depends on OF
default LIBNVDIMM
help
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index e00d45522b80..8d348b22ba45 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+ int rc = validate_dimm(ndd), cmd_rc = 0;
struct nd_cmd_get_config_data_hdr *cmd;
struct nvdimm_bus_descriptor *nd_desc;
- int rc = validate_dimm(ndd);
u32 max_cmd_size, config_size;
size_t offset;
@@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
cmd->in_offset = offset;
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
ND_CMD_GET_CONFIG_DATA, cmd,
- cmd->in_length + sizeof(*cmd), NULL);
- if (rc || cmd->status) {
- rc = -ENXIO;
+ cmd->in_length + sizeof(*cmd), &cmd_rc);
+ if (rc < 0)
+ break;
+ if (cmd_rc < 0) {
+ rc = cmd_rc;
break;
}
memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
@@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len)
{
- int rc = validate_dimm(ndd);
size_t max_cmd_size, buf_offset;
struct nd_cmd_set_config_hdr *cmd;
+ int rc = validate_dimm(ndd), cmd_rc = 0;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
@@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
for (buf_offset = 0; len; len -= cmd->in_length,
buf_offset += cmd->in_length) {
size_t cmd_size;
- u32 *status;
cmd->in_offset = offset + buf_offset;
cmd->in_length = min(max_cmd_size, len);
@@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
/* status is output in the last 4-bytes of the command buffer */
cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
- status = ((void *) cmd) + cmd_size - sizeof(u32);
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
- if (rc || *status) {
- rc = rc ? rc : -ENXIO;
+ ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
+ if (rc < 0)
+ break;
+ if (cmd_rc < 0) {
+ rc = cmd_rc;
break;
}
}
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 85013bad35de..0a701837dfc0 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -67,7 +67,7 @@ static int of_pmem_region_probe(struct platform_device *pdev)
*/
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.attr_groups = region_attr_groups;
- ndr_desc.numa_node = of_node_to_nid(np);
+ ndr_desc.numa_node = dev_to_node(&pdev->dev);
ndr_desc.res = &pdev->resource[i];
ndr_desc.of_node = np;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index b979cf3bce65..88a8b5916624 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -27,7 +27,7 @@ config NVME_FABRICS
config NVME_RDMA
tristate "NVM Express over Fabrics RDMA host driver"
- depends on INFINIBAND && BLOCK
+ depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
select NVME_CORE
select NVME_FABRICS
select SG_POOL
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 197a6ba9700f..99b857e5a7a9 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -99,6 +99,7 @@ static struct class *nvme_subsys_class;
static void nvme_ns_remove(struct nvme_ns *ns);
static int nvme_revalidate_disk(struct gendisk *disk);
+static void nvme_put_subsystem(struct nvme_subsystem *subsys);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
@@ -117,7 +118,8 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
ret = nvme_reset_ctrl(ctrl);
if (!ret) {
flush_work(&ctrl->reset_work);
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (ctrl->state != NVME_CTRL_LIVE &&
+ ctrl->state != NVME_CTRL_ADMIN_ONLY)
ret = -ENETRESET;
}
@@ -350,6 +352,7 @@ static void nvme_free_ns_head(struct kref *ref)
ida_simple_remove(&head->subsys->ns_ida, head->instance);
list_del_init(&head->entry);
cleanup_srcu_struct(&head->srcu);
+ nvme_put_subsystem(head->subsys);
kfree(head);
}
@@ -376,6 +379,15 @@ static void nvme_put_ns(struct nvme_ns *ns)
kref_put(&ns->kref, nvme_free_ns);
}
+static inline void nvme_clear_nvme_request(struct request *req)
+{
+ if (!(req->rq_flags & RQF_DONTPREP)) {
+ nvme_req(req)->retries = 0;
+ nvme_req(req)->flags = 0;
+ req->rq_flags |= RQF_DONTPREP;
+ }
+}
+
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
@@ -392,6 +404,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
return req;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
+ nvme_clear_nvme_request(req);
nvme_req(req)->cmd = cmd;
return req;
@@ -608,11 +621,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
{
blk_status_t ret = BLK_STS_OK;
- if (!(req->rq_flags & RQF_DONTPREP)) {
- nvme_req(req)->retries = 0;
- nvme_req(req)->flags = 0;
- req->rq_flags |= RQF_DONTPREP;
- }
+ nvme_clear_nvme_request(req);
switch (req_op(req)) {
case REQ_OP_DRV_IN:
@@ -742,6 +751,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
return PTR_ERR(req);
req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ nvme_req(req)->flags |= NVME_REQ_USERCMD;
if (ubuffer && bufflen) {
ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -757,6 +767,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
ret = PTR_ERR(meta);
goto out_unmap;
}
+ req->cmd_flags |= REQ_INTEGRITY;
}
}
@@ -826,7 +837,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
}
}
-void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
+static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
if (unlikely(ctrl->kato == 0))
return;
@@ -836,7 +847,6 @@ void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
-EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
@@ -1103,7 +1113,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
}
if (ctrl->effects)
- effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
+ effects = le32_to_cpu(ctrl->effects->acs[opcode]);
else
effects = nvme_known_admin_effects(opcode);
@@ -2220,7 +2230,7 @@ out_unlock:
int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 log_page, void *log,
- size_t size, size_t offset)
+ size_t size, u64 offset)
{
struct nvme_command c = { };
unsigned long dwlen = size / 4 - 1;
@@ -2235,8 +2245,8 @@ int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
c.get_log_page.lid = log_page;
c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
- c.get_log_page.lpol = cpu_to_le32(offset & ((1ULL << 32) - 1));
- c.get_log_page.lpou = cpu_to_le32(offset >> 32ULL);
+ c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
+ c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
@@ -2833,7 +2843,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
goto out_free_head;
head->instance = ret;
INIT_LIST_HEAD(&head->list);
- init_srcu_struct(&head->srcu);
+ ret = init_srcu_struct(&head->srcu);
+ if (ret)
+ goto out_ida_remove;
head->subsys = ctrl->subsys;
head->ns_id = nsid;
kref_init(&head->ref);
@@ -2852,9 +2864,13 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
goto out_cleanup_srcu;
list_add_tail(&head->entry, &ctrl->subsys->nsheads);
+
+ kref_get(&ctrl->subsys->ref);
+
return head;
out_cleanup_srcu:
cleanup_srcu_struct(&head->srcu);
+out_ida_remove:
ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
kfree(head);
@@ -2988,31 +3004,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (nvme_init_ns_head(ns, nsid, id))
goto out_free_id;
nvme_setup_streams_ns(ctrl, ns);
-
-#ifdef CONFIG_NVME_MULTIPATH
- /*
- * If multipathing is enabled we need to always use the subsystem
- * instance number for numbering our devices to avoid conflicts
- * between subsystems that have multiple controllers and thus use
- * the multipath-aware subsystem node and those that have a single
- * controller and use the controller node directly.
- */
- if (ns->head->disk) {
- sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
- ctrl->cntlid, ns->head->instance);
- flags = GENHD_FL_HIDDEN;
- } else {
- sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
- ns->head->instance);
- }
-#else
- /*
- * But without the multipath code enabled, multiple controller per
- * subsystems are visible as devices and thus we cannot use the
- * subsystem instance.
- */
- sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
-#endif
+ nvme_set_disk_name(disk_name, ns, ctrl, &flags);
if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
if (nvme_nvm_register(ns, disk_name, node)) {
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 8f0f34d06d46..7ae732a77fe8 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -536,6 +536,85 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
return NULL;
}
+blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live, bool is_connected)
+{
+ struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+ if (likely(ctrl->state == NVME_CTRL_LIVE && is_connected))
+ return BLK_STS_OK;
+
+ switch (ctrl->state) {
+ case NVME_CTRL_DELETING:
+ goto reject_io;
+
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_CONNECTING:
+ if (!is_connected)
+ /*
+ * This is the case of starting a new
+ * association but connectivity was lost
+ * before it was fully created. We need to
+ * error the commands used to initialize the
+ * controller so the reconnect can go into a
+ * retry attempt. The commands should all be
+ * marked REQ_FAILFAST_DRIVER, which will hit
+ * the reject path below. Anything else will
+ * be queued while the state settles.
+ */
+ goto reject_or_queue_io;
+
+ if ((queue_live &&
+ !(nvme_req(rq)->flags & NVME_REQ_USERCMD)) ||
+ (!queue_live && blk_rq_is_passthrough(rq) &&
+ cmd->common.opcode == nvme_fabrics_command &&
+ cmd->fabrics.fctype == nvme_fabrics_type_connect))
+ /*
+ * If the queue is live, allow only commands that
+ * are internally generated to pass through. These
+ * are commands on the admin queue to initialize
+ * the controller. This will reject any ioctl
+ * admin cmds received while initializing.
+ *
+ * If the queue is not live, allow only a
+ * connect command. This will reject any ioctl
+ * admin cmd as well as initialization commands
+ * if the controller reverted the queue to non-live.
+ */
+ return BLK_STS_OK;
+
+ /*
+ * fall-thru to the reject_or_queue_io clause
+ */
+ break;
+
+ /* these cases fall-thru
+ * case NVME_CTRL_LIVE:
+ * case NVME_CTRL_RESETTING:
+ */
+ default:
+ break;
+ }
+
+reject_or_queue_io:
+ /*
+ * Any other new io is something we're not in a state to send
+ * to the device. Default action is to busy it and retry it
+ * after the controller state is recovered. However, anything
+ * marked for failfast or nvme multipath is immediately failed.
+ * Note: commands used to initialize the controller will be
+ * marked for failfast.
+ * Note: nvme cli/ioctl commands are marked for failfast.
+ */
+ if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+ return BLK_STS_RESOURCE;
+
+reject_io:
+ nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+ return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(nvmf_check_if_ready);
+
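nvmf_check_if_ready() replaces the nvmf_check_init_req()/*_is_ready() helpers that each fabrics transport used to carry, so the per-request admission decision now lives in one place. The fc, rdma and loop hunks further down call it from their queue_rq handlers; a hypothetical transport would do the same, roughly:

    /* Illustrative only; names are made up.  The real call sites are in the
     * nvme_fc_queue_rq(), nvme_rdma_queue_rq() and nvme_loop_queue_rq()
     * hunks below. */
    static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
    				     const struct blk_mq_queue_data *bd)
    {
    	struct example_queue *queue = hctx->driver_data;
    	struct request *rq = bd->rq;
    	blk_status_t ret;

    	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
    				  test_bit(EXAMPLE_Q_LIVE, &queue->flags),
    				  true /* connectivity known to be up */);
    	if (unlikely(ret))
    		return ret;

    	/* ... normal command setup and submission ... */
    	return BLK_STS_OK;
    }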
static const match_table_t opt_tokens = {
{ NVMF_OPT_TRANSPORT, "transport=%s" },
{ NVMF_OPT_TRADDR, "traddr=%s" },
@@ -589,6 +668,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
+ kfree(opts->transport);
opts->transport = p;
break;
case NVMF_OPT_NQN:
@@ -597,6 +677,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
+ kfree(opts->subsysnqn);
opts->subsysnqn = p;
nqnlen = strlen(opts->subsysnqn);
if (nqnlen >= NVMF_NQN_SIZE) {
@@ -608,8 +689,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->discovery_nqn =
!(strcmp(opts->subsysnqn,
NVME_DISC_SUBSYS_NAME));
- if (opts->discovery_nqn)
+ if (opts->discovery_nqn) {
+ opts->kato = 0;
opts->nr_io_queues = 0;
+ }
break;
case NVMF_OPT_TRADDR:
p = match_strdup(args);
@@ -617,6 +700,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
+ kfree(opts->traddr);
opts->traddr = p;
break;
case NVMF_OPT_TRSVCID:
@@ -625,6 +709,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
+ kfree(opts->trsvcid);
opts->trsvcid = p;
break;
case NVMF_OPT_QUEUE_SIZE:
@@ -711,6 +796,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -EINVAL;
goto out;
}
+ nvmf_host_put(opts->host);
opts->host = nvmf_host_add(p);
kfree(p);
if (!opts->host) {
@@ -736,6 +822,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
+ kfree(opts->host_traddr);
opts->host_traddr = p;
break;
case NVMF_OPT_HOST_ID:
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a3145d90c1d2..ef46c915b7b5 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -157,36 +157,7 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-
-static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
- struct request *rq)
-{
- struct nvme_command *cmd = nvme_req(rq)->cmd;
-
- /*
- * We cannot accept any other command until the connect command has
- * completed, so only allow connect to pass.
- */
- if (!blk_rq_is_passthrough(rq) ||
- cmd->common.opcode != nvme_fabrics_command ||
- cmd->fabrics.fctype != nvme_fabrics_type_connect) {
- /*
- * Connecting state means transport disruption or initial
- * establishment, which can take a long time and even might
- * fail permanently, fail fast to give upper layers a chance
- * to failover.
- * Deleting state means that the ctrl will never accept commands
- * again, fail it permanently.
- */
- if (ctrl->state == NVME_CTRL_CONNECTING ||
- ctrl->state == NVME_CTRL_DELETING) {
- nvme_req(rq)->status = NVME_SC_ABORT_REQ;
- return BLK_STS_IOERR;
- }
- return BLK_STS_RESOURCE; /* try again later */
- }
-
- return BLK_STS_OK;
-}
+blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl,
+ struct request *rq, bool queue_live, bool is_connected);
#endif /* _NVME_FABRICS_H */
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index c6e719b2f3ca..6cb26bcf6ec0 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2277,14 +2277,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
return BLK_STS_OK;
}
-static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
- struct request *rq)
-{
- if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
- return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
- return BLK_STS_OK;
-}
-
static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
@@ -2300,7 +2292,9 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
u32 data_len;
blk_status_t ret;
- ret = nvme_fc_is_ready(queue, rq);
+ ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
+ test_bit(NVME_FC_Q_LIVE, &queue->flags),
+ ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE);
if (unlikely(ret))
return ret;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 956e0b8e9c4d..d7b664ae5923 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -15,10 +15,32 @@
#include "nvme.h"
static bool multipath = true;
-module_param(multipath, bool, 0644);
+module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
+/*
+ * If multipathing is enabled we need to always use the subsystem instance
+ * number for numbering our devices to avoid conflicts between subsystems that
+ * have multiple controllers and thus use the multipath-aware subsystem node
+ * and those that have a single controller and use the controller node
+ * directly.
+ */
+void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ struct nvme_ctrl *ctrl, int *flags)
+{
+ if (!multipath) {
+ sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+ } else if (ns->head->disk) {
+ sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+ ctrl->cntlid, ns->head->instance);
+ *flags = GENHD_FL_HIDDEN;
+ } else {
+ sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
+ ns->head->instance);
+ }
+}
+
void nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
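Moving the naming policy into nvme_set_disk_name() keeps it next to the multipath code that makes it necessary. Given the format strings above, the resulting block device names look roughly as follows (instance numbers are only examples):

    /*
     * multipath disabled:              "nvme%dn%d"    -> e.g. nvme0n1   (controller instance)
     * multipath, hidden per-ctrl node: "nvme%dc%dn%d" -> e.g. nvme0c1n1 (subsystem, cntlid, nsid)
     * multipath, shared head node:     "nvme%dn%d"    -> e.g. nvme0n1   (subsystem instance)
     */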
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cf93690b3ffc..17d2f7cf3fed 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -84,6 +84,11 @@ enum nvme_quirks {
* Supports the LighNVM command set if indicated in vs[1].
*/
NVME_QUIRK_LIGHTNVM = (1 << 6),
+
+ /*
+ * Set MEDIUM priority on SQ creation
+ */
+ NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
};
/*
@@ -105,6 +110,7 @@ struct nvme_request {
enum {
NVME_REQ_CANCELLED = (1 << 0),
+ NVME_REQ_USERCMD = (1 << 1),
};
static inline struct nvme_request *nvme_req(struct request *req)
@@ -422,7 +428,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
unsigned timeout, int qid, int at_head,
blk_mq_req_flags_t flags);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
-void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
@@ -430,12 +435,14 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- u8 log_page, void *log, size_t size, size_t offset);
+ u8 log_page, void *log, size_t size, u64 offset);
extern const struct attribute_group nvme_ns_id_attr_group;
extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
+void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
@@ -461,6 +468,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
}
#else
+/*
+ * Without the multipath code enabled, multiple controller per subsystems are
+ * visible as devices and thus we cannot use the subsystem instance.
+ */
+static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ struct nvme_ctrl *ctrl, int *flags)
+{
+ sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+}
+
static inline void nvme_failover_req(struct request *req)
{
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 295fbec1e5f2..17a0190bd88f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -84,6 +84,7 @@ struct nvme_dev {
struct dma_pool *prp_small_pool;
unsigned online_queues;
unsigned max_qid;
+ unsigned int num_vecs;
int q_depth;
u32 db_stride;
void __iomem *bar;
@@ -414,7 +415,8 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_dev *dev = set->driver_data;
- return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev), 0);
+ return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev),
+ dev->num_vecs > 1 ? 1 /* admin queue */ : 0);
}
/**
@@ -1091,10 +1093,19 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
struct nvme_queue *nvmeq)
{
+ struct nvme_ctrl *ctrl = &dev->ctrl;
struct nvme_command c;
int flags = NVME_QUEUE_PHYS_CONTIG;
/*
+ * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
+ * set. Since the URGENT priority value is zero, it makes all queues
+ * URGENT.
+ */
+ if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
+ flags |= NVME_SQ_PRIO_MEDIUM;
+
+ /*
* Note: we (ab)use the fact that the prp fields survive if no data
* is attached to the request.
*/
@@ -1380,8 +1391,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
return 0;
}
-static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
- int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
{
struct nvme_queue *nvmeq = &dev->queues[qid];
@@ -1457,7 +1467,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
nvmeq->sq_cmds_io = dev->cmb + offset;
}
- nvmeq->cq_vector = qid - 1;
+ /*
+ * A queue's vector matches the queue identifier unless the controller
+ * has only one vector available.
+ */
+ nvmeq->cq_vector = dev->num_vecs == 1 ? 0 : qid;
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
goto release_vector;
@@ -1596,8 +1610,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
if (result < 0)
return result;
- result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
- dev_to_node(dev->dev));
+ result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
if (result)
return result;
@@ -1630,9 +1643,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
int ret = 0;
for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
- /* vector == qid - 1, match nvme_create_queue */
- if (nvme_alloc_queue(dev, i, dev->q_depth,
- pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
+ if (nvme_alloc_queue(dev, i, dev->q_depth)) {
ret = -ENOMEM;
break;
}
@@ -1914,6 +1925,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
int result, nr_io_queues;
unsigned long size;
+ struct irq_affinity affd = {
+ .pre_vectors = 1
+ };
+
nr_io_queues = num_possible_cpus();
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
if (result < 0)
@@ -1949,11 +1964,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* setting up the full range we need.
*/
pci_free_irq_vectors(pdev);
- nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
- PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
- if (nr_io_queues <= 0)
+ result = pci_alloc_irq_vectors_affinity(pdev, 1, nr_io_queues + 1,
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+ if (result <= 0)
return -EIO;
- dev->max_qid = nr_io_queues;
+ dev->num_vecs = result;
+ dev->max_qid = max(result - 1, 1);
/*
* Should investigate if there's a performance win from allocating
@@ -2201,7 +2217,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_stop_queues(&dev->ctrl);
- if (!dead) {
+ if (!dead && dev->ctrl.queue_count > 0) {
/*
* If the controller is still alive tell it to stop using the
* host memory buffer. In theory the shutdown / reset should
@@ -2694,7 +2710,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
- .driver_data = NVME_QUIRK_NO_DEEPEST_PS },
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_MEDIUM_PRIO_SQ },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 758537e9ba07..1eb4438a8763 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1601,17 +1601,6 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
return BLK_EH_HANDLED;
}
-/*
- * We cannot accept any other command until the Connect command has completed.
- */
-static inline blk_status_t
-nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
-{
- if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
- return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
- return BLK_STS_OK;
-}
-
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1627,7 +1616,8 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(rq->tag < 0);
- ret = nvme_rdma_is_ready(queue, rq);
+ ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
+ test_bit(NVME_RDMA_Q_LIVE, &queue->flags), true);
if (unlikely(ret))
return ret;
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 5f4f8b16685f..3c7b61ddb0d1 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
config NVME_TARGET_RDMA
tristate "NVMe over Fabrics RDMA target support"
- depends on INFINIBAND
+ depends on INFINIBAND && INFINIBAND_ADDR_TRANS
depends on NVME_TARGET
select SGL_ALLOC
help
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 90dcdc40ac71..5e0e9fcc0d4d 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -178,6 +178,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->vid = 0;
id->ssvid = 0;
+ memset(id->sn, ' ', sizeof(id->sn));
bin2hex(id->sn, &ctrl->subsys->serial,
min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index a72425d8bce0..231e04e0a496 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -59,7 +59,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
- memcpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
+ strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}
/*
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 28bbdff4a88b..cd2344179673 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -173,8 +173,8 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req)
sector = le64_to_cpu(write_zeroes->slba) <<
(req->ns->blksize_shift - 9);
- nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) <<
- (req->ns->blksize_shift - 9)) + 1;
+ nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
+ (req->ns->blksize_shift - 9));
if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
GFP_KERNEL, &bio, 0))
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index a350765d2d5c..27a8561c0cb9 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -149,14 +149,6 @@ nvme_loop_timeout(struct request *rq, bool reserved)
return BLK_EH_HANDLED;
}
-static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
- struct request *rq)
-{
- if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
- return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
- return BLK_STS_OK;
-}
-
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -166,7 +158,8 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret;
- ret = nvme_loop_is_ready(queue, req);
+ ret = nvmf_check_if_ready(&queue->ctrl->ctrl, req,
+ test_bit(NVME_LOOP_Q_LIVE, &queue->flags), true);
if (unlikely(ret))
return ret;
@@ -174,15 +167,12 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret)
return ret;
+ blk_mq_start_request(req);
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
iod->req.port = nvmet_loop_port;
if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
- &queue->nvme_sq, &nvme_loop_ops)) {
- nvme_cleanup_cmd(req);
- blk_mq_start_request(req);
- nvme_loop_queue_response(&iod->req);
+ &queue->nvme_sq, &nvme_loop_ops))
return BLK_STS_OK;
- }
if (blk_rq_payload_bytes(req)) {
iod->sg_table.sgl = iod->first_sgl;
@@ -196,8 +186,6 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->req.transfer_len = blk_rq_payload_bytes(req);
}
- blk_mq_start_request(req);
-
schedule_work(&iod->work);
return BLK_STS_OK;
}
@@ -481,6 +469,12 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
nvme_stop_ctrl(&ctrl->ctrl);
nvme_loop_shutdown_ctrl(ctrl);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ /* state change failure should never happen */
+ WARN_ON_ONCE(1);
+ return;
+ }
+
ret = nvme_loop_configure_admin_queue(ctrl);
if (ret)
goto out_disable;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 84aa9d676375..6da20b9688f7 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -942,7 +942,7 @@ int __init early_init_dt_scan_chosen_stdout(void)
int offset;
const char *p, *q, *options = NULL;
int l;
- const struct earlycon_id *match;
+ const struct earlycon_id **p_match;
const void *fdt = initial_boot_params;
offset = fdt_path_offset(fdt, "/chosen");
@@ -969,7 +969,10 @@ int __init early_init_dt_scan_chosen_stdout(void)
return 0;
}
- for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+ for (p_match = __earlycon_table; p_match < __earlycon_table_end;
+ p_match++) {
+ const struct earlycon_id *match = *p_match;
+
if (!match->compatible[0])
continue;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 8c0c92712fc9..d963baf8e53a 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -204,6 +204,9 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
bool scanphys = false;
int addr, rc;
+ if (!np)
+ return mdiobus_register(mdio);
+
/* Do not continue if the node is disabled */
if (!of_device_is_available(np))
return -ENODEV;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index b35fe88f1851..7baa53e5b1d7 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -102,12 +102,28 @@ static DEFINE_IDR(ovcs_idr);
static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain);
+/**
+ * of_overlay_notifier_register() - Register notifier for overlay operations
+ * @nb: Notifier block to register
+ *
+ * Register for notification on overlay operations on device tree nodes. The
+ * reported actions are defined by @of_reconfig_change. The notifier callback
+ * furthermore receives a pointer to the affected device tree node.
+ *
+ * Note that a notifier callback is not supposed to store pointers to a device
+ * tree node or its content beyond @OF_OVERLAY_POST_REMOVE corresponding to the
+ * respective node it received.
+ */
int of_overlay_notifier_register(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&overlay_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(of_overlay_notifier_register);
+/**
+ * of_overlay_notifier_unregister() - Unregister notifier for overlay operations
+ * @nb: Notifier block to unregister
+ */
int of_overlay_notifier_unregister(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&overlay_notify_chain, nb);
@@ -671,17 +687,13 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
of_node_put(ovcs->fragments[i].overlay);
}
kfree(ovcs->fragments);
-
/*
- * TODO
- *
- * would like to: kfree(ovcs->overlay_tree);
- * but can not since drivers may have pointers into this data
- *
- * would like to: kfree(ovcs->fdt);
- * but can not since drivers may have pointers into this data
+ * There should be no live pointers into ovcs->overlay_tree and
+ * ovcs->fdt due to the policy that overlay notifiers are not allowed
+ * to retain pointers into the overlay devicetree.
*/
-
+ kfree(ovcs->overlay_tree);
+ kfree(ovcs->fdt);
kfree(ovcs);
}
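The TODO in free_overlay_changeset() can go away because the kernel-doc added above makes the contract explicit: notifier callbacks must not keep pointers into the overlay tree past OF_OVERLAY_POST_REMOVE. A hedged sketch of a conforming notifier (the callback's payload is intentionally left as a bare void * here):

    /* Illustrative only: react to overlay events without caching node
     * pointers beyond OF_OVERLAY_POST_REMOVE. */
    static int example_overlay_notify(struct notifier_block *nb,
    				  unsigned long action, void *arg)
    {
    	switch (action) {
    	case OF_OVERLAY_POST_APPLY:
    		/* copy out what is needed; do not store node pointers */
    		break;
    	case OF_OVERLAY_POST_REMOVE:
    		/* drop anything derived from the overlay */
    		break;
    	}
    	return NOTIFY_OK;
    }

    static struct notifier_block example_overlay_nb = {
    	.notifier_call = example_overlay_notify,
    };

    /* registered with of_overlay_notifier_register(&example_overlay_nb) and
     * torn down with of_overlay_notifier_unregister() */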
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
index 8fd0ea4b92b0..013d85e694c6 100644
--- a/drivers/of/unittest-data/Makefile
+++ b/drivers/of/unittest-data/Makefile
@@ -21,8 +21,6 @@ obj-$(CONFIG_OF_OVERLAY) += overlay.dtb.o \
overlay_bad_symbol.dtb.o \
overlay_base.dtb.o
-targets += $(foreach suffix, dtb dtb.S, $(patsubst %.dtb.o,%.$(suffix),$(obj-y)))
-
# enable creation of __symbols__ node
DTC_FLAGS_overlay += -@
DTC_FLAGS_overlay_bad_phandle += -@
@@ -32,7 +30,3 @@ DTC_FLAGS_testcases += -@
# suppress warnings about intentional errors
DTC_FLAGS_testcases += -Wno-interrupts_property
-
-.PRECIOUS: \
- $(obj)/%.dtb.S \
- $(obj)/%.dtb
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index acba1f56af3e..297599fcbc32 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1195,7 +1195,7 @@ void * ccio_get_iommu(const struct parisc_device *dev)
* to/from certain pages. To avoid this happening, we mark these pages
* as `used', and ensure that nothing will try to allocate from them.
*/
-void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
+void __init ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
{
unsigned int idx;
struct parisc_device *dev = parisc_parent(cujo);
@@ -1263,7 +1263,7 @@ static struct parisc_driver ccio_driver __refdata = {
 * I/O Page Directory, the resource map, and initializing the
* U2/Uturn chip into virtual mode.
*/
-static void
+static void __init
ccio_ioc_init(struct ioc *ioc)
{
int i;
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
index a6b88c7f6e3e..d2970a009eb5 100644
--- a/drivers/pci/dwc/pcie-kirin.c
+++ b/drivers/pci/dwc/pcie-kirin.c
@@ -486,7 +486,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
return ret;
kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
- "reset-gpio", 0);
+ "reset-gpios", 0);
if (kirin_pcie->gpio_id_reset < 0)
return -ENODEV;
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index b04d37b3c5de..9abf549631b4 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -29,6 +29,7 @@
#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
+#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
#define PCIE_CORE_LINK_TRAINING BIT(5)
@@ -100,7 +101,8 @@
#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
#define PCIE_ISR1_FLUSH BIT(5)
-#define PCIE_ISR1_ALL_MASK GENMASK(5, 4)
+#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val))
+#define PCIE_ISR1_ALL_MASK GENMASK(11, 4)
#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
@@ -172,8 +174,6 @@
#define PCIE_CONFIG_WR_TYPE0 0xa
#define PCIE_CONFIG_WR_TYPE1 0xb
-/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
-#define PCIE_BDF(dev) (dev << 4)
#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20)
#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15)
#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12)
@@ -296,7 +296,8 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
(7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
- PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT;
+ (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
+ PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
/* Program PCIe Control 2 to disable strict ordering */
@@ -437,7 +438,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
u32 reg;
int ret;
- if (PCI_SLOT(devfn) != 0) {
+ if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@@ -456,7 +457,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
advk_writel(pcie, reg, PIO_CTRL);
/* Program the address registers */
- reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where);
+ reg = PCIE_CONF_ADDR(bus->number, devfn, where);
advk_writel(pcie, reg, PIO_ADDR_LS);
advk_writel(pcie, 0, PIO_ADDR_MS);
@@ -491,7 +492,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int offset;
int ret;
- if (PCI_SLOT(devfn) != 0)
+ if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
if (where % size)
@@ -609,9 +610,9 @@ static void advk_pcie_irq_mask(struct irq_data *d)
irq_hw_number_t hwirq = irqd_to_hwirq(d);
u32 mask;
- mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
- mask |= PCIE_ISR0_INTX_ASSERT(hwirq);
- advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
+ mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+ mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
+ advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
}
static void advk_pcie_irq_unmask(struct irq_data *d)
@@ -620,9 +621,9 @@ static void advk_pcie_irq_unmask(struct irq_data *d)
irq_hw_number_t hwirq = irqd_to_hwirq(d);
u32 mask;
- mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
- mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq);
- advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
+ mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+ mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
+ advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
}
static int advk_pcie_irq_map(struct irq_domain *h,
@@ -765,29 +766,35 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
- u32 val, mask, status;
+ u32 isr0_val, isr0_mask, isr0_status;
+ u32 isr1_val, isr1_mask, isr1_status;
int i, virq;
- val = advk_readl(pcie, PCIE_ISR0_REG);
- mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
- status = val & ((~mask) & PCIE_ISR0_ALL_MASK);
+ isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
+ isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);
+
+ isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
+ isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+ isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
- if (!status) {
- advk_writel(pcie, val, PCIE_ISR0_REG);
+ if (!isr0_status && !isr1_status) {
+ advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
+ advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
return;
}
/* Process MSI interrupts */
- if (status & PCIE_ISR0_MSI_INT_PENDING)
+ if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
advk_pcie_handle_msi(pcie);
/* Process legacy interrupts */
for (i = 0; i < PCI_NUM_INTX; i++) {
- if (!(status & PCIE_ISR0_INTX_ASSERT(i)))
+ if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
continue;
- advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i),
- PCIE_ISR0_REG);
+ advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
+ PCIE_ISR1_REG);
virq = irq_find_mapping(pcie->irq_domain, i);
generic_handle_irq(virq);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 6ace47099fc5..b9a131137e64 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -958,10 +958,11 @@ static int pci_pm_freeze(struct device *dev)
* devices should not be touched during freeze/thaw transitions,
* however.
*/
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ if (!dev_pm_smart_suspend_and_suspended(dev)) {
pm_runtime_resume(dev);
+ pci_dev->state_saved = false;
+ }
- pci_dev->state_saved = false;
if (pm->freeze) {
int error;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index aa86e904f93c..dbfe7c4f3776 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1910,7 +1910,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
EXPORT_SYMBOL(pci_pme_active);
/**
- * pci_enable_wake - enable PCI device as wakeup event source
+ * __pci_enable_wake - enable PCI device as wakeup event source
* @dev: PCI device affected
* @state: PCI state from which device will issue wakeup events
* @enable: True to enable event generation; false to disable
@@ -1928,7 +1928,7 @@ EXPORT_SYMBOL(pci_pme_active);
* Error code depending on the platform is returned if both the platform and
* the native mechanism fail to enable the generation of wake-up events
*/
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
+static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
int ret = 0;
@@ -1969,6 +1969,23 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
return ret;
}
+
+/**
+ * pci_enable_wake - change wakeup settings for a PCI device
+ * @pci_dev: Target device
+ * @state: PCI state from which device will issue wakeup events
+ * @enable: Whether or not to enable event generation
+ *
+ * If @enable is set, check device_may_wakeup() for the device before calling
+ * __pci_enable_wake() for it.
+ */
+int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
+{
+ if (enable && !device_may_wakeup(&pci_dev->dev))
+ return -EINVAL;
+
+ return __pci_enable_wake(pci_dev, state, enable);
+}
EXPORT_SYMBOL(pci_enable_wake);
/**
@@ -1981,9 +1998,9 @@ EXPORT_SYMBOL(pci_enable_wake);
* should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
* ordering constraints.
*
- * This function only returns error code if the device is not capable of
- * generating PME# from both D3_hot and D3_cold, and the platform is unable to
- * enable wake-up power for it.
+ * This function only returns an error code if the device is not allowed to wake
+ * up the system from sleep or it is not capable of generating PME# from both
+ * D3_hot and D3_cold, and the platform is unable to enable wake-up power for it.
*/
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
@@ -2114,7 +2131,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
dev->runtime_d3cold = target_state == PCI_D3cold;
- pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
+ __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
error = pci_set_power_state(dev, target_state);
@@ -2138,16 +2155,16 @@ bool pci_dev_run_wake(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
- if (device_can_wakeup(&dev->dev))
- return true;
-
if (!dev->pme_support)
return false;
/* PME-capable in principle, but not from the target power state */
- if (!pci_pme_capable(dev, pci_target_state(dev, false)))
+ if (!pci_pme_capable(dev, pci_target_state(dev, true)))
return false;
+ if (device_can_wakeup(&dev->dev))
+ return true;
+
while (bus->parent) {
struct pci_dev *bridge = bus->self;
@@ -5273,11 +5290,11 @@ void pcie_print_link_status(struct pci_dev *dev)
bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
if (bw_avail >= bw_cap)
- pci_info(dev, "%u.%03u Gb/s available bandwidth (%s x%d link)\n",
+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
bw_cap / 1000, bw_cap % 1000,
PCIE_SPEED2STR(speed_cap), width_cap);
else
- pci_info(dev, "%u.%03u Gb/s available bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
bw_avail / 1000, bw_avail % 1000,
PCIE_SPEED2STR(speed), width,
limiting_dev ? pci_name(limiting_dev) : "<unknown>",
@@ -5628,7 +5645,6 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
return;
}
- pci_info(dev, "Disabling memory decoding and releasing memory resources\n");
pci_read_config_word(dev, PCI_COMMAND, &command);
command &= ~PCI_COMMAND_MEMORY;
pci_write_config_word(dev, PCI_COMMAND, command);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 26141b1946ee..2990ad1e7c99 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4673,9 +4673,13 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
#ifdef CONFIG_PCI_ATS
/*
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 0cd83a4bd4c8..d8ca40a97693 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -168,8 +168,6 @@ EXPORT_SYMBOL(pci_claim_resource);
void pci_disable_bridge_window(struct pci_dev *dev)
{
- pci_info(dev, "disabling bridge mem windows\n");
-
/* MMIO Base/Limit */
pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index a0d522154cdf..4ef429250d7b 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -135,19 +135,25 @@ struct mvebu_comhy_conf {
static const struct mvebu_comhy_conf mvebu_comphy_cp110_modes[] = {
/* lane 0 */
MVEBU_COMPHY_CONF(0, 1, PHY_MODE_SGMII, 0x1),
+ MVEBU_COMPHY_CONF(0, 1, PHY_MODE_2500SGMII, 0x1),
/* lane 1 */
MVEBU_COMPHY_CONF(1, 2, PHY_MODE_SGMII, 0x1),
+ MVEBU_COMPHY_CONF(1, 2, PHY_MODE_2500SGMII, 0x1),
/* lane 2 */
MVEBU_COMPHY_CONF(2, 0, PHY_MODE_SGMII, 0x1),
+ MVEBU_COMPHY_CONF(2, 0, PHY_MODE_2500SGMII, 0x1),
MVEBU_COMPHY_CONF(2, 0, PHY_MODE_10GKR, 0x1),
/* lane 3 */
MVEBU_COMPHY_CONF(3, 1, PHY_MODE_SGMII, 0x2),
+ MVEBU_COMPHY_CONF(3, 1, PHY_MODE_2500SGMII, 0x2),
/* lane 4 */
MVEBU_COMPHY_CONF(4, 0, PHY_MODE_SGMII, 0x2),
+ MVEBU_COMPHY_CONF(4, 0, PHY_MODE_2500SGMII, 0x2),
MVEBU_COMPHY_CONF(4, 0, PHY_MODE_10GKR, 0x2),
MVEBU_COMPHY_CONF(4, 1, PHY_MODE_SGMII, 0x1),
/* lane 5 */
MVEBU_COMPHY_CONF(5, 2, PHY_MODE_SGMII, 0x1),
+ MVEBU_COMPHY_CONF(5, 2, PHY_MODE_2500SGMII, 0x1),
};
struct mvebu_comphy_priv {
@@ -206,6 +212,10 @@ static void mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane,
if (mode == PHY_MODE_10GKR)
val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xe) |
MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xe);
+ else if (mode == PHY_MODE_2500SGMII)
+ val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0x8) |
+ MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0x8) |
+ MVEBU_COMPHY_SERDES_CFG0_HALF_BUS;
else if (mode == PHY_MODE_SGMII)
val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0x6) |
MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0x6) |
@@ -296,13 +306,13 @@ static int mvebu_comphy_init_plls(struct mvebu_comphy_lane *lane,
return 0;
}
-static int mvebu_comphy_set_mode_sgmii(struct phy *phy)
+static int mvebu_comphy_set_mode_sgmii(struct phy *phy, enum phy_mode mode)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
u32 val;
- mvebu_comphy_ethernet_init_reset(lane, PHY_MODE_SGMII);
+ mvebu_comphy_ethernet_init_reset(lane, mode);
val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
val &= ~MVEBU_COMPHY_RX_CTRL1_CLK8T_EN;
@@ -487,7 +497,8 @@ static int mvebu_comphy_power_on(struct phy *phy)
switch (lane->mode) {
case PHY_MODE_SGMII:
- ret = mvebu_comphy_set_mode_sgmii(phy);
+ case PHY_MODE_2500SGMII:
+ ret = mvebu_comphy_set_mode_sgmii(phy, lane->mode);
break;
case PHY_MODE_10GKR:
ret = mvebu_comphy_set_mode_10gkr(phy);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index b1ae1618fefe..fee9225ca559 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1622,22 +1622,30 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
if (!need_valid_mask) {
irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
- chip->ngpio, NUMA_NO_NODE);
+ community->npins, NUMA_NO_NODE);
if (irq_base < 0) {
dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
return irq_base;
}
- } else {
- irq_base = 0;
}
- ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base,
+ ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add IRQ chip\n");
return ret;
}
+ if (!need_valid_mask) {
+ for (i = 0; i < community->ngpio_ranges; i++) {
+ range = &community->gpio_ranges[i];
+
+ irq_domain_associate_many(chip->irq.domain, irq_base,
+ range->base, range->npins);
+ irq_base += range->npins;
+ }
+ }
+
gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq,
chv_gpio_irq_handler);
return 0;
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 8870a4100164..fee3435a6f15 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -36,6 +36,27 @@
.npins = ((e) - (s) + 1), \
}
+#define SPTH_GPP(r, s, e, g) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
+ }
+
+#define SPTH_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = SPT_PAD_OWN, \
+ .padcfglock_offset = SPT_PADCFGLOCK, \
+ .hostown_offset = SPT_HOSTSW_OWN, \
+ .ie_offset = SPT_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
/* Sunrisepoint-LP */
static const struct pinctrl_pin_desc sptlp_pins[] = {
/* GPP_A */
@@ -531,10 +552,28 @@ static const struct intel_function spth_functions[] = {
FUNCTION("i2c2", spth_i2c2_groups),
};
+static const struct intel_padgroup spth_community0_gpps[] = {
+ SPTH_GPP(0, 0, 23, 0), /* GPP_A */
+ SPTH_GPP(1, 24, 47, 24), /* GPP_B */
+};
+
+static const struct intel_padgroup spth_community1_gpps[] = {
+ SPTH_GPP(0, 48, 71, 48), /* GPP_C */
+ SPTH_GPP(1, 72, 95, 72), /* GPP_D */
+ SPTH_GPP(2, 96, 108, 96), /* GPP_E */
+ SPTH_GPP(3, 109, 132, 120), /* GPP_F */
+ SPTH_GPP(4, 133, 156, 144), /* GPP_G */
+ SPTH_GPP(5, 157, 180, 168), /* GPP_H */
+};
+
+static const struct intel_padgroup spth_community3_gpps[] = {
+ SPTH_GPP(0, 181, 191, 192), /* GPP_I */
+};
+
static const struct intel_community spth_communities[] = {
- SPT_COMMUNITY(0, 0, 47),
- SPT_COMMUNITY(1, 48, 180),
- SPT_COMMUNITY(2, 181, 191),
+ SPTH_COMMUNITY(0, 0, 47, spth_community0_gpps),
+ SPTH_COMMUNITY(1, 48, 180, spth_community1_gpps),
+ SPTH_COMMUNITY(2, 181, 191, spth_community3_gpps),
};
static const struct intel_pinctrl_soc_data spth_soc_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index 4b91ff74779b..99a6ceac8e53 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -898,7 +898,7 @@ static struct meson_bank meson_axg_periphs_banks[] = {
static struct meson_bank meson_axg_aobus_banks[] = {
/* name first last irq pullen pull dir out in */
- BANK("AO", GPIOAO_0, GPIOAO_9, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
+ BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 16, 0, 0, 0, 0, 0, 16, 1, 0),
};
static struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = {
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index d8599736a41a..5c47f451e43b 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -1,33 +1,20 @@
-/*
- * chromeos_laptop.c - Driver to instantiate Chromebook i2c/smbus devices.
- *
- * Author : Benson Leung <bleung@chromium.org>
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Driver to instantiate Chromebook i2c/smbus devices.
+//
+// Copyright (C) 2012 Google, Inc.
+// Author: Benson Leung <bleung@chromium.org>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/dmi.h>
#include <linux/i2c.h>
-#include <linux/platform_data/atmel_mxt_ts.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#define ATMEL_TP_I2C_ADDR 0x4b
#define ATMEL_TP_I2C_BL_ADDR 0x25
@@ -38,18 +25,11 @@
#define ISL_ALS_I2C_ADDR 0x44
#define TAOS_ALS_I2C_ADDR 0x29
-#define MAX_I2C_DEVICE_DEFERRALS 5
-
-static struct i2c_client *als;
-static struct i2c_client *tp;
-static struct i2c_client *ts;
-
static const char *i2c_adapter_names[] = {
"SMBus I801 adapter",
"i915 gmbus vga",
"i915 gmbus panel",
"Synopsys DesignWare I2C adapter",
- "Synopsys DesignWare I2C adapter",
};
/* Keep this enum consistent with i2c_adapter_names */
@@ -57,126 +37,41 @@ enum i2c_adapter_type {
I2C_ADAPTER_SMBUS = 0,
I2C_ADAPTER_VGADDC,
I2C_ADAPTER_PANEL,
- I2C_ADAPTER_DESIGNWARE_0,
- I2C_ADAPTER_DESIGNWARE_1,
-};
-
-enum i2c_peripheral_state {
- UNPROBED = 0,
- PROBED,
- TIMEDOUT,
+ I2C_ADAPTER_DESIGNWARE,
};
struct i2c_peripheral {
- int (*add)(enum i2c_adapter_type type);
- enum i2c_adapter_type type;
- enum i2c_peripheral_state state;
- int tries;
-};
-
-#define MAX_I2C_PERIPHERALS 4
+ struct i2c_board_info board_info;
+ unsigned short alt_addr;
-struct chromeos_laptop {
- struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS];
-};
-
-static struct chromeos_laptop *cros_laptop;
-
-static struct i2c_board_info cyapa_device = {
- I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
- .flags = I2C_CLIENT_WAKE,
-};
+ const char *dmi_name;
+ unsigned long irqflags;
+ struct resource irq_resource;
-static struct i2c_board_info elantech_device = {
- I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
- .flags = I2C_CLIENT_WAKE,
-};
-
-static struct i2c_board_info isl_als_device = {
- I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
-};
-
-static struct i2c_board_info tsl2583_als_device = {
- I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
-};
-
-static struct i2c_board_info tsl2563_als_device = {
- I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
-};
-
-static int mxt_t19_keys[] = {
- KEY_RESERVED,
- KEY_RESERVED,
- KEY_RESERVED,
- KEY_RESERVED,
- KEY_RESERVED,
- BTN_LEFT
-};
-
-static struct mxt_platform_data atmel_224s_tp_platform_data = {
- .irqflags = IRQF_TRIGGER_FALLING,
- .t19_num_keys = ARRAY_SIZE(mxt_t19_keys),
- .t19_keymap = mxt_t19_keys,
- .suspend_mode = MXT_SUSPEND_T9_CTRL,
-};
+ enum i2c_adapter_type type;
+ u32 pci_devid;
-static struct i2c_board_info atmel_224s_tp_device = {
- I2C_BOARD_INFO("atmel_mxt_tp", ATMEL_TP_I2C_ADDR),
- .platform_data = &atmel_224s_tp_platform_data,
- .flags = I2C_CLIENT_WAKE,
+ struct i2c_client *client;
};
-static struct mxt_platform_data atmel_1664s_platform_data = {
- .irqflags = IRQF_TRIGGER_FALLING,
- .suspend_mode = MXT_SUSPEND_T9_CTRL,
+struct chromeos_laptop {
+ /*
+ * Note that we can't mark this pointer as const because
+ * i2c_new_probed_device() changes the passed-in I2C board info.
+ */
+ struct i2c_peripheral *i2c_peripherals;
+ unsigned int num_i2c_peripherals;
};
-static struct i2c_board_info atmel_1664s_device = {
- I2C_BOARD_INFO("atmel_mxt_ts", ATMEL_TS_I2C_ADDR),
- .platform_data = &atmel_1664s_platform_data,
- .flags = I2C_CLIENT_WAKE,
-};
+static const struct chromeos_laptop *cros_laptop;
-static struct i2c_client *__add_probed_i2c_device(
- const char *name,
- int bus,
- struct i2c_board_info *info,
- const unsigned short *alt_addr_list)
+static struct i2c_client *
+chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
+ struct i2c_board_info *info,
+ unsigned short alt_addr)
{
- const struct dmi_device *dmi_dev;
- const struct dmi_dev_onboard *dev_data;
- struct i2c_adapter *adapter;
- struct i2c_client *client = NULL;
const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END };
-
- if (bus < 0)
- return NULL;
- /*
- * If a name is specified, look for irq platform information stashed
- * in DMI_DEV_TYPE_DEV_ONBOARD by the Chrome OS custom system firmware.
- */
- if (name) {
- dmi_dev = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, name, NULL);
- if (!dmi_dev) {
- pr_err("%s failed to dmi find device %s.\n",
- __func__,
- name);
- return NULL;
- }
- dev_data = (struct dmi_dev_onboard *)dmi_dev->device_data;
- if (!dev_data) {
- pr_err("%s failed to get data from dmi for %s.\n",
- __func__, name);
- return NULL;
- }
- info->irq = dev_data->instance;
- }
-
- adapter = i2c_get_adapter(bus);
- if (!adapter) {
- pr_err("%s failed to get i2c adapter %d.\n", __func__, bus);
- return NULL;
- }
+ struct i2c_client *client;
/*
* Add the i2c device. If we can't detect it at the primary
@@ -184,339 +79,345 @@ static struct i2c_client *__add_probed_i2c_device(
* structure gets assigned primary address.
*/
client = i2c_new_probed_device(adapter, info, addr_list, NULL);
- if (!client && alt_addr_list) {
+ if (!client && alt_addr) {
struct i2c_board_info dummy_info = {
I2C_BOARD_INFO("dummy", info->addr),
};
+ const unsigned short alt_addr_list[] = {
+ alt_addr, I2C_CLIENT_END
+ };
struct i2c_client *dummy;
dummy = i2c_new_probed_device(adapter, &dummy_info,
alt_addr_list, NULL);
if (dummy) {
- pr_debug("%s %d-%02x is probed at %02x\n",
- __func__, bus, info->addr, dummy->addr);
+ pr_debug("%d-%02x is probed at %02x\n",
+ adapter->nr, info->addr, dummy->addr);
i2c_unregister_device(dummy);
client = i2c_new_device(adapter, info);
}
}
if (!client)
- pr_notice("%s failed to register device %d-%02x\n",
- __func__, bus, info->addr);
+ pr_debug("failed to register device %d-%02x\n",
+ adapter->nr, info->addr);
else
- pr_debug("%s added i2c device %d-%02x\n",
- __func__, bus, info->addr);
+ pr_debug("added i2c device %d-%02x\n",
+ adapter->nr, info->addr);
- i2c_put_adapter(adapter);
return client;
}
-struct i2c_lookup {
- const char *name;
- int instance;
- int n;
-};
-
-static int __find_i2c_adap(struct device *dev, void *data)
-{
- struct i2c_lookup *lookup = data;
- static const char *prefix = "i2c-";
- struct i2c_adapter *adapter;
-
- if (strncmp(dev_name(dev), prefix, strlen(prefix)) != 0)
- return 0;
- adapter = to_i2c_adapter(dev);
- if (strncmp(adapter->name, lookup->name, strlen(lookup->name)) == 0 &&
- lookup->n++ == lookup->instance)
- return 1;
- return 0;
-}
-
-static int find_i2c_adapter_num(enum i2c_adapter_type type)
-{
- struct device *dev = NULL;
- struct i2c_adapter *adapter;
- struct i2c_lookup lookup;
-
- memset(&lookup, 0, sizeof(lookup));
- lookup.name = i2c_adapter_names[type];
- lookup.instance = (type == I2C_ADAPTER_DESIGNWARE_1) ? 1 : 0;
-
- /* find the adapter by name */
- dev = bus_find_device(&i2c_bus_type, NULL, &lookup, __find_i2c_adap);
- if (!dev) {
- /* Adapters may appear later. Deferred probing will retry */
- pr_notice("%s: i2c adapter %s not found on system.\n", __func__,
- lookup.name);
- return -ENODEV;
- }
- adapter = to_i2c_adapter(dev);
- return adapter->nr;
-}
-
-/*
- * Takes a list of addresses in addrs as such :
- * { addr1, ... , addrn, I2C_CLIENT_END };
- * add_probed_i2c_device will use i2c_new_probed_device
- * and probe for devices at all of the addresses listed.
- * Returns NULL if no devices found.
- * See Documentation/i2c/instantiating-devices for more information.
- */
-static struct i2c_client *add_probed_i2c_device(
- const char *name,
- enum i2c_adapter_type type,
- struct i2c_board_info *info,
- const unsigned short *addrs)
+static bool chromeos_laptop_match_adapter_devid(struct device *dev, u32 devid)
{
- return __add_probed_i2c_device(name,
- find_i2c_adapter_num(type),
- info,
- addrs);
-}
+ struct pci_dev *pdev;
-/*
- * Probes for a device at a single address, the one provided by
- * info->addr.
- * Returns NULL if no device found.
- */
-static struct i2c_client *add_i2c_device(const char *name,
- enum i2c_adapter_type type,
- struct i2c_board_info *info)
-{
- return __add_probed_i2c_device(name,
- find_i2c_adapter_num(type),
- info,
- NULL);
-}
-
-static int setup_cyapa_tp(enum i2c_adapter_type type)
-{
- if (tp)
- return 0;
+ if (!dev_is_pci(dev))
+ return false;
- /* add cyapa touchpad */
- tp = add_i2c_device("trackpad", type, &cyapa_device);
- return (!tp) ? -EAGAIN : 0;
+ pdev = to_pci_dev(dev);
+ return devid == PCI_DEVID(pdev->bus->number, pdev->devfn);
}
-static int setup_atmel_224s_tp(enum i2c_adapter_type type)
+static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter)
{
- const unsigned short addr_list[] = { ATMEL_TP_I2C_BL_ADDR,
- I2C_CLIENT_END };
- if (tp)
- return 0;
-
- /* add atmel mxt touchpad */
- tp = add_probed_i2c_device("trackpad", type,
- &atmel_224s_tp_device, addr_list);
- return (!tp) ? -EAGAIN : 0;
-}
+ struct i2c_peripheral *i2c_dev;
+ int i;
-static int setup_elantech_tp(enum i2c_adapter_type type)
-{
- if (tp)
- return 0;
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
- /* add elantech touchpad */
- tp = add_i2c_device("trackpad", type, &elantech_device);
- return (!tp) ? -EAGAIN : 0;
-}
+ /* Skip devices already created */
+ if (i2c_dev->client)
+ continue;
-static int setup_atmel_1664s_ts(enum i2c_adapter_type type)
-{
- const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
- I2C_CLIENT_END };
- if (ts)
- return 0;
-
- /* add atmel mxt touch device */
- ts = add_probed_i2c_device("touchscreen", type,
- &atmel_1664s_device, addr_list);
- return (!ts) ? -EAGAIN : 0;
-}
+ if (strncmp(adapter->name, i2c_adapter_names[i2c_dev->type],
+ strlen(i2c_adapter_names[i2c_dev->type])))
+ continue;
-static int setup_isl29018_als(enum i2c_adapter_type type)
-{
- if (als)
- return 0;
+ if (i2c_dev->pci_devid &&
+ !chromeos_laptop_match_adapter_devid(adapter->dev.parent,
+ i2c_dev->pci_devid)) {
+ continue;
+ }
- /* add isl29018 light sensor */
- als = add_i2c_device("lightsensor", type, &isl_als_device);
- return (!als) ? -EAGAIN : 0;
+ i2c_dev->client =
+ chromes_laptop_instantiate_i2c_device(adapter,
+ &i2c_dev->board_info,
+ i2c_dev->alt_addr);
+ }
}
-static int setup_tsl2583_als(enum i2c_adapter_type type)
+static void chromeos_laptop_detach_i2c_client(struct i2c_client *client)
{
- if (als)
- return 0;
-
- /* add tsl2583 light sensor */
- als = add_i2c_device(NULL, type, &tsl2583_als_device);
- return (!als) ? -EAGAIN : 0;
-}
+ struct i2c_peripheral *i2c_dev;
+ int i;
-static int setup_tsl2563_als(enum i2c_adapter_type type)
-{
- if (als)
- return 0;
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
- /* add tsl2563 light sensor */
- als = add_i2c_device(NULL, type, &tsl2563_als_device);
- return (!als) ? -EAGAIN : 0;
+ if (i2c_dev->client == client)
+ i2c_dev->client = NULL;
+ }
}
-static int __init chromeos_laptop_dmi_matched(const struct dmi_system_id *id)
+static int chromeos_laptop_i2c_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- cros_laptop = (void *)id->driver_data;
- pr_debug("DMI Matched %s.\n", id->ident);
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ if (dev->type == &i2c_adapter_type)
+ chromeos_laptop_check_adapter(to_i2c_adapter(dev));
+ break;
+
+ case BUS_NOTIFY_REMOVED_DEVICE:
+ if (dev->type == &i2c_client_type)
+ chromeos_laptop_detach_i2c_client(to_i2c_client(dev));
+ break;
+ }
- /* Indicate to dmi_scan that processing is done. */
- return 1;
+ return 0;
}
-static int chromeos_laptop_probe(struct platform_device *pdev)
-{
- int i;
- int ret = 0;
-
- for (i = 0; i < MAX_I2C_PERIPHERALS; i++) {
- struct i2c_peripheral *i2c_dev;
-
- i2c_dev = &cros_laptop->i2c_peripherals[i];
-
- /* No more peripherals. */
- if (i2c_dev->add == NULL)
- break;
-
- if (i2c_dev->state == TIMEDOUT || i2c_dev->state == PROBED)
- continue;
-
- /*
- * Check that the i2c adapter is present.
- * -EPROBE_DEFER if missing as the adapter may appear much
- * later.
- */
- if (find_i2c_adapter_num(i2c_dev->type) == -ENODEV) {
- ret = -EPROBE_DEFER;
- continue;
- }
-
- /* Add the device. */
- if (i2c_dev->add(i2c_dev->type) == -EAGAIN) {
- /*
- * Set -EPROBE_DEFER a limited num of times
- * if device is not successfully added.
- */
- if (++i2c_dev->tries < MAX_I2C_DEVICE_DEFERRALS) {
- ret = -EPROBE_DEFER;
- } else {
- /* Ran out of tries. */
- pr_notice("%s: Ran out of tries for device.\n",
- __func__);
- i2c_dev->state = TIMEDOUT;
- }
- } else {
- i2c_dev->state = PROBED;
- }
- }
+static struct notifier_block chromeos_laptop_i2c_notifier = {
+ .notifier_call = chromeos_laptop_i2c_notifier_call,
+};
- return ret;
+#define DECLARE_CROS_LAPTOP(_name) \
+static const struct chromeos_laptop _name __initconst = { \
+ .i2c_peripherals = _name##_peripherals, \
+ .num_i2c_peripherals = ARRAY_SIZE(_name##_peripherals), \
}
-static struct chromeos_laptop samsung_series_5_550 = {
- .i2c_peripherals = {
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
- /* Light Sensor. */
- { .add = setup_isl29018_als, I2C_ADAPTER_SMBUS },
+static struct i2c_peripheral samsung_series_5_550_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_SMBUS,
+ },
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
+ },
+ .dmi_name = "lightsensor",
+ .type = I2C_ADAPTER_SMBUS,
},
};
+DECLARE_CROS_LAPTOP(samsung_series_5_550);
-static struct chromeos_laptop samsung_series_5 = {
- .i2c_peripherals = {
- /* Light Sensor. */
- { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS },
+static struct i2c_peripheral samsung_series_5_peripherals[] __initdata = {
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
+ },
+ .type = I2C_ADAPTER_SMBUS,
},
};
+DECLARE_CROS_LAPTOP(samsung_series_5);
-static struct chromeos_laptop chromebook_pixel = {
- .i2c_peripherals = {
- /* Touch Screen. */
- { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL },
- /* Touchpad. */
- { .add = setup_atmel_224s_tp, I2C_ADAPTER_VGADDC },
- /* Light Sensor. */
- { .add = setup_isl29018_als, I2C_ADAPTER_PANEL },
- },
+static const int chromebook_pixel_tp_keys[] __initconst = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ BTN_LEFT
};
-static struct chromeos_laptop hp_chromebook_14 = {
- .i2c_peripherals = {
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+static const struct property_entry
+chromebook_pixel_trackpad_props[] __initconst = {
+ PROPERTY_ENTRY_U32_ARRAY("linux,gpio-keymap", chromebook_pixel_tp_keys),
+ { }
+};
+
+static struct i2c_peripheral chromebook_pixel_peripherals[] __initdata = {
+ /* Touch Screen. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("atmel_mxt_ts",
+ ATMEL_TS_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "touchscreen",
+ .irqflags = IRQF_TRIGGER_FALLING,
+ .type = I2C_ADAPTER_PANEL,
+ .alt_addr = ATMEL_TS_I2C_BL_ADDR,
+ },
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("atmel_mxt_tp",
+ ATMEL_TP_I2C_ADDR),
+ .properties =
+ chromebook_pixel_trackpad_props,
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .irqflags = IRQF_TRIGGER_FALLING,
+ .type = I2C_ADAPTER_VGADDC,
+ .alt_addr = ATMEL_TP_I2C_BL_ADDR,
+ },
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
+ },
+ .dmi_name = "lightsensor",
+ .type = I2C_ADAPTER_PANEL,
},
};
+DECLARE_CROS_LAPTOP(chromebook_pixel);
-static struct chromeos_laptop dell_chromebook_11 = {
- .i2c_peripherals = {
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
- /* Elan Touchpad option. */
- { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 },
+static struct i2c_peripheral hp_chromebook_14_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
},
};
+DECLARE_CROS_LAPTOP(hp_chromebook_14);
-static struct chromeos_laptop toshiba_cb35 = {
- .i2c_peripherals = {
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+static struct i2c_peripheral dell_chromebook_11_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ },
+ /* Elan Touchpad option. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
},
};
+DECLARE_CROS_LAPTOP(dell_chromebook_11);
-static struct chromeos_laptop acer_c7_chromebook = {
- .i2c_peripherals = {
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+static struct i2c_peripheral toshiba_cb35_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
},
};
+DECLARE_CROS_LAPTOP(toshiba_cb35);
-static struct chromeos_laptop acer_ac700 = {
- .i2c_peripherals = {
- /* Light Sensor. */
- { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
+static struct i2c_peripheral acer_c7_chromebook_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_SMBUS,
},
};
+DECLARE_CROS_LAPTOP(acer_c7_chromebook);
-static struct chromeos_laptop acer_c720 = {
- .i2c_peripherals = {
- /* Touchscreen. */
- { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 },
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
- /* Elan Touchpad option. */
- { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 },
- /* Light Sensor. */
- { .add = setup_isl29018_als, I2C_ADAPTER_DESIGNWARE_1 },
+static struct i2c_peripheral acer_ac700_peripherals[] __initdata = {
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
+ },
+ .type = I2C_ADAPTER_SMBUS,
},
};
+DECLARE_CROS_LAPTOP(acer_ac700);
-static struct chromeos_laptop hp_pavilion_14_chromebook = {
- .i2c_peripherals = {
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+static struct i2c_peripheral acer_c720_peripherals[] __initdata = {
+ /* Touchscreen. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("atmel_mxt_ts",
+ ATMEL_TS_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "touchscreen",
+ .irqflags = IRQF_TRIGGER_FALLING,
+ .type = I2C_ADAPTER_DESIGNWARE,
+ .pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x2)),
+ .alt_addr = ATMEL_TS_I2C_BL_ADDR,
+ },
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ .pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x1)),
+ },
+ /* Elan Touchpad option. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ .pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x1)),
+ },
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
+ },
+ .dmi_name = "lightsensor",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ .pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x2)),
},
};
+DECLARE_CROS_LAPTOP(acer_c720);
-static struct chromeos_laptop cr48 = {
- .i2c_peripherals = {
- /* Light Sensor. */
- { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
+static struct i2c_peripheral
+hp_pavilion_14_chromebook_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_SMBUS,
},
};
+DECLARE_CROS_LAPTOP(hp_pavilion_14_chromebook);
-#define _CBDD(board_) \
- .callback = chromeos_laptop_dmi_matched, \
- .driver_data = (void *)&board_
+static struct i2c_peripheral cr48_peripherals[] __initdata = {
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
+ },
+ .type = I2C_ADAPTER_SMBUS,
+ },
+};
+DECLARE_CROS_LAPTOP(cr48);
static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
{
@@ -525,14 +426,14 @@ static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
},
- _CBDD(samsung_series_5_550),
+ .driver_data = (void *)&samsung_series_5_550,
},
{
.ident = "Samsung Series 5",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
},
- _CBDD(samsung_series_5),
+ .driver_data = (void *)&samsung_series_5,
},
{
.ident = "Chromebook Pixel",
@@ -540,7 +441,7 @@ static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
},
- _CBDD(chromebook_pixel),
+ .driver_data = (void *)&chromebook_pixel,
},
{
.ident = "Wolf",
@@ -548,7 +449,7 @@ static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
DMI_MATCH(DMI_PRODUCT_NAME, "Wolf"),
},
- _CBDD(dell_chromebook_11),
+ .driver_data = (void *)&dell_chromebook_11,
},
{
.ident = "HP Chromebook 14",
@@ -556,7 +457,7 @@ static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
DMI_MATCH(DMI_PRODUCT_NAME, "Falco"),
},
- _CBDD(hp_chromebook_14),
+ .driver_data = (void *)&hp_chromebook_14,
},
{
.ident = "Toshiba CB35",
@@ -564,99 +465,214 @@ static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
DMI_MATCH(DMI_PRODUCT_NAME, "Leon"),
},
- _CBDD(toshiba_cb35),
+ .driver_data = (void *)&toshiba_cb35,
},
{
.ident = "Acer C7 Chromebook",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
},
- _CBDD(acer_c7_chromebook),
+ .driver_data = (void *)&acer_c7_chromebook,
},
{
.ident = "Acer AC700",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
},
- _CBDD(acer_ac700),
+ .driver_data = (void *)&acer_ac700,
},
{
.ident = "Acer C720",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"),
},
- _CBDD(acer_c720),
+ .driver_data = (void *)&acer_c720,
},
{
.ident = "HP Pavilion 14 Chromebook",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
},
- _CBDD(hp_pavilion_14_chromebook),
+ .driver_data = (void *)&hp_pavilion_14_chromebook,
},
{
.ident = "Cr-48",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
},
- _CBDD(cr48),
+ .driver_data = (void *)&cr48,
},
{ }
};
MODULE_DEVICE_TABLE(dmi, chromeos_laptop_dmi_table);
-static struct platform_device *cros_platform_device;
+static int __init chromeos_laptop_scan_adapter(struct device *dev, void *data)
+{
+ struct i2c_adapter *adapter;
-static struct platform_driver cros_platform_driver = {
- .driver = {
- .name = "chromeos_laptop",
- },
- .probe = chromeos_laptop_probe,
-};
+ adapter = i2c_verify_adapter(dev);
+ if (adapter)
+ chromeos_laptop_check_adapter(adapter);
+
+ return 0;
+}
+
+static int __init chromeos_laptop_get_irq_from_dmi(const char *dmi_name)
+{
+ const struct dmi_device *dmi_dev;
+ const struct dmi_dev_onboard *dev_data;
+
+ dmi_dev = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, dmi_name, NULL);
+ if (!dmi_dev) {
+ pr_err("failed to find DMI device '%s'\n", dmi_name);
+ return -ENOENT;
+ }
+
+ dev_data = dmi_dev->device_data;
+ if (!dev_data) {
+ pr_err("failed to get data from DMI for '%s'\n", dmi_name);
+ return -EINVAL;
+ }
+
+ return dev_data->instance;
+}
+
+static int __init chromeos_laptop_setup_irq(struct i2c_peripheral *i2c_dev)
+{
+ int irq;
+
+ if (i2c_dev->dmi_name) {
+ irq = chromeos_laptop_get_irq_from_dmi(i2c_dev->dmi_name);
+ if (irq < 0)
+ return irq;
+
+ i2c_dev->irq_resource = (struct resource)
+ DEFINE_RES_NAMED(irq, 1, NULL,
+ IORESOURCE_IRQ | i2c_dev->irqflags);
+ i2c_dev->board_info.resources = &i2c_dev->irq_resource;
+ i2c_dev->board_info.num_resources = 1;
+ }
+
+ return 0;
+}
+
+static struct chromeos_laptop * __init
+chromeos_laptop_prepare(const struct chromeos_laptop *src)
+{
+ struct chromeos_laptop *cros_laptop;
+ struct i2c_peripheral *i2c_dev;
+ struct i2c_board_info *info;
+ int error;
+ int i;
+
+ cros_laptop = kzalloc(sizeof(*cros_laptop), GFP_KERNEL);
+ if (!cros_laptop)
+ return ERR_PTR(-ENOMEM);
+
+ cros_laptop->i2c_peripherals = kmemdup(src->i2c_peripherals,
+ src->num_i2c_peripherals *
+ sizeof(*src->i2c_peripherals),
+ GFP_KERNEL);
+ if (!cros_laptop->i2c_peripherals) {
+ error = -ENOMEM;
+ goto err_free_cros_laptop;
+ }
+
+ cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
+
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+ info = &i2c_dev->board_info;
+
+ error = chromeos_laptop_setup_irq(i2c_dev);
+ if (error)
+ goto err_destroy_cros_peripherals;
+
+ /* We need to deep-copy properties */
+ if (info->properties) {
+ info->properties =
+ property_entries_dup(info->properties);
+ if (IS_ERR(info->properties)) {
+ error = PTR_ERR(info->properties);
+ goto err_destroy_cros_peripherals;
+ }
+ }
+ }
+
+ return cros_laptop;
+
+err_destroy_cros_peripherals:
+ while (--i >= 0) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+ info = &i2c_dev->board_info;
+ if (info->properties)
+ property_entries_free(info->properties);
+ }
+ kfree(cros_laptop->i2c_peripherals);
+err_free_cros_laptop:
+ kfree(cros_laptop);
+ return ERR_PTR(error);
+}
+
+static void chromeos_laptop_destroy(const struct chromeos_laptop *cros_laptop)
+{
+ struct i2c_peripheral *i2c_dev;
+ struct i2c_board_info *info;
+ int i;
+
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+ info = &i2c_dev->board_info;
+
+ if (i2c_dev->client)
+ i2c_unregister_device(i2c_dev->client);
+
+ if (info->properties)
+ property_entries_free(info->properties);
+ }
+
+ kfree(cros_laptop->i2c_peripherals);
+ kfree(cros_laptop);
+}
static int __init chromeos_laptop_init(void)
{
- int ret;
+ const struct dmi_system_id *dmi_id;
+ int error;
- if (!dmi_check_system(chromeos_laptop_dmi_table)) {
- pr_debug("%s unsupported system.\n", __func__);
+ dmi_id = dmi_first_match(chromeos_laptop_dmi_table);
+ if (!dmi_id) {
+ pr_debug("unsupported system\n");
return -ENODEV;
}
- ret = platform_driver_register(&cros_platform_driver);
- if (ret)
- return ret;
+ pr_debug("DMI Matched %s\n", dmi_id->ident);
+
+ cros_laptop = chromeos_laptop_prepare((void *)dmi_id->driver_data);
+ if (IS_ERR(cros_laptop))
+ return PTR_ERR(cros_laptop);
- cros_platform_device = platform_device_alloc("chromeos_laptop", -1);
- if (!cros_platform_device) {
- ret = -ENOMEM;
- goto fail_platform_device1;
+ error = bus_register_notifier(&i2c_bus_type,
+ &chromeos_laptop_i2c_notifier);
+ if (error) {
+ pr_err("failed to register i2c bus notifier: %d\n", error);
+ chromeos_laptop_destroy(cros_laptop);
+ return error;
}
- ret = platform_device_add(cros_platform_device);
- if (ret)
- goto fail_platform_device2;
+ /*
+ * Scan adapters that have been registered before we installed
+ * the notifier to make sure we do not miss any devices.
+ */
+ i2c_for_each_dev(NULL, chromeos_laptop_scan_adapter);
return 0;
-
-fail_platform_device2:
- platform_device_put(cros_platform_device);
-fail_platform_device1:
- platform_driver_unregister(&cros_platform_driver);
- return ret;
}
static void __exit chromeos_laptop_exit(void)
{
- if (als)
- i2c_unregister_device(als);
- if (tp)
- i2c_unregister_device(tp);
- if (ts)
- i2c_unregister_device(ts);
-
- platform_device_unregister(cros_platform_device);
- platform_driver_unregister(&cros_platform_driver);
+ bus_unregister_notifier(&i2c_bus_type, &chromeos_laptop_i2c_notifier);
+ chromeos_laptop_destroy(cros_laptop);
}
module_init(chromeos_laptop_init);
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 0e88e18362c1..cc265ed8deb7 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -211,6 +211,58 @@ static int cros_ec_console_log_release(struct inode *inode, struct file *file)
return 0;
}
+static ssize_t cros_ec_pdinfo_read(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ char read_buf[EC_USB_PD_MAX_PORTS * 40], *p = read_buf;
+ struct cros_ec_debugfs *debug_info = file->private_data;
+ struct cros_ec_device *ec_dev = debug_info->ec->ec_dev;
+ struct {
+ struct cros_ec_command msg;
+ union {
+ struct ec_response_usb_pd_control_v1 resp;
+ struct ec_params_usb_pd_control params;
+ };
+ } __packed ec_buf;
+ struct cros_ec_command *msg;
+ struct ec_response_usb_pd_control_v1 *resp;
+ struct ec_params_usb_pd_control *params;
+ int i;
+
+ msg = &ec_buf.msg;
+ params = (struct ec_params_usb_pd_control *)msg->data;
+ resp = (struct ec_response_usb_pd_control_v1 *)msg->data;
+
+ msg->command = EC_CMD_USB_PD_CONTROL;
+ msg->version = 1;
+ msg->insize = sizeof(*resp);
+ msg->outsize = sizeof(*params);
+
+ /*
+ * Read status from all PD ports until failure, typically caused
+ * by attempting to read status on a port that doesn't exist.
+ */
+ for (i = 0; i < EC_USB_PD_MAX_PORTS; ++i) {
+ params->port = i;
+ params->role = 0;
+ params->mux = 0;
+ params->swap = 0;
+
+ if (cros_ec_cmd_xfer_status(ec_dev, msg) < 0)
+ break;
+
+ p += scnprintf(p, sizeof(read_buf) + read_buf - p,
+ "p%d: %s en:%.2x role:%.2x pol:%.2x\n", i,
+ resp->state, resp->enabled, resp->role,
+ resp->polarity);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ read_buf, p - read_buf);
+}
+
const struct file_operations cros_ec_console_log_fops = {
.owner = THIS_MODULE,
.open = cros_ec_console_log_open,
@@ -220,6 +272,13 @@ const struct file_operations cros_ec_console_log_fops = {
.release = cros_ec_console_log_release,
};
+const struct file_operations cros_ec_pdinfo_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = cros_ec_pdinfo_read,
+ .llseek = default_llseek,
+};
+
static int ec_read_version_supported(struct cros_ec_dev *ec)
{
struct ec_params_get_cmd_versions_v1 *params;
@@ -288,7 +347,7 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
init_waitqueue_head(&debug_info->log_wq);
if (!debugfs_create_file("console_log",
- S_IFREG | S_IRUGO,
+ S_IFREG | 0444,
debug_info->dir,
debug_info,
&cros_ec_console_log_fops))
@@ -341,7 +400,7 @@ static int cros_ec_create_panicinfo(struct cros_ec_debugfs *debug_info)
debug_info->panicinfo_blob.size = ret;
if (!debugfs_create_blob("panicinfo",
- S_IFREG | S_IRUGO,
+ S_IFREG | 0444,
debug_info->dir,
&debug_info->panicinfo_blob)) {
ret = -ENOMEM;
@@ -355,6 +414,15 @@ free:
return ret;
}
+static int cros_ec_create_pdinfo(struct cros_ec_debugfs *debug_info)
+{
+ if (!debugfs_create_file("pdinfo", 0444, debug_info->dir, debug_info,
+ &cros_ec_pdinfo_fops))
+ return -ENOMEM;
+
+ return 0;
+}
+
int cros_ec_debugfs_init(struct cros_ec_dev *ec)
{
struct cros_ec_platform *ec_platform = dev_get_platdata(ec->dev);
@@ -379,6 +447,10 @@ int cros_ec_debugfs_init(struct cros_ec_dev *ec)
if (ret)
goto remove_debugfs;
+ ret = cros_ec_create_pdinfo(debug_info);
+ if (ret)
+ goto remove_debugfs;
+
ec->debug_info = debug_info;
return 0;
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index af89e82eecd2..3682e1539251 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
+#include <linux/suspend.h>
#define DRV_NAME "cros_ec_lpcs"
#define ACPI_DRV_NAME "GOOG0004"
@@ -235,6 +236,9 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
cros_ec_get_next_event(ec_dev, NULL) > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier, 0,
ec_dev);
+
+ if (value == ACPI_NOTIFY_DEVICE_WAKE)
+ pm_system_wakeup();
}
static int cros_ec_lpc_probe(struct platform_device *pdev)
@@ -342,6 +346,18 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
},
},
{
+ /*
+ * If the box is running custom coreboot firmware then the
+ * DMI BIOS version string will not be matched by "Google_",
+ * but the system vendor string will still be matched by
+ * "GOOGLE".
+ */
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ },
+ },
+ {
/* x86-link, the Chromebook Pixel. */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index da0a719d32f7..5a6db3fe213a 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -34,10 +34,12 @@
#include <linux/types.h>
#include <linux/uaccess.h>
+#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
+
/* Accessor functions */
-static ssize_t show_ec_reboot(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t reboot_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int count = 0;
@@ -48,9 +50,9 @@ static ssize_t show_ec_reboot(struct device *dev,
return count;
}
-static ssize_t store_ec_reboot(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t reboot_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
static const struct {
const char * const str;
@@ -70,8 +72,7 @@ static ssize_t store_ec_reboot(struct device *dev,
int got_cmd = 0, offset = 0;
int i;
int ret;
- struct cros_ec_dev *ec = container_of(dev,
- struct cros_ec_dev, class_dev);
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
msg = kmalloc(sizeof(*msg) + sizeof(*param), GFP_KERNEL);
if (!msg)
@@ -114,22 +115,16 @@ static ssize_t store_ec_reboot(struct device *dev,
msg->command = EC_CMD_REBOOT_EC + ec->cmd_offset;
msg->outsize = sizeof(*param);
msg->insize = 0;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
- if (ret < 0) {
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
count = ret;
- goto exit;
- }
- if (msg->result != EC_RES_SUCCESS) {
- dev_dbg(ec->dev, "EC result %d\n", msg->result);
- count = -EINVAL;
- }
exit:
kfree(msg);
return count;
}
-static ssize_t show_ec_version(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
static const char * const image_names[] = {"unknown", "RO", "RW"};
struct ec_response_get_version *r_ver;
@@ -138,8 +133,7 @@ static ssize_t show_ec_version(struct device *dev,
struct cros_ec_command *msg;
int ret;
int count = 0;
- struct cros_ec_dev *ec = container_of(dev,
- struct cros_ec_dev, class_dev);
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
msg = kmalloc(sizeof(*msg) + EC_HOST_PARAM_SIZE, GFP_KERNEL);
if (!msg)
@@ -150,17 +144,11 @@ static ssize_t show_ec_version(struct device *dev,
msg->command = EC_CMD_GET_VERSION + ec->cmd_offset;
msg->insize = sizeof(*r_ver);
msg->outsize = 0;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0) {
count = ret;
goto exit;
}
- if (msg->result != EC_RES_SUCCESS) {
- count = scnprintf(buf, PAGE_SIZE,
- "ERROR: EC returned %d\n", msg->result);
- goto exit;
- }
-
r_ver = (struct ec_response_get_version *)msg->data;
/* Strings should be null-terminated, but let's be sure. */
r_ver->version_string_ro[sizeof(r_ver->version_string_ro) - 1] = '\0';
@@ -237,14 +225,13 @@ exit:
return count;
}
-static ssize_t show_ec_flashinfo(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t flashinfo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ec_response_flash_info *resp;
struct cros_ec_command *msg;
int ret;
- struct cros_ec_dev *ec = container_of(dev,
- struct cros_ec_dev, class_dev);
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
msg = kmalloc(sizeof(*msg) + sizeof(*resp), GFP_KERNEL);
if (!msg)
@@ -255,14 +242,9 @@ static ssize_t show_ec_flashinfo(struct device *dev,
msg->command = EC_CMD_FLASH_INFO + ec->cmd_offset;
msg->insize = sizeof(*resp);
msg->outsize = 0;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto exit;
- if (msg->result != EC_RES_SUCCESS) {
- ret = scnprintf(buf, PAGE_SIZE,
- "ERROR: EC returned %d\n", msg->result);
- goto exit;
- }
resp = (struct ec_response_flash_info *)msg->data;
@@ -276,21 +258,102 @@ exit:
return ret;
}
+/* Keyboard wake angle control */
+static ssize_t kb_wake_angle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ struct ec_response_motion_sense *resp;
+ struct ec_params_motion_sense *param;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kmalloc(sizeof(*msg) + EC_HOST_PARAM_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ param = (struct ec_params_motion_sense *)msg->data;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->version = 2;
+ param->cmd = MOTIONSENSE_CMD_KB_WAKE_ANGLE;
+ param->kb_wake_angle.data = EC_MOTION_SENSE_NO_VALUE;
+ msg->outsize = sizeof(*param);
+ msg->insize = sizeof(*resp);
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ resp = (struct ec_response_motion_sense *)msg->data;
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", resp->kb_wake_angle.ret);
+exit:
+ kfree(msg);
+ return ret;
+}
+
+static ssize_t kb_wake_angle_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ struct ec_params_motion_sense *param;
+ struct cros_ec_command *msg;
+ u16 angle;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &angle);
+ if (ret)
+ return ret;
+
+ msg = kmalloc(sizeof(*msg) + EC_HOST_PARAM_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ param = (struct ec_params_motion_sense *)msg->data;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->version = 2;
+ param->cmd = MOTIONSENSE_CMD_KB_WAKE_ANGLE;
+ param->kb_wake_angle.data = angle;
+ msg->outsize = sizeof(*param);
+ msg->insize = sizeof(struct ec_response_motion_sense);
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ kfree(msg);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
/* Module initialization */
-static DEVICE_ATTR(reboot, S_IWUSR | S_IRUGO, show_ec_reboot, store_ec_reboot);
-static DEVICE_ATTR(version, S_IRUGO, show_ec_version, NULL);
-static DEVICE_ATTR(flashinfo, S_IRUGO, show_ec_flashinfo, NULL);
+static DEVICE_ATTR_RW(reboot);
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_RO(flashinfo);
+static DEVICE_ATTR_RW(kb_wake_angle);
static struct attribute *__ec_attrs[] = {
+ &dev_attr_kb_wake_angle.attr,
&dev_attr_reboot.attr,
&dev_attr_version.attr,
&dev_attr_flashinfo.attr,
NULL,
};
+static umode_t cros_ec_ctrl_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+
+ if (a == &dev_attr_kb_wake_angle.attr && !ec->has_kb_wake_angle)
+ return 0;
+
+ return a->mode;
+}
+
struct attribute_group cros_ec_attr_group = {
.attrs = __ec_attrs,
+ .is_visible = cros_ec_ctrl_visible,
};
EXPORT_SYMBOL(cros_ec_attr_group);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 39d06dd1f63a..566644bb496a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -154,7 +154,7 @@ config DELL_LAPTOP
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on RFKILL || RFKILL = n
depends on SERIO_I8042
- select DELL_SMBIOS
+ depends on DELL_SMBIOS
select POWER_SUPPLY
select LEDS_CLASS
select NEW_LEDS
@@ -168,8 +168,8 @@ config DELL_WMI
depends on DMI
depends on INPUT
depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on DELL_SMBIOS
select DELL_WMI_DESCRIPTOR
- select DELL_SMBIOS
select INPUT_SPARSEKMAP
---help---
Say Y here if you want to support WMI-based hotkeys on Dell laptops.
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index d4aeac3477f5..f086469ea740 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -178,8 +178,10 @@ static int asus_wireless_remove(struct acpi_device *adev)
{
struct asus_wireless_data *data = acpi_driver_data(adev);
- if (data->wq)
+ if (data->wq) {
+ devm_led_classdev_unregister(&adev->dev, &data->led);
destroy_workqueue(data->wq);
+ }
return 0;
}
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index b3285175f20f..78ccf936d356 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -452,7 +452,6 @@ static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
u64 ns;
- u32 remainder;
unsigned long flags;
struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
struct pch_ts_regs __iomem *regs = pch_dev->regs;
@@ -461,8 +460,7 @@ static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
ns = pch_systime_read(regs);
spin_unlock_irqrestore(&pch_dev->register_lock, flags);
- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
@@ -474,8 +472,7 @@ static int ptp_pch_settime(struct ptp_clock_info *ptp,
struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
struct pch_ts_regs __iomem *regs = pch_dev->regs;
- ns = ts->tv_sec * 1000000000ULL;
- ns += ts->tv_nsec;
+ ns = timespec64_to_ns(ts);
spin_lock_irqsave(&pch_dev->register_lock, flags);
pch_systime_write(regs, ns);
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 38d49dbbf9b7..4635cb35008c 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -200,10 +200,10 @@ config PWM_IMX
will be called pwm-imx.
config PWM_JZ4740
- tristate "Ingenic JZ4740 PWM support"
- depends on MACH_JZ4740
+ tristate "Ingenic JZ47xx PWM support"
+ depends on MACH_INGENIC
help
- Generic PWM framework driver for Ingenic JZ4740 based
+ Generic PWM framework driver for Ingenic JZ47xx based
machines.
To compile this driver as a module, choose M here: the module
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index acd3ce8ecf3f..4fb1be246c44 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -401,7 +401,6 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
if (tcbpwm == NULL) {
err = -ENOMEM;
- dev_err(&pdev->dev, "failed to allocate memory\n");
goto err_free_tc;
}
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 2ba5c3a398ff..08cbe8120588 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -35,6 +35,7 @@
#define MX3_PWMSAR 0x0C /* PWM Sample Register */
#define MX3_PWMPR 0x10 /* PWM Period Register */
#define MX3_PWMCR_PRESCALER(x) ((((x) - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_STOPEN (1 << 25)
#define MX3_PWMCR_DOZEEN (1 << 24)
#define MX3_PWMCR_WAITEN (1 << 23)
#define MX3_PWMCR_DBGEN (1 << 22)
@@ -210,7 +211,7 @@ static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm,
writel(period_cycles, imx->mmio_base + MX3_PWMPR);
cr = MX3_PWMCR_PRESCALER(prescale) |
- MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+ MX3_PWMCR_STOPEN | MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH |
MX3_PWMCR_EN;
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index a75ff3622450..a7b134af5e04 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -18,6 +18,7 @@
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
@@ -71,9 +72,15 @@ static void jz4740_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
uint32_t ctrl = jz4740_timer_get_ctrl(pwm->hwpwm);
+ /* Disable PWM output.
+ * In TCU2 mode (channel 1/2 on JZ4750+), this must be done before the
+ * counter is stopped, while in TCU1 mode the order does not matter.
+ */
ctrl &= ~JZ_TIMER_CTRL_PWM_ENABLE;
- jz4740_timer_disable(pwm->hwpwm);
jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
+
+ /* Stop counter */
+ jz4740_timer_disable(pwm->hwpwm);
}
static int jz4740_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -124,10 +131,29 @@ static int jz4740_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
+static int jz4740_pwm_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm, enum pwm_polarity polarity)
+{
+ uint32_t ctrl = jz4740_timer_get_ctrl(pwm->pwm);
+
+ switch (polarity) {
+ case PWM_POLARITY_NORMAL:
+ ctrl &= ~JZ_TIMER_CTRL_PWM_ACTIVE_LOW;
+ break;
+ case PWM_POLARITY_INVERSED:
+ ctrl |= JZ_TIMER_CTRL_PWM_ACTIVE_LOW;
+ break;
+ }
+
+ jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
+ return 0;
+}
+
static const struct pwm_ops jz4740_pwm_ops = {
.request = jz4740_pwm_request,
.free = jz4740_pwm_free,
.config = jz4740_pwm_config,
+ .set_polarity = jz4740_pwm_set_polarity,
.enable = jz4740_pwm_enable,
.disable = jz4740_pwm_disable,
.owner = THIS_MODULE,
@@ -149,6 +175,8 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
jz4740->chip.ops = &jz4740_pwm_ops;
jz4740->chip.npwm = NUM_PWM;
jz4740->chip.base = -1;
+ jz4740->chip.of_xlate = of_pwm_xlate_with_flags;
+ jz4740->chip.of_pwm_n_cells = 3;
platform_set_drvdata(pdev, jz4740);
@@ -162,9 +190,20 @@ static int jz4740_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&jz4740->chip);
}
+#ifdef CONFIG_OF
+static const struct of_device_id jz4740_pwm_dt_ids[] = {
+ { .compatible = "ingenic,jz4740-pwm", },
+ { .compatible = "ingenic,jz4770-pwm", },
+ { .compatible = "ingenic,jz4780-pwm", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4740_pwm_dt_ids);
+#endif
+
static struct platform_driver jz4740_pwm_driver = {
.driver = {
.name = "jz4740-pwm",
+ .of_match_table = of_match_ptr(jz4740_pwm_dt_ids),
},
.probe = jz4740_pwm_probe,
.remove = jz4740_pwm_remove,
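With of_pwm_xlate_with_flags and three #pwm-cells, the PWM index, period and polarity flags all come straight from the consumer's device-tree specifier. A hypothetical consumer sketch (names are illustrative, not taken from the patch):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

static int example_consumer_probe(struct device *dev)
{
	struct pwm_device *pwm = devm_pwm_get(dev, NULL);
	struct pwm_state state;

	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/* Period and polarity (the third DT cell) are filled in from the
	 * device-tree args by pwm_init_state(). */
	pwm_init_state(pwm, &state);
	state.duty_cycle = state.period / 2;
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}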
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index f5d97e0ad52b..328c124773b2 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -29,7 +29,9 @@
#define PWMGDUR 0x0c
#define PWMWAVENUM 0x28
#define PWMDWIDTH 0x2c
+#define PWM45DWIDTH_FIXUP 0x30
#define PWMTHRES 0x30
+#define PWM45THRES_FIXUP 0x34
#define PWM_CLK_DIV_MAX 7
@@ -54,6 +56,7 @@ static const char * const mtk_pwm_clk_name[MTK_CLK_MAX] = {
struct mtk_pwm_platform_data {
unsigned int num_pwms;
+ bool pwm45_fixup;
};
/**
@@ -66,6 +69,7 @@ struct mtk_pwm_chip {
struct pwm_chip chip;
void __iomem *regs;
struct clk *clks[MTK_CLK_MAX];
+ const struct mtk_pwm_platform_data *soc;
};
static const unsigned int mtk_pwm_reg_offset[] = {
@@ -131,18 +135,25 @@ static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
struct clk *clk = pc->clks[MTK_CLK_PWM1 + pwm->hwpwm];
- u32 resolution, clkdiv = 0;
+ u32 clkdiv = 0, cnt_period, cnt_duty, reg_width = PWMDWIDTH,
+ reg_thres = PWMTHRES;
+ u64 resolution;
int ret;
ret = mtk_pwm_clk_enable(chip, pwm);
if (ret < 0)
return ret;
- resolution = NSEC_PER_SEC / clk_get_rate(clk);
+ /* Use a resolution in picoseconds for higher accuracy */
+ resolution = (u64)NSEC_PER_SEC * 1000;
+ do_div(resolution, clk_get_rate(clk));
- while (period_ns / resolution > 8191) {
+ cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution);
+ while (cnt_period > 8191) {
resolution *= 2;
clkdiv++;
+ cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000,
+ resolution);
}
if (clkdiv > PWM_CLK_DIV_MAX) {
@@ -151,9 +162,19 @@ static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return -EINVAL;
}
+ if (pc->soc->pwm45_fixup && pwm->hwpwm > 2) {
+ /*
+ * PWM[4,5] has distinct offset for PWMDWIDTH and PWMTHRES
+ * from the other PWMs on MT7623.
+ */
+ reg_width = PWM45DWIDTH_FIXUP;
+ reg_thres = PWM45THRES_FIXUP;
+ }
+
+ cnt_duty = DIV_ROUND_CLOSEST_ULL((u64)duty_ns * 1000, resolution);
mtk_pwm_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | clkdiv);
- mtk_pwm_writel(pc, pwm->hwpwm, PWMDWIDTH, period_ns / resolution);
- mtk_pwm_writel(pc, pwm->hwpwm, PWMTHRES, duty_ns / resolution);
+ mtk_pwm_writel(pc, pwm->hwpwm, reg_width, cnt_period);
+ mtk_pwm_writel(pc, pwm->hwpwm, reg_thres, cnt_duty);
mtk_pwm_clk_disable(chip, pwm);
@@ -211,6 +232,7 @@ static int mtk_pwm_probe(struct platform_device *pdev)
data = of_device_get_match_data(&pdev->dev);
if (data == NULL)
return -EINVAL;
+ pc->soc = data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pc->regs = devm_ioremap_resource(&pdev->dev, res);
@@ -251,14 +273,17 @@ static int mtk_pwm_remove(struct platform_device *pdev)
static const struct mtk_pwm_platform_data mt2712_pwm_data = {
.num_pwms = 8,
+ .pwm45_fixup = false,
};
static const struct mtk_pwm_platform_data mt7622_pwm_data = {
.num_pwms = 6,
+ .pwm45_fixup = false,
};
static const struct mtk_pwm_platform_data mt7623_pwm_data = {
.num_pwms = 5,
+ .pwm45_fixup = true,
};
static const struct of_device_id mtk_pwm_of_match[] = {
@@ -280,5 +305,4 @@ static struct platform_driver mtk_pwm_driver = {
module_platform_driver(mtk_pwm_driver);
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_ALIAS("platform:mtk-pwm");
MODULE_LICENSE("GPL");
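To see what the switch to picosecond resolution buys, here is the arithmetic of the loop above with assumed example numbers (the clock rates are illustrative, not from the patch):

/*
 * 25 MHz source clock, 1 ms requested period:
 *   resolution = 10^12 ps / 25,000,000 Hz        = 40,000 ps per count
 *   cnt_period = 1,000,000 ns * 1000 / 40,000 ps = 25,000 counts
 *
 * 25,000 exceeds the 8191-count limit, so the loop doubles the resolution
 * and bumps clkdiv until the period fits:
 *   clkdiv = 1: resolution =  80,000 ps, cnt_period = 12,500
 *   clkdiv = 2: resolution = 160,000 ps, cnt_period =  6,250  (fits)
 *
 * With the old NSEC_PER_SEC / clk_get_rate() computation, a 66 MHz clock
 * would truncate 15.15 ns to 15 ns per count; in picoseconds the same
 * division keeps 15,151 ps, so every programmed count stays accurate.
 */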
diff --git a/drivers/pwm/pwm-puv3.c b/drivers/pwm/pwm-puv3.c
index ed6007b27585..754fd9a98f6b 100644
--- a/drivers/pwm/pwm-puv3.c
+++ b/drivers/pwm/pwm-puv3.c
@@ -107,10 +107,8 @@ static int pwm_probe(struct platform_device *pdev)
int ret;
puv3 = devm_kzalloc(&pdev->dev, sizeof(*puv3), GFP_KERNEL);
- if (puv3 == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!puv3)
return -ENOMEM;
- }
puv3->clk = devm_clk_get(&pdev->dev, "OST_CLK");
if (IS_ERR(puv3->clk))
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 1c85ecc9e7ac..91d11f2e2fef 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -134,16 +134,12 @@ static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, int duty_ns,
static int rcar_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
-
- return clk_prepare_enable(rp->clk);
+ return pm_runtime_get_sync(chip->dev);
}
static void rcar_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
-
- clk_disable_unprepare(rp->clk);
+ pm_runtime_put(chip->dev);
}
static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -156,8 +152,12 @@ static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (div < 0)
return div;
- /* Let the core driver set pwm->period if disabled and duty_ns == 0 */
- if (!pwm_is_enabled(pwm) && !duty_ns)
+ /*
+ * Let the core driver set pwm->period if disabled and duty_ns == 0.
+ * However, this driver must not apply a new duty_ns while the current
+ * duty_cycle is unset.
+ */
+ if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle)
return 0;
rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
@@ -258,11 +258,53 @@ static const struct of_device_id rcar_pwm_of_table[] = {
};
MODULE_DEVICE_TABLE(of, rcar_pwm_of_table);
+#ifdef CONFIG_PM_SLEEP
+static struct pwm_device *rcar_pwm_dev_to_pwm_dev(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = &rcar_pwm->chip;
+
+ return &chip->pwms[0];
+}
+
+static int rcar_pwm_suspend(struct device *dev)
+{
+ struct pwm_device *pwm = rcar_pwm_dev_to_pwm_dev(dev);
+
+ if (!test_bit(PWMF_REQUESTED, &pwm->flags))
+ return 0;
+
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int rcar_pwm_resume(struct device *dev)
+{
+ struct pwm_device *pwm = rcar_pwm_dev_to_pwm_dev(dev);
+
+ if (!test_bit(PWMF_REQUESTED, &pwm->flags))
+ return 0;
+
+ pm_runtime_get_sync(dev);
+
+ rcar_pwm_config(pwm->chip, pwm, pwm->state.duty_cycle,
+ pwm->state.period);
+ if (pwm_is_enabled(pwm))
+ rcar_pwm_enable(pwm->chip, pwm);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+static SIMPLE_DEV_PM_OPS(rcar_pwm_pm_ops, rcar_pwm_suspend, rcar_pwm_resume);
+
static struct platform_driver rcar_pwm_driver = {
.probe = rcar_pwm_probe,
.remove = rcar_pwm_remove,
.driver = {
.name = "pwm-rcar",
+ .pm = &rcar_pwm_pm_ops,
.of_match_table = of_match_ptr(rcar_pwm_of_table),
}
};
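The ->request()/->free() callbacks above now defer power handling to runtime PM rather than toggling the clock directly; suspend simply drops the reference and resume takes it back and reprograms the channel. A minimal sketch of the request/free half of that pattern, assuming runtime PM has already been enabled in probe (as pwm-rcar does):

#include <linux/pm_runtime.h>
#include <linux/pwm.h>

static int example_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
	/* Power the device (and its clock domain) up for as long as the
	 * PWM is requested. */
	return pm_runtime_get_sync(chip->dev);
}

static void example_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
	/* Drop the reference taken in ->request(). */
	pm_runtime_put(chip->dev);
}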
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 1ac9e4384142..7c13e2505080 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Low-Power Timer PWM driver
*
@@ -5,8 +6,6 @@
*
* Author: Gerald Baeza <gerald.baeza@st.com>
*
- * License terms: GNU General Public License (GPL), version 2
- *
* Inspired by Gerald Baeza's pwm-stm32 driver
*/
@@ -203,6 +202,8 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
priv->chip.dev = &pdev->dev;
priv->chip.ops = &stm32_pwm_lp_ops;
priv->chip.npwm = 1;
+ priv->chip.of_xlate = of_pwm_xlate_with_flags;
+ priv->chip.of_pwm_n_cells = 3;
ret = pwmchip_add(&priv->chip);
if (ret < 0)
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 6139512aab7b..2708212933f7 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2016
*
* Author: Gerald Baeza <gerald.baeza@st.com>
*
- * License terms: GNU General Public License (GPL), version 2
- *
* Inspired by timer-stm32.c from Maxime Coquelin
* pwm-atmel.c from Bo Shen
*/
@@ -21,7 +20,7 @@
struct stm32_pwm {
struct pwm_chip chip;
- struct device *dev;
+ struct mutex lock; /* protect pwm config/enable */
struct clk *clk;
struct regmap *regmap;
u32 max_arr;
@@ -214,9 +213,23 @@ static int stm32_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
+static int stm32_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
+ int ret;
+
+ /* protect common prescaler for all active channels */
+ mutex_lock(&priv->lock);
+ ret = stm32_pwm_apply(chip, pwm, state);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
static const struct pwm_ops stm32pwm_ops = {
.owner = THIS_MODULE,
- .apply = stm32_pwm_apply,
+ .apply = stm32_pwm_apply_locked,
};
static int stm32_pwm_set_breakinput(struct stm32_pwm *priv,
@@ -336,6 +349,7 @@ static int stm32_pwm_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ mutex_init(&priv->lock);
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 334199c58f1d..470d4f71e7eb 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -73,7 +73,6 @@ static const u32 prescaler_table[] = {
struct sun4i_pwm_data {
bool has_prescaler_bypass;
- bool has_rdy;
unsigned int npwm;
};
@@ -117,7 +116,8 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
- if ((val == PWM_PRESCAL_MASK) && sun4i_pwm->data->has_prescaler_bypass)
+ if ((PWM_REG_PRESCAL(val, pwm->hwpwm) == PWM_PRESCAL_MASK) &&
+ sun4i_pwm->data->has_prescaler_bypass)
prescaler = 1;
else
prescaler = prescaler_table[PWM_REG_PRESCAL(val, pwm->hwpwm)];
@@ -130,7 +130,8 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
else
state->polarity = PWM_POLARITY_INVERSED;
- if (val & BIT_CH(PWM_CLK_GATING | PWM_EN, pwm->hwpwm))
+ if ((val & BIT_CH(PWM_CLK_GATING | PWM_EN, pwm->hwpwm)) ==
+ BIT_CH(PWM_CLK_GATING | PWM_EN, pwm->hwpwm))
state->enabled = true;
else
state->enabled = false;
@@ -311,52 +312,37 @@ static const struct pwm_ops sun4i_pwm_ops = {
.owner = THIS_MODULE,
};
-static const struct sun4i_pwm_data sun4i_pwm_data_a10 = {
+static const struct sun4i_pwm_data sun4i_pwm_dual_nobypass = {
.has_prescaler_bypass = false,
- .has_rdy = false,
.npwm = 2,
};
-static const struct sun4i_pwm_data sun4i_pwm_data_a10s = {
+static const struct sun4i_pwm_data sun4i_pwm_dual_bypass = {
.has_prescaler_bypass = true,
- .has_rdy = true,
.npwm = 2,
};
-static const struct sun4i_pwm_data sun4i_pwm_data_a13 = {
+static const struct sun4i_pwm_data sun4i_pwm_single_bypass = {
.has_prescaler_bypass = true,
- .has_rdy = true,
- .npwm = 1,
-};
-
-static const struct sun4i_pwm_data sun4i_pwm_data_a20 = {
- .has_prescaler_bypass = true,
- .has_rdy = true,
- .npwm = 2,
-};
-
-static const struct sun4i_pwm_data sun4i_pwm_data_h3 = {
- .has_prescaler_bypass = true,
- .has_rdy = true,
.npwm = 1,
};
static const struct of_device_id sun4i_pwm_dt_ids[] = {
{
.compatible = "allwinner,sun4i-a10-pwm",
- .data = &sun4i_pwm_data_a10,
+ .data = &sun4i_pwm_dual_nobypass,
}, {
.compatible = "allwinner,sun5i-a10s-pwm",
- .data = &sun4i_pwm_data_a10s,
+ .data = &sun4i_pwm_dual_bypass,
}, {
.compatible = "allwinner,sun5i-a13-pwm",
- .data = &sun4i_pwm_data_a13,
+ .data = &sun4i_pwm_single_bypass,
}, {
.compatible = "allwinner,sun7i-a20-pwm",
- .data = &sun4i_pwm_data_a20,
+ .data = &sun4i_pwm_dual_bypass,
}, {
.compatible = "allwinner,sun8i-h3-pwm",
- .data = &sun4i_pwm_data_h3,
+ .data = &sun4i_pwm_single_bypass,
}, {
/* sentinel */
},
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 83f2b0b15712..7c71cdb8a9d8 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -273,7 +273,8 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
ret = device_register(&export->child);
if (ret) {
clear_bit(PWMF_EXPORTED, &pwm->flags);
- kfree(export);
+ put_device(&export->child);
+ export = NULL;
return ret;
}
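The fix above follows the rule documented for device_register(): once it has been called, the embedded struct device owns the memory even on failure, so the error path must call put_device() (which ends up in the release() callback) rather than kfree(). A minimal sketch of the pattern with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct example_export {
	struct device child;	/* embedded; freed only from release() */
};

static void example_release(struct device *dev)
{
	kfree(container_of(dev, struct example_export, child));
}

static int example_register(struct example_export *export)
{
	int ret;

	export->child.release = example_release;
	ret = device_register(&export->child);
	if (ret) {
		/* Do not kfree() here: drop the reference instead. */
		put_device(&export->child);
		return ret;
	}

	return 0;
}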
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 9d27016c899e..0434ab7b6497 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req,
tx->callback = dma_xfer_callback;
tx->callback_param = req;
- req->dmach = chan;
- req->sync = sync;
req->status = DMA_IN_PROGRESS;
- init_completion(&req->req_comp);
kref_get(&req->refcount);
cookie = dmaengine_submit(tx);
@@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
if (!req)
return -ENOMEM;
- kref_init(&req->refcount);
-
ret = get_dma_channel(priv);
if (ret) {
kfree(req);
return ret;
}
+ chan = priv->dmach;
+
+ kref_init(&req->refcount);
+ init_completion(&req->req_comp);
+ req->dir = dir;
+ req->filp = filp;
+ req->priv = priv;
+ req->dmach = chan;
+ req->sync = sync;
/*
* If parameter loc_addr != NULL, we are transferring data from/to
@@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
xfer->offset, xfer->length);
}
- req->dir = dir;
- req->filp = filp;
- req->priv = priv;
- chan = priv->dmach;
-
nents = dma_map_sg(chan->device->dev,
req->sgt.sgl, req->sgt.nents, dir);
if (nents == 0) {
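The reordering above matters because once the descriptor is submitted and issued, the DMA completion callback and the kref release path can run asynchronously at any time; every field they touch has to be valid beforehand. A minimal sketch of the safe ordering (structure and function names are hypothetical):

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/kref.h>

struct example_req {
	struct kref refcount;
	struct completion done;
	struct dma_chan *chan;
};

static int example_submit(struct example_req *req,
			  struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie;

	/* Initialize everything the callback/release path may use... */
	kref_init(&req->refcount);
	init_completion(&req->done);
	tx->callback_param = req;
	kref_get(&req->refcount);	/* reference owned by the callback */

	/* ...before the descriptor is handed to the engine. */
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(req->chan);
	return 0;
}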
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
index 8e70a627e0bb..cbbafdcaaecb 100644
--- a/drivers/remoteproc/qcom_q6v5_pil.c
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -1083,6 +1083,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
dev_err(qproc->dev, "unable to resolve mba region\n");
return ret;
}
+ of_node_put(node);
qproc->mba_phys = r.start;
qproc->mba_size = resource_size(&r);
@@ -1100,6 +1101,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
dev_err(qproc->dev, "unable to resolve mpss region\n");
return ret;
}
+ of_node_put(node);
qproc->mpss_phys = qproc->mpss_reloc = r.start;
qproc->mpss_size = resource_size(&r);
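The added of_node_put() calls balance the reference taken by of_parse_phandle(). A minimal sketch of the full pattern (resolve the phandle, copy out the resource, drop the node), with hypothetical names:

#include <linux/of.h>
#include <linux/of_address.h>

static int example_resolve_region(struct device_node *np,
				  phys_addr_t *base, size_t *size)
{
	struct device_node *node;
	struct resource r;
	int ret;

	node = of_parse_phandle(np, "memory-region", 0);  /* takes a reference */
	if (!node)
		return -EINVAL;

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);	/* drop it once the resource has been copied */
	if (ret)
		return ret;

	*base = r.start;
	*size = resource_size(&r);
	return 0;
}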
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 6d9c5832ce47..a9609d971f7f 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -1163,7 +1163,7 @@ int rproc_trigger_recovery(struct rproc *rproc)
if (ret)
return ret;
- ret = rproc_stop(rproc, false);
+ ret = rproc_stop(rproc, true);
if (ret)
goto unlock_mutex;
@@ -1316,7 +1316,7 @@ void rproc_shutdown(struct rproc *rproc)
if (!atomic_dec_and_test(&rproc->power))
goto out;
- ret = rproc_stop(rproc, true);
+ ret = rproc_stop(rproc, false);
if (ret) {
atomic_inc(&rproc->power);
goto out;
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index 360e06b20c53..ac18f2f27881 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -110,7 +110,7 @@ static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */
UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */
UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */
- UNIPHIER_RESETX(12, 0x200c, 5), /* GIO (PCIe, USB3) */
+ UNIPHIER_RESETX(14, 0x200c, 5), /* USB30 */
UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */
UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */
UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */
@@ -127,8 +127,8 @@ static const struct uniphier_reset_data uniphier_pxs3_sys_reset_data[] = {
UNIPHIER_RESETX(6, 0x200c, 9), /* Ether0 */
UNIPHIER_RESETX(7, 0x200c, 10), /* Ether1 */
UNIPHIER_RESETX(8, 0x200c, 12), /* STDMAC */
- UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link (GIO0) */
- UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link (GIO1) */
+ UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link */
+ UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link */
UNIPHIER_RESETX(16, 0x200c, 16), /* USB30-PHY0 */
UNIPHIER_RESETX(17, 0x200c, 18), /* USB30-PHY1 */
UNIPHIER_RESETX(18, 0x200c, 20), /* USB30-PHY2 */
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index 64b6de9763ee..1efdf9ff8679 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -581,4 +581,6 @@ static void rpmsg_chrdev_exit(void)
unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
}
module_exit(rpmsg_chrdev_exit);
+
+MODULE_ALIAS("rpmsg:rpmsg_chrdev");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index 304e891e35fc..60f2250fd96b 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -57,7 +57,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
{
- long rc = OPAL_BUSY;
+ s64 rc = OPAL_BUSY;
int retries = 10;
u32 y_m_d;
u64 h_m_s_ms;
@@ -66,13 +66,17 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
- if (rc == OPAL_BUSY_EVENT)
+ if (rc == OPAL_BUSY_EVENT) {
+ msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
- else if (retries-- && (rc == OPAL_HARDWARE
- || rc == OPAL_INTERNAL_ERROR))
- msleep(10);
- else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
- break;
+ } else if (rc == OPAL_BUSY) {
+ msleep(OPAL_BUSY_DELAY_MS);
+ } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
+ if (retries--) {
+ msleep(10); /* Wait 10ms before retry */
+ rc = OPAL_BUSY; /* go around again */
+ }
+ }
}
if (rc != OPAL_SUCCESS)
@@ -87,21 +91,26 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
{
- long rc = OPAL_BUSY;
+ s64 rc = OPAL_BUSY;
int retries = 10;
u32 y_m_d = 0;
u64 h_m_s_ms = 0;
tm_to_opal(tm, &y_m_d, &h_m_s_ms);
+
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_write(y_m_d, h_m_s_ms);
- if (rc == OPAL_BUSY_EVENT)
+ if (rc == OPAL_BUSY_EVENT) {
+ msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
- else if (retries-- && (rc == OPAL_HARDWARE
- || rc == OPAL_INTERNAL_ERROR))
- msleep(10);
- else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
- break;
+ } else if (rc == OPAL_BUSY) {
+ msleep(OPAL_BUSY_DELAY_MS);
+ } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
+ if (retries--) {
+ msleep(10); /* Wait 10ms before retry */
+ rc = OPAL_BUSY; /* go around again */
+ }
+ }
}
return rc == OPAL_SUCCESS ? 0 : -EIO;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 62f5f04d8f61..5e963fe0e38d 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
int dasd_alias_add_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
- struct alias_lcu *lcu;
+ __u8 uaddr = private->uid.real_unit_addr;
+ struct alias_lcu *lcu = private->lcu;
unsigned long flags;
int rc;
- lcu = private->lcu;
rc = 0;
spin_lock_irqsave(&lcu->lock, flags);
+ /*
+ * Check if device and lcu type differ. If so, the uac data may be
+ * outdated and needs to be updated.
+ */
+ if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
+ lcu->flags |= UPDATE_PENDING;
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "uid type mismatch - trigger rescan");
+ }
if (!(lcu->flags & UPDATE_PENDING)) {
rc = _add_device_to_lcu(lcu, device, device);
if (rc)
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index f035c2f25d35..131f1989f6f3 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -27,7 +27,6 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/vtoc.h>
-#include <asm/diag.h>
#include "dasd_int.h"
#include "dasd_diag.h"
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 5f8d9ea69ebd..eceba3858cef 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -18,7 +18,7 @@ int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
* Used to keep track of the size of the event masks. Qemu until version 2.11
* only supports 4 and needs a workaround.
*/
-bool sclp_mask_compat_mode;
+bool sclp_mask_compat_mode __section(.data);
void sclp_early_wait_irq(void)
{
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index bfec1485ca23..5535312602af 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -323,6 +323,9 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
struct ccw_dev_id dev_id;
int rc, i;
+ if (num_devices < 1)
+ return -EINVAL;
+
gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
GFP_KERNEL);
if (!gdev)
@@ -375,7 +378,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
goto error;
}
/* Check if the devices are bound to the required ccw driver. */
- if (gdev->count && gdrv && gdrv->ccw_driver &&
+ if (gdrv && gdrv->ccw_driver &&
gdev->cdev[0]->drv != gdrv->ccw_driver) {
rc = -EINVAL;
goto error;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6652a49a49b1..9029804dcd22 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
+ struct channel_path *chp;
struct chp_link link;
struct chp_id chpid;
int status;
@@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
chpid.id = sei_area->rsid;
/* allocate a new channel path structure, if needed */
status = chp_get_status(chpid);
- if (status < 0)
- chp_new(chpid);
- else if (!status)
+ if (!status)
return;
+
+ if (status < 0) {
+ chp_new(chpid);
+ } else {
+ chp = chpid_to_chp(chpid);
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 6886b3d34cf8..5130d7c67239 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -25,7 +25,6 @@
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
-#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/chpid.h>
#include <asm/airq.h>
@@ -767,262 +766,6 @@ void cio_register_early_subchannels(void)
}
#endif /* CONFIG_CCW_CONSOLE */
-static int
-__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
-{
- int retry, cc;
-
- cc = 0;
- for (retry=0;retry<3;retry++) {
- schib->pmcw.ena = 0;
- cc = msch(schid, schib);
- if (cc)
- return (cc==3?-ENODEV:-EBUSY);
- if (stsch(schid, schib) || !css_sch_is_valid(schib))
- return -ENODEV;
- if (!schib->pmcw.ena)
- return 0;
- }
- return -EBUSY; /* uhm... */
-}
-
-static int
-__clear_io_subchannel_easy(struct subchannel_id schid)
-{
- int retry;
-
- if (csch(schid))
- return -ENODEV;
- for (retry=0;retry<20;retry++) {
- struct tpi_info ti;
-
- if (tpi(&ti)) {
- tsch(ti.schid, this_cpu_ptr(&cio_irb));
- if (schid_equal(&ti.schid, &schid))
- return 0;
- }
- udelay_simple(100);
- }
- return -EBUSY;
-}
-
-static void __clear_chsc_subchannel_easy(void)
-{
- /* It seems we can only wait for a bit here :/ */
- udelay_simple(100);
-}
-
-static int pgm_check_occured;
-
-static void cio_reset_pgm_check_handler(void)
-{
- pgm_check_occured = 1;
-}
-
-static int stsch_reset(struct subchannel_id schid, struct schib *addr)
-{
- int rc;
-
- pgm_check_occured = 0;
- s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
- rc = stsch(schid, addr);
- s390_base_pgm_handler_fn = NULL;
-
- /* The program check handler could have changed pgm_check_occured. */
- barrier();
-
- if (pgm_check_occured)
- return -EIO;
- else
- return rc;
-}
-
-static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
-{
- struct schib schib;
-
- if (stsch_reset(schid, &schib))
- return -ENXIO;
- if (!schib.pmcw.ena)
- return 0;
- switch(__disable_subchannel_easy(schid, &schib)) {
- case 0:
- case -ENODEV:
- break;
- default: /* -EBUSY */
- switch (schib.pmcw.st) {
- case SUBCHANNEL_TYPE_IO:
- if (__clear_io_subchannel_easy(schid))
- goto out; /* give up... */
- break;
- case SUBCHANNEL_TYPE_CHSC:
- __clear_chsc_subchannel_easy();
- break;
- default:
- /* No default clear strategy */
- break;
- }
- stsch(schid, &schib);
- __disable_subchannel_easy(schid, &schib);
- }
-out:
- return 0;
-}
-
-static atomic_t chpid_reset_count;
-
-static void s390_reset_chpids_mcck_handler(void)
-{
- struct crw crw;
- union mci mci;
-
- /* Check for pending channel report word. */
- mci.val = S390_lowcore.mcck_interruption_code;
- if (!mci.cp)
- return;
- /* Process channel report words. */
- while (stcrw(&crw) == 0) {
- /* Check for responses to RCHP. */
- if (crw.slct && crw.rsc == CRW_RSC_CPATH)
- atomic_dec(&chpid_reset_count);
- }
-}
-
-#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
-static void css_reset(void)
-{
- int i, ret;
- unsigned long long timeout;
- struct chp_id chpid;
-
- /* Reset subchannels. */
- for_each_subchannel(__shutdown_subchannel_easy, NULL);
- /* Reset channel paths. */
- s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
- /* Enable channel report machine checks. */
- __ctl_set_bit(14, 28);
- /* Temporarily reenable machine checks. */
- local_mcck_enable();
- chp_id_init(&chpid);
- for (i = 0; i <= __MAX_CHPID; i++) {
- chpid.id = i;
- ret = rchp(chpid);
- if ((ret == 0) || (ret == 2))
- /*
- * rchp either succeeded, or another rchp is already
- * in progress. In either case, we'll get a crw.
- */
- atomic_inc(&chpid_reset_count);
- }
- /* Wait for machine check for all channel paths. */
- timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
- while (atomic_read(&chpid_reset_count) != 0) {
- if (get_tod_clock_fast() > timeout)
- break;
- cpu_relax();
- }
- /* Disable machine checks again. */
- local_mcck_disable();
- /* Disable channel report machine checks. */
- __ctl_clear_bit(14, 28);
- s390_base_mcck_handler_fn = NULL;
-}
-
-static struct reset_call css_reset_call = {
- .fn = css_reset,
-};
-
-static int __init init_css_reset_call(void)
-{
- atomic_set(&chpid_reset_count, 0);
- register_reset_call(&css_reset_call);
- return 0;
-}
-
-arch_initcall(init_css_reset_call);
-
-struct sch_match_id {
- struct subchannel_id schid;
- struct ccw_dev_id devid;
- int rc;
-};
-
-static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
-{
- struct schib schib;
- struct sch_match_id *match_id = data;
-
- if (stsch_reset(schid, &schib))
- return -ENXIO;
- if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
- (schib.pmcw.dev == match_id->devid.devno) &&
- (schid.ssid == match_id->devid.ssid)) {
- match_id->schid = schid;
- match_id->rc = 0;
- return 1;
- }
- return 0;
-}
-
-static int reipl_find_schid(struct ccw_dev_id *devid,
- struct subchannel_id *schid)
-{
- struct sch_match_id match_id;
-
- match_id.devid = *devid;
- match_id.rc = -ENODEV;
- for_each_subchannel(__reipl_subchannel_match, &match_id);
- if (match_id.rc == 0)
- *schid = match_id.schid;
- return match_id.rc;
-}
-
-extern void do_reipl_asm(__u32 schid);
-
-/* Make sure all subchannels are quiet before we re-ipl an lpar. */
-void reipl_ccw_dev(struct ccw_dev_id *devid)
-{
- struct subchannel_id uninitialized_var(schid);
-
- s390_reset_system();
- if (reipl_find_schid(devid, &schid) != 0)
- panic("IPL Device not found\n");
- do_reipl_asm(*((__u32*)&schid));
-}
-
-int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
-{
- static struct chsc_sda_area sda_area __initdata;
- struct subchannel_id schid;
- struct schib schib;
-
- schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
- if (!schid.one)
- return -ENODEV;
-
- if (schid.ssid) {
- /*
- * Firmware should have already enabled MSS but whoever started
- * the kernel might have initiated a channel subsystem reset.
- * Ensure that MSS is enabled.
- */
- memset(&sda_area, 0, sizeof(sda_area));
- if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
- return -ENODEV;
- }
- if (stsch(schid, &schib))
- return -ENODEV;
- if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
- return -ENODEV;
- if (!schib.pmcw.dnv)
- return -ENODEV;
-
- iplinfo->ssid = schid.ssid;
- iplinfo->devno = schib.pmcw.dev;
- iplinfo->is_qdio = schib.pmcw.qf;
- return 0;
-}
-
/**
* cio_tm_start_key - perform start function
* @sch: subchannel on which to perform the start function
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 4fa9ee1d09fa..14d328338ce2 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -183,30 +183,6 @@ int chsc(void *chsc_area)
}
EXPORT_SYMBOL(chsc);
-static inline int __rchp(struct chp_id chpid)
-{
- register struct chp_id reg1 asm ("1") = chpid;
- int ccode;
-
- asm volatile(
- " lr 1,%1\n"
- " rchp\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode) : "d" (reg1) : "cc");
- return ccode;
-}
-
-int rchp(struct chp_id chpid)
-{
- int ccode;
-
- ccode = __rchp(chpid);
- trace_s390_cio_rchp(chpid, ccode);
-
- return ccode;
-}
-
static inline int __rsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 35ad4ddd61e0..4be539cb9adc 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -20,7 +20,6 @@ int ssch(struct subchannel_id schid, union orb *addr);
int csch(struct subchannel_id schid);
int tpi(struct tpi_info *addr);
int chsc(void *chsc_area);
-int rchp(struct chp_id chpid);
int rsch(struct subchannel_id schid);
int hsch(struct subchannel_id schid);
int xsch(struct subchannel_id schid);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index a337281337a7..f4ca72dd862f 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1207,8 +1207,10 @@ no_cleanup:
qdio_shutdown_thinint(irq_ptr);
/* restore interrupt handler */
- if ((void *)cdev->handler == (void *)qdio_int_handler)
+ if ((void *)cdev->handler == (void *)qdio_int_handler) {
cdev->handler = irq_ptr->orig_handler;
+ cdev->private->intparm = 0;
+ }
spin_unlock_irq(get_ccwdev_lock(cdev));
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 98f3cfdc0d02..4c14ce428e92 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -141,7 +141,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
int i;
for (i = 0; i < nr_queues; i++) {
- q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+ q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
if (!q)
return -ENOMEM;
@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
{
struct ciw *ciw;
struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
- int rc;
memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -493,26 +492,23 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
- rc = -EINVAL;
- goto out_err;
+ return -EINVAL;
}
irq_ptr->equeue = *ciw;
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
- rc = -EINVAL;
- goto out_err;
+ return -EINVAL;
}
irq_ptr->aqueue = *ciw;
/* set new interrupt handler */
+ spin_lock_irq(get_ccwdev_lock(irq_ptr->cdev));
irq_ptr->orig_handler = init_data->cdev->handler;
init_data->cdev->handler = qdio_int_handler;
+ spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
return 0;
-out_err:
- qdio_release_memory(irq_ptr);
- return rc;
}
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 2c7550797ec2..dce92b2a895d 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -715,6 +715,10 @@ void cp_free(struct channel_program *cp)
* and stores the result to ccwchain list. @cp must have been
* initialized by a previous call with cp_init(). Otherwise, undefined
* behavior occurs.
+ * For each chain composing the channel program:
+ * - On entry ch_len holds the count of CCWs to be translated.
+ * - On exit ch_len is adjusted to the count of successfully translated CCWs.
+ * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
*
* The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced
* as helpers to do ccw chain translation inside the kernel. Basically
@@ -749,11 +753,18 @@ int cp_prefetch(struct channel_program *cp)
for (idx = 0; idx < len; idx++) {
ret = ccwchain_fetch_one(chain, idx, cp);
if (ret)
- return ret;
+ goto out_err;
}
}
return 0;
+out_err:
+ /* Only cleanup the chain elements that were actually translated. */
+ chain->ch_len = idx;
+ list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
+ chain->ch_len = 0;
+ }
+ return ret;
}
/**
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index ff6963ad6e39..3c800642134e 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
int ccode;
__u8 lpm;
unsigned long flags;
+ int ret;
sch = private->sch;
spin_lock_irqsave(sch->lock, flags);
private->state = VFIO_CCW_STATE_BUSY;
- spin_unlock_irqrestore(sch->lock, flags);
orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
@@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
* Initialize device status information
*/
sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
- return 0;
+ ret = 0;
+ break;
case 1: /* Status pending */
case 2: /* Busy */
- return -EBUSY;
+ ret = -EBUSY;
+ break;
case 3: /* Device/path not operational */
{
lpm = orb->cmd.lpm;
@@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
sch->lpm = 0;
if (cio_update_schib(sch))
- return -ENODEV;
-
- return sch->lpm ? -EACCES : -ENODEV;
+ ret = -ENODEV;
+ else
+ ret = sch->lpm ? -EACCES : -ENODEV;
+ break;
}
default:
- return ccode;
+ ret = ccode;
}
+ spin_unlock_irqrestore(sch->lock, flags);
+ return ret;
}
static void fsm_notoper(struct vfio_ccw_private *private,
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 48d55dc9e986..35a0c2b52f82 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -25,7 +25,6 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
-#include <asm/reset.h>
#include <asm/airq.h>
#include <linux/atomic.h>
#include <asm/isc.h>
@@ -1197,26 +1196,7 @@ static void ap_config_timeout(struct timer_list *unused)
queue_work(system_long_wq, &ap_scan_work);
}
-static void ap_reset_all(void)
-{
- int i, j;
-
- for (i = 0; i < AP_DOMAINS; i++) {
- if (!ap_test_config_domain(i))
- continue;
- for (j = 0; j < AP_DEVICES; j++) {
- if (!ap_test_config_card_id(j))
- continue;
- ap_rapq(AP_MKQID(j, i));
- }
- }
-}
-
-static struct reset_call ap_reset_call = {
- .fn = ap_reset_all,
-};
-
-int __init ap_debug_init(void)
+static int __init ap_debug_init(void)
{
ap_dbf_info = debug_register("ap", 1, 1,
DBF_MAX_SPRINTF_ARGS * sizeof(long));
@@ -1226,17 +1206,12 @@ int __init ap_debug_init(void)
return 0;
}
-void ap_debug_exit(void)
-{
- debug_unregister(ap_dbf_info);
-}
-
/**
* ap_module_init(): The module initialization code.
*
* Initializes the module.
*/
-int __init ap_module_init(void)
+static int __init ap_module_init(void)
{
int max_domain_id;
int rc, i;
@@ -1274,8 +1249,6 @@ int __init ap_module_init(void)
ap_airq_flag = (rc == 0);
}
- register_reset_call(&ap_reset_call);
-
/* Create /sys/bus/ap. */
rc = bus_register(&ap_bus_type);
if (rc)
@@ -1331,7 +1304,6 @@ out_bus:
bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
bus_unregister(&ap_bus_type);
out:
- unregister_reset_call(&ap_reset_call);
if (ap_using_interrupts())
unregister_adapter_interrupt(&ap_airq);
kfree(ap_configuration);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index e0827eaa42f1..02184cf35834 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -17,7 +17,7 @@
#include <linux/types.h>
#include <asm/ap.h>
-#define AP_DEVICES 64 /* Number of AP devices. */
+#define AP_DEVICES 256 /* Number of AP devices. */
#define AP_DOMAINS 256 /* Number of AP domains. */
#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
@@ -240,7 +240,4 @@ void ap_queue_resume(struct ap_device *ap_dev);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
int comp_device_type, unsigned int functions);
-int ap_module_init(void);
-void ap_module_exit(void);
-
#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
index 6a9d77c75ec3..dc675eb5aef6 100644
--- a/drivers/s390/crypto/ap_debug.h
+++ b/drivers/s390/crypto/ap_debug.h
@@ -23,7 +23,4 @@
extern debug_info_t *ap_dbf_info;
-int ap_debug_init(void);
-void ap_debug_exit(void);
-
#endif /* AP_DEBUG_H */
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index e7c2e4f9529a..ed80d00cdb6f 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -889,7 +889,7 @@ int pkey_findcard(const struct pkey_seckey *seckey,
u16 *pcardnr, u16 *pdomain, int verify)
{
struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
- struct zcrypt_device_matrix *device_matrix;
+ struct zcrypt_device_status_ext *device_status;
u16 card, dom;
u64 mkvp[2];
int i, rc, oi = -1;
@@ -899,18 +899,19 @@ int pkey_findcard(const struct pkey_seckey *seckey,
return -EINVAL;
/* fetch status of all crypto cards */
- device_matrix = kmalloc(sizeof(struct zcrypt_device_matrix),
+ device_status = kmalloc(MAX_ZDEV_ENTRIES_EXT
+ * sizeof(struct zcrypt_device_status_ext),
GFP_KERNEL);
- if (!device_matrix)
+ if (!device_status)
return -ENOMEM;
- zcrypt_device_status_mask(device_matrix);
+ zcrypt_device_status_mask_ext(device_status);
/* walk through all crypto cards */
- for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
- card = AP_QID_CARD(device_matrix->device[i].qid);
- dom = AP_QID_QUEUE(device_matrix->device[i].qid);
- if (device_matrix->device[i].online &&
- device_matrix->device[i].functions & 0x04) {
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ if (device_status[i].online &&
+ device_status[i].functions & 0x04) {
/* an enabled CCA Coprocessor card */
/* try cached mkvp */
if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
@@ -930,14 +931,14 @@ int pkey_findcard(const struct pkey_seckey *seckey,
mkvp_cache_scrub(card, dom);
}
}
- if (i >= MAX_ZDEV_ENTRIES) {
+ if (i >= MAX_ZDEV_ENTRIES_EXT) {
/* nothing found, so this time without cache */
- for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
- if (!(device_matrix->device[i].online &&
- device_matrix->device[i].functions & 0x04))
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ if (!(device_status[i].online &&
+ device_status[i].functions & 0x04))
continue;
- card = AP_QID_CARD(device_matrix->device[i].qid);
- dom = AP_QID_QUEUE(device_matrix->device[i].qid);
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
/* fresh fetch mkvp from adapter */
if (fetch_mkvp(card, dom, mkvp) == 0) {
mkvp_cache_update(card, dom, mkvp);
@@ -947,13 +948,13 @@ int pkey_findcard(const struct pkey_seckey *seckey,
oi = i;
}
}
- if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
+ if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
/* old mkvp matched, use this card then */
- card = AP_QID_CARD(device_matrix->device[oi].qid);
- dom = AP_QID_QUEUE(device_matrix->device[oi].qid);
+ card = AP_QID_CARD(device_status[oi].qid);
+ dom = AP_QID_QUEUE(device_status[oi].qid);
}
}
- if (i < MAX_ZDEV_ENTRIES || oi >= 0) {
+ if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
if (pcardnr)
*pcardnr = card;
if (pdomain)
@@ -962,7 +963,7 @@ int pkey_findcard(const struct pkey_seckey *seckey,
} else
rc = -ENODEV;
- kfree(device_matrix);
+ kfree(device_status);
return rc;
}
EXPORT_SYMBOL(pkey_findcard);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index ce15f101ee28..5efd84862ccb 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -18,8 +18,6 @@
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
@@ -607,19 +605,24 @@ out:
return rc;
}
-void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
+static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
struct zcrypt_device_status *stat;
+ int card, queue;
+
+ memset(devstatus, 0, MAX_ZDEV_ENTRIES
+ * sizeof(struct zcrypt_device_status));
- memset(matrix, 0, sizeof(*matrix));
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
- stat = matrix->device;
- stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS;
- stat += AP_QID_QUEUE(zq->queue->qid);
+ card = AP_QID_CARD(zq->queue->qid);
+ if (card >= MAX_ZDEV_CARDIDS)
+ continue;
+ queue = AP_QID_QUEUE(zq->queue->qid);
+ stat = &devstatus[card * AP_DOMAINS + queue];
stat->hwtype = zc->card->ap_dev.device_type;
stat->functions = zc->card->functions >> 26;
stat->qid = zq->queue->qid;
@@ -628,40 +631,70 @@ void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
}
spin_unlock(&zcrypt_list_lock);
}
-EXPORT_SYMBOL(zcrypt_device_status_mask);
-static void zcrypt_status_mask(char status[AP_DEVICES])
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
+ struct zcrypt_device_status_ext *stat;
+ int card, queue;
+
+ memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
+ * sizeof(struct zcrypt_device_status_ext));
- memset(status, 0, sizeof(char) * AP_DEVICES);
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ card = AP_QID_CARD(zq->queue->qid);
+ queue = AP_QID_QUEUE(zq->queue->qid);
+ stat = &devstatus[card * AP_DOMAINS + queue];
+ stat->hwtype = zc->card->ap_dev.device_type;
+ stat->functions = zc->card->functions >> 26;
+ stat->qid = zq->queue->qid;
+ stat->online = zq->online ? 0x01 : 0x00;
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
+}
+EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
+
+static void zcrypt_status_mask(char status[], size_t max_adapters)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int card;
+
+ memset(status, 0, max_adapters);
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ card = AP_QID_CARD(zq->queue->qid);
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+ || card >= max_adapters)
continue;
- status[AP_QID_CARD(zq->queue->qid)] =
- zc->online ? zc->user_space_type : 0x0d;
+ status[card] = zc->online ? zc->user_space_type : 0x0d;
}
}
spin_unlock(&zcrypt_list_lock);
}
-static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
+static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
+ int card;
- memset(qdepth, 0, sizeof(char) * AP_DEVICES);
+ memset(qdepth, 0, max_adapters);
spin_lock(&zcrypt_list_lock);
local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ card = AP_QID_CARD(zq->queue->qid);
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+ || card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
- qdepth[AP_QID_CARD(zq->queue->qid)] =
+ qdepth[card] =
zq->queue->pendingq_count +
zq->queue->requestq_count;
spin_unlock(&zq->queue->lock);
@@ -671,21 +704,23 @@ static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
spin_unlock(&zcrypt_list_lock);
}
-static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
+static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
+ int card;
- memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
+ memset(reqcnt, 0, sizeof(int) * max_adapters);
spin_lock(&zcrypt_list_lock);
local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ card = AP_QID_CARD(zq->queue->qid);
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+ || card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
- reqcnt[AP_QID_CARD(zq->queue->qid)] =
- zq->queue->total_request_count;
+ reqcnt[card] = zq->queue->total_request_count;
spin_unlock(&zq->queue->lock);
}
}
@@ -739,60 +774,10 @@ static int zcrypt_requestq_count(void)
return requestq_count;
}
-static int zcrypt_count_type(int type)
-{
- struct zcrypt_card *zc;
- struct zcrypt_queue *zq;
- int device_count;
-
- device_count = 0;
- spin_lock(&zcrypt_list_lock);
- for_each_zcrypt_card(zc) {
- if (zc->card->id != type)
- continue;
- for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
- continue;
- device_count++;
- }
- }
- spin_unlock(&zcrypt_list_lock);
- return device_count;
-}
-
-/**
- * zcrypt_ica_status(): Old, depracted combi status call.
- *
- * Old, deprecated combi status call.
- */
-static long zcrypt_ica_status(struct file *filp, unsigned long arg)
-{
- struct ica_z90_status *pstat;
- int ret;
-
- pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
- if (!pstat)
- return -ENOMEM;
- pstat->totalcount = zcrypt_device_count;
- pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
- pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
- pstat->requestqWaitCount = zcrypt_requestq_count();
- pstat->pendingqWaitCount = zcrypt_pendingq_count();
- pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
- pstat->cryptoDomain = ap_domain_index;
- zcrypt_status_mask(pstat->status);
- zcrypt_qdepth_mask(pstat->qdepth);
- ret = 0;
- if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
- ret = -EFAULT;
- kfree(pstat);
- return ret;
-}
-
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
- int rc;
+ int rc = 0;
switch (cmd) {
case ICARSAMODEXPO: {
@@ -871,48 +856,48 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
return rc;
}
- case ZDEVICESTATUS: {
- struct zcrypt_device_matrix *device_status;
+ case ZCRYPT_DEVICE_STATUS: {
+ struct zcrypt_device_status_ext *device_status;
+ size_t total_size = MAX_ZDEV_ENTRIES_EXT
+ * sizeof(struct zcrypt_device_status_ext);
- device_status = kzalloc(sizeof(struct zcrypt_device_matrix),
- GFP_KERNEL);
+ device_status = kzalloc(total_size, GFP_KERNEL);
if (!device_status)
return -ENOMEM;
-
- zcrypt_device_status_mask(device_status);
-
+ zcrypt_device_status_mask_ext(device_status);
if (copy_to_user((char __user *) arg, device_status,
- sizeof(struct zcrypt_device_matrix))) {
- kfree(device_status);
- return -EFAULT;
- }
-
+ total_size))
+ rc = -EFAULT;
kfree(device_status);
- return 0;
+ return rc;
}
- case Z90STAT_STATUS_MASK: {
+ case ZCRYPT_STATUS_MASK: {
char status[AP_DEVICES];
- zcrypt_status_mask(status);
- if (copy_to_user((char __user *) arg, status,
- sizeof(char) * AP_DEVICES))
+
+ zcrypt_status_mask(status, AP_DEVICES);
+ if (copy_to_user((char __user *) arg, status, sizeof(status)))
return -EFAULT;
return 0;
}
- case Z90STAT_QDEPTH_MASK: {
+ case ZCRYPT_QDEPTH_MASK: {
char qdepth[AP_DEVICES];
- zcrypt_qdepth_mask(qdepth);
- if (copy_to_user((char __user *) arg, qdepth,
- sizeof(char) * AP_DEVICES))
+
+ zcrypt_qdepth_mask(qdepth, AP_DEVICES);
+ if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
return -EFAULT;
return 0;
}
- case Z90STAT_PERDEV_REQCNT: {
- int reqcnt[AP_DEVICES];
- zcrypt_perdev_reqcnt(reqcnt);
- if (copy_to_user((int __user *) arg, reqcnt,
- sizeof(int) * AP_DEVICES))
- return -EFAULT;
- return 0;
+ case ZCRYPT_PERDEV_REQCNT: {
+ int *reqcnt;
+
+ reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
+ if (!reqcnt)
+ return -ENOMEM;
+ zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
+ if (copy_to_user((int __user *) arg, reqcnt, sizeof(int) * AP_DEVICES))
+ rc = -EFAULT;
+ kfree(reqcnt);
+ return rc;
}
case Z90STAT_REQUESTQ_COUNT:
return put_user(zcrypt_requestq_count(), (int __user *) arg);
@@ -924,38 +909,54 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
case Z90STAT_DOMAIN_INDEX:
return put_user(ap_domain_index, (int __user *) arg);
/*
- * Deprecated ioctls. Don't add another device count ioctl,
- * you can count them yourself in the user space with the
- * output of the Z90STAT_STATUS_MASK ioctl.
+ * Deprecated ioctls
*/
- case ICAZ90STATUS:
- return zcrypt_ica_status(filp, arg);
- case Z90STAT_TOTALCOUNT:
- return put_user(zcrypt_device_count, (int __user *) arg);
- case Z90STAT_PCICACOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCICA),
- (int __user *) arg);
- case Z90STAT_PCICCCOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCICC),
- (int __user *) arg);
- case Z90STAT_PCIXCCMCL2COUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
- (int __user *) arg);
- case Z90STAT_PCIXCCMCL3COUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
- (int __user *) arg);
- case Z90STAT_PCIXCCCOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
- zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
- (int __user *) arg);
- case Z90STAT_CEX2CCOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
- (int __user *) arg);
- case Z90STAT_CEX2ACOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
- (int __user *) arg);
+ case ZDEVICESTATUS: {
+ /* the old ioctl supports only 64 adapters */
+ struct zcrypt_device_status *device_status;
+ size_t total_size = MAX_ZDEV_ENTRIES
+ * sizeof(struct zcrypt_device_status);
+
+ device_status = kzalloc(total_size, GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask(device_status);
+ if (copy_to_user((char __user *) arg, device_status,
+ total_size))
+ rc = -EFAULT;
+ kfree(device_status);
+ return rc;
+ }
+ case Z90STAT_STATUS_MASK: {
+ /* the old ioctl supports only 64 adapters */
+ char status[MAX_ZDEV_CARDIDS];
+
+ zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((char __user *) arg, status, sizeof(status)))
+ return -EFAULT;
+ return 0;
+ }
+ case Z90STAT_QDEPTH_MASK: {
+ /* the old ioctl supports only 64 adapters */
+ char qdepth[MAX_ZDEV_CARDIDS];
+
+ zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
+ return -EFAULT;
+ return 0;
+ }
+ case Z90STAT_PERDEV_REQCNT: {
+ /* the old ioctl supports only 64 adapters */
+ int reqcnt[MAX_ZDEV_CARDIDS];
+
+ zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+ return -EFAULT;
+ return 0;
+ }
+ /* unknown ioctl number */
default:
- /* unknown ioctl number */
+ ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
return -ENOIOCTLCMD;
}
}
@@ -1152,201 +1153,6 @@ static struct miscdevice zcrypt_misc_device = {
.fops = &zcrypt_fops,
};
-/*
- * Deprecated /proc entry support.
- */
-static struct proc_dir_entry *zcrypt_entry;
-
-static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
-{
- int i;
-
- for (i = 0; i < len; i++)
- seq_printf(m, "%01x", (unsigned int) addr[i]);
- seq_putc(m, ' ');
-}
-
-static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
-{
- int inl, c, cx;
-
- seq_printf(m, " ");
- inl = 0;
- for (c = 0; c < (len / 16); c++) {
- sprintcl(m, addr+inl, 16);
- inl += 16;
- }
- cx = len%16;
- if (cx) {
- sprintcl(m, addr+inl, cx);
- inl += cx;
- }
- seq_putc(m, '\n');
-}
-
-static void sprinthx(unsigned char *title, struct seq_file *m,
- unsigned char *addr, unsigned int len)
-{
- int inl, r, rx;
-
- seq_printf(m, "\n%s\n", title);
- inl = 0;
- for (r = 0; r < (len / 64); r++) {
- sprintrw(m, addr+inl, 64);
- inl += 64;
- }
- rx = len % 64;
- if (rx) {
- sprintrw(m, addr+inl, rx);
- inl += rx;
- }
- seq_putc(m, '\n');
-}
-
-static void sprinthx4(unsigned char *title, struct seq_file *m,
- unsigned int *array, unsigned int len)
-{
- seq_printf(m, "\n%s\n", title);
- seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, array, len, false);
- seq_putc(m, '\n');
-}
-
-static int zcrypt_proc_show(struct seq_file *m, void *v)
-{
- char workarea[sizeof(int) * AP_DEVICES];
-
- seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
- ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
- seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
- seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
- seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
- seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
- seq_printf(m, "PCIXCC MCL2 count: %d\n",
- zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
- seq_printf(m, "PCIXCC MCL3 count: %d\n",
- zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
- seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
- seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
- seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
- seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
- seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
- seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
- seq_printf(m, "Total open handles: %d\n\n",
- atomic_read(&zcrypt_open_count));
- zcrypt_status_mask(workarea);
- sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
- "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
- m, workarea, AP_DEVICES);
- zcrypt_qdepth_mask(workarea);
- sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
- zcrypt_perdev_reqcnt((int *) workarea);
- sprinthx4("Per-device successfully completed request counts",
- m, (unsigned int *) workarea, AP_DEVICES);
- return 0;
-}
-
-static int zcrypt_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, zcrypt_proc_show, NULL);
-}
-
-static void zcrypt_disable_card(int index)
-{
- struct zcrypt_card *zc;
- struct zcrypt_queue *zq;
-
- spin_lock(&zcrypt_list_lock);
- for_each_zcrypt_card(zc) {
- for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
- continue;
- zq->online = 0;
- ap_flush_queue(zq->queue);
- }
- }
- spin_unlock(&zcrypt_list_lock);
-}
-
-static void zcrypt_enable_card(int index)
-{
- struct zcrypt_card *zc;
- struct zcrypt_queue *zq;
-
- spin_lock(&zcrypt_list_lock);
- for_each_zcrypt_card(zc) {
- for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
- continue;
- zq->online = 1;
- ap_flush_queue(zq->queue);
- }
- }
- spin_unlock(&zcrypt_list_lock);
-}
-
-static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- unsigned char *lbuf, *ptr;
- size_t local_count;
- int j;
-
- if (count <= 0)
- return 0;
-
-#define LBUFSIZE 1200UL
- lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
- if (!lbuf)
- return 0;
-
- local_count = min(LBUFSIZE - 1, count);
- if (copy_from_user(lbuf, buffer, local_count) != 0) {
- kfree(lbuf);
- return -EFAULT;
- }
- lbuf[local_count] = '\0';
-
- ptr = strstr(lbuf, "Online devices");
- if (!ptr)
- goto out;
- ptr = strstr(ptr, "\n");
- if (!ptr)
- goto out;
- ptr++;
-
- if (strstr(ptr, "Waiting work element counts") == NULL)
- goto out;
-
- for (j = 0; j < 64 && *ptr; ptr++) {
- /*
- * '0' for no device, '1' for PCICA, '2' for PCICC,
- * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
- * '5' for CEX2C and '6' for CEX2A'
- * '7' for CEX3C and '8' for CEX3A
- */
- if (*ptr >= '0' && *ptr <= '8')
- j++;
- else if (*ptr == 'd' || *ptr == 'D')
- zcrypt_disable_card(j++);
- else if (*ptr == 'e' || *ptr == 'E')
- zcrypt_enable_card(j++);
- else if (*ptr != ' ' && *ptr != '\t')
- break;
- }
-out:
- kfree(lbuf);
- return count;
-}
-
-static const struct file_operations zcrypt_proc_fops = {
- .owner = THIS_MODULE,
- .open = zcrypt_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = zcrypt_proc_write,
-};
-
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
@@ -1448,27 +1254,15 @@ int __init zcrypt_api_init(void)
if (rc)
goto out;
- atomic_set(&zcrypt_rescan_req, 0);
-
/* Register the request sprayer. */
rc = misc_register(&zcrypt_misc_device);
if (rc < 0)
goto out;
- /* Set up the proc file system */
- zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
- &zcrypt_proc_fops);
- if (!zcrypt_entry) {
- rc = -ENOMEM;
- goto out_misc;
- }
-
zcrypt_msgtype6_init();
zcrypt_msgtype50_init();
return 0;
-out_misc:
- misc_deregister(&zcrypt_misc_device);
out:
return rc;
}
@@ -1480,7 +1274,6 @@ out:
*/
void __exit zcrypt_api_exit(void)
{
- remove_proc_entry("driver/z90crypt", NULL);
misc_deregister(&zcrypt_misc_device);
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 9fff8912f6e3..f149a8fee60d 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -21,30 +21,6 @@
#include <asm/zcrypt.h>
#include "ap_bus.h"
-/* deprecated status calls */
-#define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status)
-#define Z90STAT_PCIXCCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x43, int)
-
-/**
- * This structure is deprecated and the corresponding ioctl() has been
- * replaced with individual ioctl()s for each piece of data!
- */
-struct ica_z90_status {
- int totalcount;
- int leedslitecount; // PCICA
- int leeds2count; // PCICC
- // int PCIXCCCount; is not in struct for backward compatibility
- int requestqWaitCount;
- int pendingqWaitCount;
- int totalOpenCount;
- int cryptoDomain;
- // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
- // 5=CEX2C
- unsigned char status[64];
- // qdepth: # work elements waiting for each device
- unsigned char qdepth[64];
-};
-
/**
* device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2,
* PCIXCC_MCL3, CEX2C, or CEX2A
@@ -179,6 +155,6 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
long zcrypt_send_cprb(struct ica_xcRB *xcRB);
-void zcrypt_device_status_mask(struct zcrypt_device_matrix *devstatus);
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
#endif /* _ZCRYPT_API_H_ */
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 0ee8f33efb54..2d9fe7e4ee40 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1928,6 +1928,8 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
return -EINVAL;
/* TODO: sanity checks */
card->portno = value;
+ if (card->dev)
+ card->dev->dev_port = card->portno;
return count;
@@ -2158,6 +2160,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
card->dev = dev;
card->dev->ml_priv = card;
card->dev->netdev_ops = &lcs_netdev_ops;
+ card->dev->dev_port = card->portno;
memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
#ifdef CONFIG_IP_MULTICAST
if (!lcs_check_multicast_support(card))
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 4326715dc13e..2a5fec55bf60 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -148,6 +148,7 @@ struct qeth_perf_stats {
unsigned int tx_csum;
unsigned int tx_lin;
unsigned int tx_linfail;
+ unsigned int rx_csum;
};
/* Routing stuff */
@@ -557,7 +558,6 @@ enum qeth_prot_versions {
enum qeth_cmd_buffer_state {
BUF_STATE_FREE,
BUF_STATE_LOCKED,
- BUF_STATE_PROCESSED,
};
enum qeth_cq {
@@ -601,7 +601,6 @@ struct qeth_channel {
struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
atomic_t irq_pending;
int io_buf_no;
- int buf_no;
};
/**
@@ -714,9 +713,6 @@ enum qeth_discipline_id {
struct qeth_discipline {
const struct device_type *devtype;
- void (*start_poll)(struct ccw_device *, int, unsigned long);
- qdio_handler_t *input_handler;
- qdio_handler_t *output_handler;
int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done);
int (*recover)(void *ptr);
int (*setup) (struct ccwgroup_device *);
@@ -782,9 +778,9 @@ struct qeth_card {
struct qeth_card_options options;
wait_queue_head_t wait_q;
- spinlock_t vlanlock;
spinlock_t mclock;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ struct mutex vid_list_mutex; /* vid_list */
struct list_head vid_list;
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
@@ -869,6 +865,32 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
}
}
+static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
+ u8 flags)
+{
+ if ((card->dev->features & NETIF_F_RXCSUM) &&
+ (flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (card->options.performance_stats)
+ card->perf_stats.rx_csum++;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+}
+
+static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
+{
+ *flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+ (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
+ *flags |= QETH_HDR_EXT_UDP;
+ if (ipv == 4) {
+ /* some HW requires combined L3+L4 csum offload: */
+ *flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
+ ip_hdr(skb)->check = 0;
+ }
+}
+
static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
struct qeth_buffer_pool_entry *entry)
{
@@ -881,6 +903,27 @@ static inline int qeth_is_diagass_supported(struct qeth_card *card,
return card->info.diagass_support & (__u32)cmd;
}
+int qeth_send_simple_setassparms_prot(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ u16 cmd_code, long data,
+ enum qeth_prot_versions prot);
+/* IPv4 variant */
+static inline int qeth_send_simple_setassparms(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ u16 cmd_code, long data)
+{
+ return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
+ data, QETH_PROT_IPV4);
+}
+
+static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ u16 cmd_code, long data)
+{
+ return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
+ data, QETH_PROT_IPV6);
+}
+
extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
extern const struct attribute_group *qeth_generic_attr_groups[];
@@ -923,13 +966,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *);
-void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
int qeth_poll(struct napi_struct *napi, int budget);
-void qeth_qdio_input_handler(struct ccw_device *,
- unsigned int, unsigned int, int,
- int, unsigned long);
-void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
- int, int, int, unsigned long);
void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *);
@@ -981,8 +1018,6 @@ int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
void qeth_trace_features(struct qeth_card *);
void qeth_close_dev(struct qeth_card *);
-int qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
- __u16, long);
int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
long,
int (*reply_cb)(struct qeth_card *,
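
The two new static inlines above, qeth_rx_csum() and qeth_tx_csum(), replace the per-discipline qeth_l2_hdr_csum()/qeth_l3_hdr_csum() copies that later hunks delete. As a minimal usage sketch (the calls are taken from the L2 hunks further down; the surrounding transmit/receive context is only illustrative):

	/* transmit: ask the adapter to compute L4 (and, for IPv4, L3) checksums */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
		if (card->options.performance_stats)
			card->perf_stats.tx_csum++;
	}

	/* receive: translate the HW checksum flags into skb->ip_summed */
	qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
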
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 04fefa5bb08d..06415b6a8f68 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
qeth_put_reply(reply);
}
spin_unlock_irqrestore(&card->lock, flags);
- atomic_set(&card->write.irq_pending, 0);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
@@ -818,7 +817,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
qeth_release_buffer(channel, &channel->iob[cnt]);
- channel->buf_no = 0;
channel->io_buf_no = 0;
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
@@ -924,7 +922,6 @@ static int qeth_setup_channel(struct qeth_channel *channel)
kfree(channel->iob[cnt].data);
return -ENOMEM;
}
- channel->buf_no = 0;
channel->io_buf_no = 0;
atomic_set(&channel->irq_pending, 0);
spin_lock_init(&channel->iob_lock);
@@ -1100,16 +1097,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
{
int rc;
int cstat, dstat;
- struct qeth_cmd_buffer *buffer;
+ struct qeth_cmd_buffer *iob = NULL;
struct qeth_channel *channel;
struct qeth_card *card;
- struct qeth_cmd_buffer *iob;
- __u8 index;
-
- if (__qeth_check_irb_error(cdev, intparm, irb))
- return;
- cstat = irb->scsw.cmd.cstat;
- dstat = irb->scsw.cmd.dstat;
card = CARD_FROM_CDEV(cdev);
if (!card)
@@ -1127,6 +1117,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
channel = &card->data;
QETH_CARD_TEXT(card, 5, "data");
}
+
+ if (qeth_intparm_is_iob(intparm))
+ iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+
+ if (__qeth_check_irb_error(cdev, intparm, irb)) {
+ /* IO was terminated, free its resources. */
+ if (iob)
+ qeth_release_buffer(iob->channel, iob);
+ atomic_set(&channel->irq_pending, 0);
+ wake_up(&card->wait_q);
+ return;
+ }
+
atomic_set(&channel->irq_pending, 0);
if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
@@ -1150,6 +1153,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
/* we don't have to handle this further */
intparm = 0;
}
+
+ cstat = irb->scsw.cmd.cstat;
+ dstat = irb->scsw.cmd.dstat;
+
if ((dstat & DEV_STAT_UNIT_EXCEP) ||
(dstat & DEV_STAT_UNIT_CHECK) ||
(cstat)) {
@@ -1182,25 +1189,15 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
channel->state = CH_STATE_RCD_DONE;
goto out;
}
- if (intparm) {
- buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
- buffer->state = BUF_STATE_PROCESSED;
- }
if (channel == &card->data)
return;
if (channel == &card->read &&
channel->state == CH_STATE_UP)
__qeth_issue_next_read(card);
- iob = channel->iob;
- index = channel->buf_no;
- while (iob[index].state == BUF_STATE_PROCESSED) {
- if (iob[index].callback != NULL)
- iob[index].callback(channel, iob + index);
+ if (iob && iob->callback)
+ iob->callback(iob->channel, iob);
- index = (index + 1) % QETH_CMD_BUFFER_NO;
- }
- channel->buf_no = index;
out:
wake_up(&card->wait_q);
return;
@@ -1470,13 +1467,13 @@ static int qeth_setup_card(struct qeth_card *card)
card->lan_online = 0;
card->read_or_write_problem = 0;
card->dev = NULL;
- spin_lock_init(&card->vlanlock);
spin_lock_init(&card->mclock);
spin_lock_init(&card->lock);
spin_lock_init(&card->ip_lock);
spin_lock_init(&card->thread_mask_lock);
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
+ mutex_init(&card->vid_list_mutex);
card->thread_start_mask = 0;
card->thread_allowed_mask = 0;
card->thread_running_mask = 0;
@@ -1870,8 +1867,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_start(channel->ccwdev,
- &channel->ccw, (addr_t) iob, 0, 0);
+ rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+ (addr_t) iob, 0, 0, QETH_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
@@ -1888,7 +1885,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
if (channel->state != CH_STATE_UP) {
rc = -ETIME;
QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- qeth_clear_cmd_buffers(channel);
} else
rc = 0;
return rc;
@@ -1942,8 +1938,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_start(channel->ccwdev,
- &channel->ccw, (addr_t) iob, 0, 0);
+ rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+ (addr_t) iob, 0, 0, QETH_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
@@ -1964,7 +1960,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
dev_name(&channel->ccwdev->dev));
QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
- qeth_clear_cmd_buffers(channel);
return -ETIME;
}
return qeth_idx_activate_get_answer(channel, idx_reply_cb);
@@ -2166,8 +2161,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
- rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
- (addr_t) iob, 0, 0);
+ rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+ (addr_t) iob, 0, 0, event_timeout);
spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
if (rc) {
QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
@@ -2199,8 +2194,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
}
- if (reply->rc == -EIO)
- goto error;
rc = reply->rc;
qeth_put_reply(reply);
return rc;
@@ -2211,10 +2204,6 @@ time_err:
list_del_init(&reply->list);
spin_unlock_irqrestore(&reply->card->lock, flags);
atomic_inc(&reply->received);
-error:
- atomic_set(&card->write.irq_pending, 0);
- qeth_release_buffer(iob->channel, iob);
- card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
rc = reply->rc;
qeth_put_reply(reply);
return rc;
@@ -3033,28 +3022,23 @@ static int qeth_send_startlan(struct qeth_card *card)
return rc;
}
-static int qeth_default_setadapterparms_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long data)
+static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 4, "defadpcb");
-
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code == 0)
+ if (!cmd->hdr.return_code)
cmd->hdr.return_code =
cmd->data.setadapterparms.hdr.return_code;
- return 0;
+ return cmd->hdr.return_code;
}
static int qeth_query_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
QETH_CARD_TEXT(card, 3, "quyadpcb");
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
- cmd = (struct qeth_ipa_cmd *) data;
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
card->info.link_type =
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
@@ -3062,7 +3046,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
}
card->options.adp.supported_funcs =
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
- return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
+ return 0;
}
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
@@ -3154,22 +3138,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists);
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
- struct qeth_switch_info *sw_info;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_query_switch_attributes *attrs;
+ struct qeth_switch_info *sw_info;
QETH_CARD_TEXT(card, 2, "qswiatcb");
- cmd = (struct qeth_ipa_cmd *) data;
- sw_info = (struct qeth_switch_info *)reply->param;
- if (cmd->data.setadapterparms.hdr.return_code == 0) {
- attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
- sw_info->capabilities = attrs->capabilities;
- sw_info->settings = attrs->settings;
- QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
- sw_info->settings);
- }
- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
+ sw_info = (struct qeth_switch_info *)reply->param;
+ attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
+ sw_info->capabilities = attrs->capabilities;
+ sw_info->settings = attrs->settings;
+ QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
+ sw_info->settings);
return 0;
}
@@ -3606,15 +3588,14 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
}
}
-void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
- unsigned long card_ptr)
+static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
+ unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
if (card->dev && (card->dev->flags & IFF_UP))
napi_schedule(&card->napi);
}
-EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
@@ -3716,9 +3697,10 @@ out:
return;
}
-void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
- unsigned int queue, int first_elem, int count,
- unsigned long card_ptr)
+static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
+ unsigned int qdio_err, int queue,
+ int first_elem, int count,
+ unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
@@ -3729,14 +3711,12 @@ void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
else if (qdio_err)
qeth_schedule_recovery(card);
-
-
}
-EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
-void qeth_qdio_output_handler(struct ccw_device *ccwdev,
- unsigned int qdio_error, int __queue, int first_element,
- int count, unsigned long card_ptr)
+static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
+ unsigned int qdio_error, int __queue,
+ int first_element, int count,
+ unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *) card_ptr;
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
@@ -3805,7 +3785,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
card->perf_stats.outbound_handler_time += qeth_get_micros() -
card->perf_stats.outbound_handler_start_time;
}
-EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
@@ -4207,16 +4186,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet);
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_ipacmd_setadpparms *setparms;
QETH_CARD_TEXT(card, 4, "prmadpcb");
- cmd = (struct qeth_ipa_cmd *) data;
setparms = &(cmd->data.setadapterparms);
-
- qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
- if (cmd->hdr.return_code) {
+ if (qeth_setadpparms_inspect_rc(cmd)) {
QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
setparms->data.mode = SET_PROMISC_MODE_OFF;
}
@@ -4286,18 +4262,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats);
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
QETH_CARD_TEXT(card, 4, "chgmaccb");
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
- cmd = (struct qeth_ipa_cmd *) data;
if (!card->options.layer2 ||
!(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
ether_addr_copy(card->dev->dev_addr,
cmd->data.setadapterparms.data.change_addr.addr);
card->info.mac_bits |= QETH_LAYER2_MAC_READ;
}
- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
return 0;
}
@@ -4328,13 +4304,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_set_access_ctrl *access_ctrl_req;
int fallback = *(int *)reply->param;
QETH_CARD_TEXT(card, 4, "setaccb");
+ if (cmd->hdr.return_code)
+ return 0;
+ qeth_setadpparms_inspect_rc(cmd);
- cmd = (struct qeth_ipa_cmd *) data;
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
QETH_DBF_TEXT_(SETUP, 2, "setaccb");
QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
@@ -4407,7 +4385,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
card->options.isolation = card->options.prev_isolation;
break;
}
- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
return 0;
}
@@ -4695,14 +4672,15 @@ out:
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
struct qeth_qoat_priv *priv;
char *resdata;
int resdatalen;
QETH_CARD_TEXT(card, 3, "qoatcb");
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
- cmd = (struct qeth_ipa_cmd *)data;
priv = (struct qeth_qoat_priv *)reply->param;
resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
resdata = (char *)data + 28;
@@ -4796,21 +4774,18 @@ out:
static int qeth_query_card_info_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
struct qeth_query_card_info *card_info;
- struct carrier_info *carrier_info;
QETH_CARD_TEXT(card, 2, "qcrdincb");
- carrier_info = (struct carrier_info *)reply->param;
- cmd = (struct qeth_ipa_cmd *)data;
- card_info = &cmd->data.setadapterparms.data.card_info;
- if (cmd->data.setadapterparms.hdr.return_code == 0) {
- carrier_info->card_type = card_info->card_type;
- carrier_info->port_mode = card_info->port_mode;
- carrier_info->port_speed = card_info->port_speed;
- }
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return 0;
- qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ card_info = &cmd->data.setadapterparms.data.card_info;
+ carrier_info->card_type = card_info->card_type;
+ carrier_info->port_mode = card_info->port_mode;
+ carrier_info->port_speed = card_info->port_speed;
return 0;
}
@@ -4857,7 +4832,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
goto out;
}
- ccw_device_get_id(CARD_DDEV(card), &id);
+ ccw_device_get_id(CARD_RDEV(card), &id);
request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION2;
request->op_code = DIAG26C_GET_MAC;
@@ -5017,7 +4992,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
goto out_free_in_sbals;
}
for (i = 0; i < card->qdio.no_in_queues; ++i)
- queue_start_poll[i] = card->discipline->start_poll;
+ queue_start_poll[i] = qeth_qdio_start_poll;
qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
@@ -5041,8 +5016,8 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.qib_param_field = qib_param_field;
init_data.no_input_qs = card->qdio.no_in_queues;
init_data.no_output_qs = card->qdio.no_out_queues;
- init_data.input_handler = card->discipline->input_handler;
- init_data.output_handler = card->discipline->output_handler;
+ init_data.input_handler = qeth_qdio_input_handler;
+ init_data.output_handler = qeth_qdio_output_handler;
init_data.queue_start_poll_array = queue_start_poll;
init_data.int_parm = (unsigned long) card;
init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
@@ -5226,6 +5201,11 @@ retriable:
rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
if (rc == -ENOMEM)
goto out;
+ if (qeth_is_supported(card, IPA_IPV6)) {
+ rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
+ if (rc == -ENOMEM)
+ goto out;
+ }
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc < 0) {
@@ -5533,26 +5513,26 @@ int qeth_send_setassparms(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_send_setassparms);
-int qeth_send_simple_setassparms(struct qeth_card *card,
- enum qeth_ipa_funcs ipa_func,
- __u16 cmd_code, long data)
+int qeth_send_simple_setassparms_prot(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ u16 cmd_code, long data,
+ enum qeth_prot_versions prot)
{
int rc;
int length = 0;
struct qeth_cmd_buffer *iob;
- QETH_CARD_TEXT(card, 4, "simassp4");
+ QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
if (data)
length = sizeof(__u32);
- iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
- length, QETH_PROT_IPV4);
+ iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
if (!iob)
return -ENOMEM;
rc = qeth_send_setassparms(card, iob, length, data,
qeth_setassparms_cb, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms);
+EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
static void qeth_unregister_dbf_views(void)
{
@@ -6030,7 +6010,8 @@ static struct {
{"tx lin"},
{"tx linfail"},
{"cq handler count"},
- {"cq handler time"}
+ {"cq handler time"},
+ {"rx csum"}
};
int qeth_core_get_sset_count(struct net_device *dev, int stringset)
@@ -6092,6 +6073,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
data[35] = card->perf_stats.tx_linfail;
data[36] = card->perf_stats.cq_cnt;
data[37] = card->perf_stats.cq_time;
+ data[38] = card->perf_stats.rx_csum;
}
EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
@@ -6348,14 +6330,15 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
__u16 cmd_code, long data,
- struct qeth_checksum_cmd *chksum_cb)
+ struct qeth_checksum_cmd *chksum_cb,
+ enum qeth_prot_versions prot)
{
struct qeth_cmd_buffer *iob;
int rc = -ENOMEM;
QETH_CARD_TEXT(card, 4, "chkdocmd");
iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
- sizeof(__u32), QETH_PROT_IPV4);
+ sizeof(__u32), prot);
if (iob)
rc = qeth_send_setassparms(card, iob, sizeof(__u32), data,
qeth_ipa_checksum_run_cmd_cb,
@@ -6363,16 +6346,17 @@ static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
return rc;
}
-static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
+static int qeth_send_checksum_on(struct qeth_card *card, int cstype,
+ enum qeth_prot_versions prot)
{
- const __u32 required_features = QETH_IPA_CHECKSUM_IP_HDR |
- QETH_IPA_CHECKSUM_UDP |
- QETH_IPA_CHECKSUM_TCP;
+ u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
struct qeth_checksum_cmd chksum_cb;
int rc;
+ if (prot == QETH_PROT_IPV4)
+ required_features |= QETH_IPA_CHECKSUM_IP_HDR;
rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
- &chksum_cb);
+ &chksum_cb, prot);
if (!rc) {
if ((required_features & chksum_cb.supported) !=
required_features)
@@ -6384,37 +6368,42 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
QETH_CARD_IFNAME(card));
}
if (rc) {
- qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0);
+ qeth_send_simple_setassparms_prot(card, cstype,
+ IPA_CMD_ASS_STOP, 0, prot);
dev_warn(&card->gdev->dev,
- "Starting HW checksumming for %s failed, using SW checksumming\n",
- QETH_CARD_IFNAME(card));
+ "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n",
+ prot, QETH_CARD_IFNAME(card));
return rc;
}
rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
- chksum_cb.supported, &chksum_cb);
+ chksum_cb.supported, &chksum_cb,
+ prot);
if (!rc) {
if ((required_features & chksum_cb.enabled) !=
required_features)
rc = -EIO;
}
if (rc) {
- qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0);
+ qeth_send_simple_setassparms_prot(card, cstype,
+ IPA_CMD_ASS_STOP, 0, prot);
dev_warn(&card->gdev->dev,
- "Enabling HW checksumming for %s failed, using SW checksumming\n",
- QETH_CARD_IFNAME(card));
+ "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n",
+ prot, QETH_CARD_IFNAME(card));
return rc;
}
- dev_info(&card->gdev->dev, "HW Checksumming (%sbound) enabled\n",
- cstype == IPA_INBOUND_CHECKSUM ? "in" : "out");
+ dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
+ cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
return 0;
}
-static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype)
+static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
+ enum qeth_prot_versions prot)
{
- int rc = (on) ? qeth_send_checksum_on(card, cstype)
- : qeth_send_simple_setassparms(card, cstype,
- IPA_CMD_ASS_STOP, 0);
+ int rc = (on) ? qeth_send_checksum_on(card, cstype, prot)
+ : qeth_send_simple_setassparms_prot(card, cstype,
+ IPA_CMD_ASS_STOP, 0,
+ prot);
return rc ? -EIO : 0;
}
@@ -6441,8 +6430,31 @@ static int qeth_set_ipa_tso(struct qeth_card *card, int on)
return rc;
}
-#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO)
+static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
+{
+ int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
+ int rc_ipv6;
+
+ if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
+ rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
+ QETH_PROT_IPV4);
+ if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
+ /* no/one Offload Assist available, so the rc is trivial */
+ return rc_ipv4;
+
+ rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
+ QETH_PROT_IPV6);
+ if (on)
+ /* enable: success if any Assist is active */
+ return (rc_ipv6) ? rc_ipv4 : 0;
+
+ /* disable: failure if any Assist is still active */
+ return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
+}
+
+#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
+ NETIF_F_IPV6_CSUM)
/**
* qeth_recover_features() - Restore device features after recovery
* @dev: the recovering net_device
@@ -6477,16 +6489,19 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
if ((changed & NETIF_F_IP_CSUM)) {
- rc = qeth_set_ipa_csum(card,
- features & NETIF_F_IP_CSUM ? 1 : 0,
- IPA_OUTBOUND_CHECKSUM);
+ rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
+ IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
if (rc)
changed ^= NETIF_F_IP_CSUM;
}
- if ((changed & NETIF_F_RXCSUM)) {
- rc = qeth_set_ipa_csum(card,
- features & NETIF_F_RXCSUM ? 1 : 0,
- IPA_INBOUND_CHECKSUM);
+ if (changed & NETIF_F_IPV6_CSUM) {
+ rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
+ IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
+ if (rc)
+ changed ^= NETIF_F_IPV6_CSUM;
+ }
+ if (changed & NETIF_F_RXCSUM) {
+ rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
if (rc)
changed ^= NETIF_F_RXCSUM;
}
@@ -6513,7 +6528,10 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
QETH_DBF_TEXT(SETUP, 2, "fixfeat");
if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
features &= ~NETIF_F_IP_CSUM;
- if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
+ if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
+ features &= ~NETIF_F_IPV6_CSUM;
+ if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
+ !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
features &= ~NETIF_F_RXCSUM;
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO;
@@ -6563,10 +6581,14 @@ static int __init qeth_core_init(void)
mutex_init(&qeth_mod_mutex);
qeth_wq = create_singlethread_workqueue("qeth_wq");
+ if (!qeth_wq) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
rc = qeth_register_dbf_views();
if (rc)
- goto out_err;
+ goto dbf_err;
qeth_core_root_dev = root_device_register("qeth");
rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
if (rc)
@@ -6603,6 +6625,8 @@ slab_err:
root_device_unregister(qeth_core_root_dev);
register_err:
qeth_unregister_dbf_views();
+dbf_err:
+ destroy_workqueue(qeth_wq);
out_err:
pr_err("Initializing the qeth device driver failed\n");
return rc;
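
A pattern worth noting in the hunks above: qeth_default_setadapterparms_cb() is folded into qeth_setadpparms_inspect_rc(), and each SETADAPTERPARMS callback now bails out early when the command carries an error code. A condensed sketch of the converted callback shape (the function name here is hypothetical; the real users are qeth_query_setadapterparms_cb() and friends):

	static int example_adpparms_cb(struct qeth_card *card,
				       struct qeth_reply *reply, unsigned long data)
	{
		struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;

		if (qeth_setadpparms_inspect_rc(cmd))
			return 0;	/* hdr.return_code already propagated, skip parsing */

		/* ... evaluate cmd->data.setadapterparms.data ... */
		return 0;
	}
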
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 619f897b4bb0..878e62f35169 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[];
#define QETH_HALT_CHANNEL_PARM -11
#define QETH_RCD_PARM -12
+static inline bool qeth_intparm_is_iob(unsigned long intparm)
+{
+ switch (intparm) {
+ case QETH_CLEAR_CHANNEL_PARM:
+ case QETH_HALT_CHANNEL_PARM:
+ case QETH_RCD_PARM:
+ case 0:
+ return false;
+ }
+ return true;
+}
+
/*****************************************************************************/
/* IP Assist related definitions */
/*****************************************************************************/
@@ -234,6 +246,8 @@ enum qeth_ipa_funcs {
IPA_QUERY_ARP_ASSIST = 0x00040000L,
IPA_INBOUND_TSO = 0x00080000L,
IPA_OUTBOUND_TSO = 0x00100000L,
+ IPA_INBOUND_CHECKSUM_V6 = 0x00400000L,
+ IPA_OUTBOUND_CHECKSUM_V6 = 0x00800000L,
};
/* SETIP/DELIP IPA Command: ***************************************************/
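
qeth_intparm_is_iob() lets the interrupt handler tell command-buffer addresses apart from the special channel parameters, so a terminated I/O can release its buffer instead of leaking it. Roughly, mirroring the qeth_irq() hunk earlier (a fragment only, not a complete handler):

	struct qeth_cmd_buffer *iob = NULL;

	if (qeth_intparm_is_iob(intparm))
		iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);

	if (__qeth_check_irb_error(cdev, intparm, irb)) {
		/* IO was terminated, free its resources */
		if (iob)
			qeth_release_buffer(iob->channel, iob);
		/* ... clear irq_pending and wake waiters ... */
		return;
	}
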
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index ae81534de912..c3f18afb368b 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -144,6 +144,8 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
goto out;
}
card->info.portno = portno;
+ if (card->dev)
+ card->dev->dev_port = portno;
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 50a313806dde..a7cb37da6a21 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -17,11 +17,9 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
-#include <linux/ip.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/hashtable.h>
-#include <linux/string.h>
#include <asm/setup.h>
#include "qeth_core.h"
#include "qeth_l2.h"
@@ -122,13 +120,10 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Setmac");
rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
if (rc == 0) {
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- ether_addr_copy(card->dev->dev_addr, mac);
dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- card->dev->dev_addr, card->dev->name);
+ "MAC address %pM successfully registered on device %s\n",
+ mac, card->dev->name);
} else {
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
switch (rc) {
case -EEXIST:
dev_warn(&card->gdev->dev,
@@ -143,19 +138,6 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
return rc;
}
-static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
-{
- int rc;
-
- QETH_CARD_TEXT(card, 2, "L2Delmac");
- if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
- return 0;
- rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC);
- if (rc == 0)
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- return rc;
-}
-
static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
{
enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
@@ -212,23 +194,6 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
return RTN_UNSPEC;
}
-static void qeth_l2_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb)
-{
- struct iphdr *iph = ip_hdr(skb);
-
- /* tcph->check contains already the pseudo hdr checksum
- * so just set the header flags
- */
- if (iph->protocol == IPPROTO_UDP)
- hdr->hdr.l2.flags[1] |= QETH_HDR_EXT_UDP;
- hdr->hdr.l2.flags[1] |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
- QETH_HDR_EXT_CSUM_HDR_REQ;
- iph->check = 0;
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
-}
-
static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
int cast_type, unsigned int data_len)
{
@@ -314,12 +279,13 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
static void qeth_l2_process_vlans(struct qeth_card *card)
{
struct qeth_vlan_vid *id;
+
QETH_CARD_TEXT(card, 3, "L2prcvln");
- spin_lock_bh(&card->vlanlock);
+ mutex_lock(&card->vid_list_mutex);
list_for_each_entry(id, &card->vid_list, list) {
qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
}
- spin_unlock_bh(&card->vlanlock);
+ mutex_unlock(&card->vid_list_mutex);
}
static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
@@ -336,7 +302,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
QETH_CARD_TEXT(card, 3, "aidREC");
return 0;
}
- id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
+ id = kmalloc(sizeof(*id), GFP_KERNEL);
if (id) {
id->vid = vid;
rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
@@ -344,9 +310,9 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
kfree(id);
return rc;
}
- spin_lock_bh(&card->vlanlock);
+ mutex_lock(&card->vid_list_mutex);
list_add_tail(&id->list, &card->vid_list);
- spin_unlock_bh(&card->vlanlock);
+ mutex_unlock(&card->vid_list_mutex);
} else {
return -ENOMEM;
}
@@ -365,7 +331,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
QETH_CARD_TEXT(card, 3, "kidREC");
return 0;
}
- spin_lock_bh(&card->vlanlock);
+ mutex_lock(&card->vid_list_mutex);
list_for_each_entry(id, &card->vid_list, list) {
if (id->vid == vid) {
list_del(&id->list);
@@ -373,7 +339,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
break;
}
}
- spin_unlock_bh(&card->vlanlock);
+ mutex_unlock(&card->vid_list_mutex);
if (tmpid) {
rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
kfree(tmpid);
@@ -440,15 +406,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
switch (hdr->hdr.l2.id) {
case QETH_HEADER_TYPE_LAYER2:
skb->protocol = eth_type_trans(skb, skb->dev);
- if ((card->dev->features & NETIF_F_RXCSUM)
- && ((hdr->hdr.l2.flags[1] &
- (QETH_HDR_EXT_CSUM_HDR_REQ |
- QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
- (QETH_HDR_EXT_CSUM_HDR_REQ |
- QETH_HDR_EXT_CSUM_TRANSP_REQ)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
+ qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
if (skb->protocol == htons(ETH_P_802_2))
*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
len = skb->len;
@@ -481,7 +439,6 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
int rc = 0;
- char vendor_pre[] = {0x02, 0x00, 0x00};
QETH_DBF_TEXT(SETUP, 2, "l2reqmac");
QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
@@ -501,16 +458,20 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
card->info.type == QETH_CARD_TYPE_OSX ||
card->info.guestlan) {
rc = qeth_setadpparms_change_macaddr(card);
- if (rc) {
- QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
- "device %s: x%x\n", CARD_BUS_ID(card), rc);
- QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
- return rc;
- }
- } else {
- eth_random_addr(card->dev->dev_addr);
- memcpy(card->dev->dev_addr, vendor_pre, 3);
+ if (!rc)
+ goto out;
+ QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n",
+ CARD_BUS_ID(card), rc);
+ QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
+ /* fall back once more: */
}
+
+ /* some devices don't support a custom MAC address: */
+ if (card->info.type == QETH_CARD_TYPE_OSM ||
+ card->info.type == QETH_CARD_TYPE_OSX)
+ return (rc) ? rc : -EADDRNOTAVAIL;
+ eth_hw_addr_random(card->dev);
+
out:
QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, card->dev->addr_len);
return 0;
@@ -520,6 +481,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct qeth_card *card = dev->ml_priv;
+ u8 old_addr[ETH_ALEN];
int rc = 0;
QETH_CARD_TEXT(card, 3, "setmac");
@@ -531,14 +493,35 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -EOPNOTSUPP;
}
QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "setmcREC");
return -ERESTARTSYS;
}
- rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
- if (!rc || (rc == -ENOENT))
- rc = qeth_l2_send_setmac(card, addr->sa_data);
- return rc ? -EINVAL : 0;
+
+ if (!qeth_card_hw_is_reachable(card)) {
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+ return 0;
+ }
+
+ /* don't register the same address twice */
+ if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
+ (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+ return 0;
+
+ /* add the new address, switch over, drop the old */
+ rc = qeth_l2_send_setmac(card, addr->sa_data);
+ if (rc)
+ return rc;
+ ether_addr_copy(old_addr, dev->dev_addr);
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+ qeth_l2_remove_mac(card, old_addr);
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ return 0;
}
static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -680,7 +663,8 @@ out:
}
static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int cast_type)
+ struct qeth_qdio_out_q *queue, int cast_type,
+ int ipv)
{
int push_len = sizeof(struct qeth_hdr);
unsigned int elements, nr_frags;
@@ -718,8 +702,11 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
hdr_elements = 1;
}
qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- qeth_l2_hdr_csum(card, hdr, skb);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+ }
elements = qeth_get_elements_no(card, skb, hdr_elements, 0);
if (!elements) {
@@ -771,6 +758,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
{
struct qeth_card *card = dev->ml_priv;
int cast_type = qeth_l2_get_cast_type(card, skb);
+ int ipv = qeth_get_ip_version(skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
int rc;
@@ -778,7 +766,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
if (card->qdio.do_prio_queueing || (cast_type &&
card->info.is_multicast_different))
queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
- qeth_get_ip_version(skb), cast_type)];
+ ipv, cast_type)];
else
queue = card->qdio.out_qs[card->qdio.default_out_queue];
@@ -801,7 +789,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type);
break;
default:
- rc = qeth_l2_xmit_osa(card, skb, queue, cast_type);
+ rc = qeth_l2_xmit_osa(card, skb, queue, cast_type, ipv);
}
if (!rc) {
@@ -978,6 +966,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->mtu = card->info.initial_mtu;
card->dev->min_mtu = 64;
card->dev->max_mtu = ETH_MAX_MTU;
+ card->dev->dev_port = card->info.portno;
card->dev->netdev_ops = &qeth_l2_netdev_ops;
if (card->info.type == QETH_CARD_TYPE_OSN) {
card->dev->ethtool_ops = &qeth_l2_osn_ops;
@@ -1006,10 +995,15 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->hw_features |= NETIF_F_IP_CSUM;
card->dev->vlan_features |= NETIF_F_IP_CSUM;
}
- if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
- card->dev->hw_features |= NETIF_F_RXCSUM;
- card->dev->vlan_features |= NETIF_F_RXCSUM;
- }
+ }
+ if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
+ card->dev->hw_features |= NETIF_F_IPV6_CSUM;
+ card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
+ }
+ if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) ||
+ qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) {
+ card->dev->hw_features |= NETIF_F_RXCSUM;
+ card->dev->vlan_features |= NETIF_F_RXCSUM;
}
card->info.broadcast_capable = 1;
@@ -1068,8 +1062,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
- if (card->info.type != QETH_CARD_TYPE_OSN)
- qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
+ if (card->info.type != QETH_CARD_TYPE_OSN &&
+ !qeth_l2_send_setmac(card, card->dev->dev_addr))
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
if (card->info.hwtrap &&
@@ -1309,9 +1304,6 @@ static int qeth_l2_control_event(struct qeth_card *card,
struct qeth_discipline qeth_l2_discipline = {
.devtype = &qeth_l2_devtype,
- .start_poll = qeth_qdio_start_poll,
- .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
- .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
.process_rx_buffer = qeth_l2_process_inbound_buffer,
.recover = qeth_l2_recover,
.setup = qeth_l2_probe_device,
@@ -1339,8 +1331,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
qeth_prepare_control_data(card, len, iob);
QETH_CARD_TEXT(card, 6, "osnoirqp");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
- rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
- (addr_t) iob, 0, 0);
+ rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+ (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index c1a16a74aa83..e7fa479adf47 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -735,22 +735,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
return rc;
}
-static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
- enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
-{
- int rc;
- struct qeth_cmd_buffer *iob;
-
- QETH_CARD_TEXT(card, 4, "simassp6");
- iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
- 0, QETH_PROT_IPV6);
- if (!iob)
- return -ENOMEM;
- rc = qeth_send_setassparms(card, iob, 0, 0,
- qeth_setassparms_cb, NULL);
- return rc;
-}
-
static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
{
int rc;
@@ -851,14 +835,6 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
QETH_CARD_TEXT(card, 3, "softipv6");
- rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
- if (rc) {
- dev_err(&card->gdev->dev,
- "Activating IPv6 support for %s failed\n",
- QETH_CARD_IFNAME(card));
- return rc;
- }
-
if (card->info.type == QETH_CARD_TYPE_IQD)
goto out;
@@ -870,16 +846,16 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
QETH_CARD_IFNAME(card));
return rc;
}
- rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6,
- IPA_CMD_ASS_START);
+ rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6,
+ IPA_CMD_ASS_START, 0);
if (rc) {
dev_err(&card->gdev->dev,
"Activating IPv6 support for %s failed\n",
QETH_CARD_IFNAME(card));
return rc;
}
- rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU,
- IPA_CMD_ASS_START);
+ rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
+ IPA_CMD_ASS_START, 0);
if (rc) {
dev_warn(&card->gdev->dev,
"Enabling the passthrough mode for %s failed\n",
@@ -1293,91 +1269,6 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
in6_dev_put(in6_dev);
}
-static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
- unsigned short vid)
-{
- struct in_device *in_dev;
- struct in_ifaddr *ifa;
- struct qeth_ipaddr *addr;
- struct net_device *netdev;
-
- QETH_CARD_TEXT(card, 4, "frvaddr4");
-
- netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
- if (!netdev)
- return;
- in_dev = in_dev_get(netdev);
- if (!in_dev)
- return;
-
- addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (!addr)
- goto out;
-
- spin_lock_bh(&card->ip_lock);
-
- for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
- addr->u.a4.addr = be32_to_cpu(ifa->ifa_address);
- addr->u.a4.mask = be32_to_cpu(ifa->ifa_mask);
- addr->type = QETH_IP_TYPE_NORMAL;
- qeth_l3_delete_ip(card, addr);
- }
-
- spin_unlock_bh(&card->ip_lock);
-
- kfree(addr);
-out:
- in_dev_put(in_dev);
-}
-
-static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
- unsigned short vid)
-{
- struct inet6_dev *in6_dev;
- struct inet6_ifaddr *ifa;
- struct qeth_ipaddr *addr;
- struct net_device *netdev;
-
- QETH_CARD_TEXT(card, 4, "frvaddr6");
-
- netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
- if (!netdev)
- return;
-
- in6_dev = in6_dev_get(netdev);
- if (!in6_dev)
- return;
-
- addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
- if (!addr)
- goto out;
-
- spin_lock_bh(&card->ip_lock);
-
- list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
- memcpy(&addr->u.a6.addr, &ifa->addr,
- sizeof(struct in6_addr));
- addr->u.a6.pfxlen = ifa->prefix_len;
- addr->type = QETH_IP_TYPE_NORMAL;
- qeth_l3_delete_ip(card, addr);
- }
-
- spin_unlock_bh(&card->ip_lock);
-
- kfree(addr);
-out:
- in6_dev_put(in6_dev);
-}
-
-static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
- unsigned short vid)
-{
- rcu_read_lock();
- qeth_l3_free_vlan_addresses4(card, vid);
- qeth_l3_free_vlan_addresses6(card, vid);
- rcu_read_unlock();
-}
-
static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
@@ -1398,8 +1289,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
QETH_CARD_TEXT(card, 3, "kidREC");
return 0;
}
- /* unregister IP addresses of vlan device */
- qeth_l3_free_vlan_addresses(card, vid);
clear_bit(vid, card->active_vlans);
qeth_l3_set_rx_mode(dev);
return 0;
@@ -1454,17 +1343,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
}
- if (card->dev->features & NETIF_F_RXCSUM) {
- if ((hdr->hdr.l3.ext_flags &
- (QETH_HDR_EXT_CSUM_HDR_REQ |
- QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
- (QETH_HDR_EXT_CSUM_HDR_REQ |
- QETH_HDR_EXT_CSUM_TRANSP_REQ))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
- } else
- skb->ip_summed = CHECKSUM_NONE;
+ qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags);
}
static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
@@ -2210,23 +2089,6 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
rcu_read_unlock();
}
-static void qeth_l3_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb)
-{
- struct iphdr *iph = ip_hdr(skb);
-
- /* tcph->check contains already the pseudo hdr checksum
- * so just set the header flags
- */
- if (iph->protocol == IPPROTO_UDP)
- hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
- hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
- QETH_HDR_EXT_CSUM_HDR_REQ;
- iph->check = 0;
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
-}
-
static void qeth_tso_fill_header(struct qeth_card *card,
struct qeth_hdr *qhdr, struct sk_buff *skb)
{
@@ -2418,8 +2280,11 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
}
}
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- qeth_l3_hdr_csum(card, hdr, new_skb);
+ if (new_skb->ip_summed == CHECKSUM_PARTIAL) {
+ qeth_tx_csum(new_skb, &hdr->hdr.l3.ext_flags, ipv);
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+ }
}
elements = use_tso ?
@@ -2620,28 +2485,32 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
(card->info.link_type == QETH_LINK_TYPE_HSTR)) {
pr_info("qeth_l3: ignoring TR device\n");
return -ENODEV;
- } else {
- card->dev = alloc_etherdev(0);
- if (!card->dev)
- return -ENODEV;
- card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
-
- /*IPv6 address autoconfiguration stuff*/
- qeth_l3_get_unique_id(card);
- if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
- card->dev->dev_id = card->info.unique_id &
- 0xffff;
-
- card->dev->hw_features |= NETIF_F_SG;
- card->dev->vlan_features |= NETIF_F_SG;
-
- if (!card->info.guestlan) {
- card->dev->features |= NETIF_F_SG;
- card->dev->hw_features |= NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
- card->dev->vlan_features |= NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
- }
+ }
+
+ card->dev = alloc_etherdev(0);
+ if (!card->dev)
+ return -ENODEV;
+ card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
+
+ /*IPv6 address autoconfiguration stuff*/
+ qeth_l3_get_unique_id(card);
+ if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
+ card->dev->dev_id = card->info.unique_id & 0xffff;
+
+ card->dev->hw_features |= NETIF_F_SG;
+ card->dev->vlan_features |= NETIF_F_SG;
+
+ if (!card->info.guestlan) {
+ card->dev->features |= NETIF_F_SG;
+ card->dev->hw_features |= NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
+ card->dev->vlan_features |= NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
+ }
+
+ if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
+ card->dev->hw_features |= NETIF_F_IPV6_CSUM;
+ card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
}
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN,
@@ -2663,6 +2532,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->mtu = card->info.initial_mtu;
card->dev->min_mtu = 64;
card->dev->max_mtu = ETH_MAX_MTU;
+ card->dev->dev_port = card->info.portno;
card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
@@ -2960,9 +2830,6 @@ static int qeth_l3_control_event(struct qeth_card *card,
struct qeth_discipline qeth_l3_discipline = {
.devtype = &qeth_l3_devtype,
- .start_poll = qeth_qdio_start_poll,
- .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
- .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
.process_rx_buffer = qeth_l3_process_inbound_buffer,
.recover = qeth_l3_recover,
.setup = qeth_l3_probe_device,
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 3b0c8b8a7634..066b5c3aaae6 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -176,7 +176,7 @@ static struct device_driver smsg_driver = {
static void __exit smsg_exit(void)
{
- cpcmd("SET SMSG IUCV", NULL, 0, NULL);
+ cpcmd("SET SMSG OFF", NULL, 0, NULL);
device_unregister(smsg_dev);
iucv_unregister(&smsg_handler, 1);
driver_unregister(&smsg_driver);
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index c44d7c7ffc92..1754f55e2fac 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -3,7 +3,7 @@
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
+ * the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 84858d5c8257..d62ddd63f4fe 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -724,6 +724,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
int wait;
unsigned long flags = 0;
unsigned long mflags = 0;
+ struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
+ fibptr->hw_fib_va;
fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
if (callback) {
@@ -734,11 +736,9 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
wait = 1;
- if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
- struct aac_hba_cmd_req *hbacmd =
- (struct aac_hba_cmd_req *)fibptr->hw_fib_va;
+ hbacmd->iu_type = command;
- hbacmd->iu_type = command;
+ if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
/* bit1 of request_id must be 0 */
hbacmd->request_id =
cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
@@ -1502,9 +1502,10 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
host = aac->scsi_host_ptr;
scsi_block_requests(host);
aac_adapter_disable_int(aac);
- if (aac->thread->pid != current->pid) {
+ if (aac->thread && aac->thread->pid != current->pid) {
spin_unlock_irq(host->host_lock);
kthread_stop(aac->thread);
+ aac->thread = NULL;
jafo = 1;
}
@@ -1591,6 +1592,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
aac->name);
if (IS_ERR(aac->thread)) {
retval = PTR_ERR(aac->thread);
+ aac->thread = NULL;
goto out;
}
}
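
Both aacraid hunks follow the same rule: whenever the polling thread is stopped (or fails to start), aac->thread is reset to NULL, and every later stop path checks the pointer first. A sketch of the resulting idiom (simplified, without the host-lock handling the driver does around it):

	if (aac->thread && aac->thread->pid != current->pid) {
		kthread_stop(aac->thread);
		aac->thread = NULL;	/* later checks see "no thread" */
	}
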
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 2664ea0df35f..f24fb942065d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1562,6 +1562,7 @@ static void __aac_shutdown(struct aac_dev * aac)
up(&fib->event_wait);
}
kthread_stop(aac->thread);
+ aac->thread = NULL;
}
aac_send_shutdown(aac);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 406e94312d4e..211da1d5a869 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -2108,12 +2108,12 @@ static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
log_debug(1 << CXGBI_DBG_TOE,
"cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
- if (cxgb4i_cplhandlers[opc])
- cxgb4i_cplhandlers[opc](cdev, skb);
- else {
+ if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
pr_err("No handler for opcode 0x%x.\n", opc);
__kfree_skb(skb);
- }
+ } else
+ cxgb4i_cplhandlers[opc](cdev, skb);
+
return 0;
nomem:
log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
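
[Annotation] The cxgb4i hunk above adds an ARRAY_SIZE() bound check before indexing the CPL handler table, so an unexpected opcode is rejected instead of reading past the array. A minimal sketch of a bounds-checked dispatch table (illustrative table, not the cxgb4i code):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

typedef void (*cpl_handler_t)(unsigned int opc);

static void handle_act_open(unsigned int opc) { printf("handled 0x%x\n", opc); }

/* Sparse table: most slots are intentionally NULL. */
static cpl_handler_t handlers[0x80] = {
	[0x01] = handle_act_open,
};

static int dispatch(unsigned int opc)
{
	if (opc >= ARRAY_SIZE(handlers) || !handlers[opc]) {
		fprintf(stderr, "No handler for opcode 0x%x.\n", opc);
		return -1;              /* caller frees the message */
	}
	handlers[opc](opc);
	return 0;
}

int main(void)
{
	dispatch(0x01);   /* in range, handler present */
	dispatch(0x7f);   /* in range, but no handler registered */
	dispatch(0xff0);  /* out of range: caught before indexing */
	return 0;
}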
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 6866975b25f3..5ceea8da7bb6 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2051,13 +2051,16 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong ar
}
break;
}
- case I2ORESETCMD:
- if(pHba->host)
- spin_lock_irqsave(pHba->host->host_lock, flags);
+ case I2ORESETCMD: {
+ struct Scsi_Host *shost = pHba->host;
+
+ if (shost)
+ spin_lock_irqsave(shost->host_lock, flags);
adpt_hba_reset(pHba);
- if(pHba->host)
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
+ if (shost)
+ spin_unlock_irqrestore(shost->host_lock, flags);
break;
+ }
case I2ORESCANCMD:
adpt_rescan(pHba);
break;
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index abddde11982b..98597b59c12a 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -296,7 +296,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of Abort FW Timeouts: %lld\n"
"Number of Abort IO NOT Found: %lld\n"
- "Abord issued times: \n"
+ "Abort issued times: \n"
" < 6 sec : %lld\n"
" 6 sec - 20 sec : %lld\n"
" 20 sec - 30 sec : %lld\n"
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 7649d63a1b8d..3771e59a9fae 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -472,7 +472,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
else
shost->dma_boundary = 0xffffffff;
- shost->use_blk_mq = scsi_use_blk_mq;
shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq;
device_initialize(&shost->shost_gendev);
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index edb7be786c65..9e8de1462593 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -291,7 +291,7 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
* Note: We have not moved the current phy_index so we will actually
* compare the startting phy with itself.
* This is expected and required to add the phy to the port. */
- while (phy_index < SCI_MAX_PHYS) {
+ for (; phy_index < SCI_MAX_PHYS; phy_index++) {
if ((phy_mask & (1 << phy_index)) == 0)
continue;
sci_phy_get_sas_address(&ihost->phys[phy_index],
@@ -311,7 +311,6 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
&ihost->phys[phy_index]);
assigned_phy_mask |= (1 << phy_index);
- phy_index++;
}
}
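
[Annotation] The isci hunk above converts a while-loop that incremented phy_index at the bottom into a for-loop: with the old shape, the "continue" taken for a phy that is not in the mask skipped the increment and the loop never advanced. A small sketch of the hazard and the fix (illustrative, not the isci code):

#include <stdio.h>

#define MAX_PHYS 4

int main(void)
{
	unsigned int phy_mask = 0x5;   /* phys 0 and 2 present */
	int phy_index;

	/* Buggy shape (do not use):
	 *   while (phy_index < MAX_PHYS) {
	 *           if (!(phy_mask & (1 << phy_index)))
	 *                   continue;      <-- skips the increment below
	 *           ...
	 *           phy_index++;
	 *   }
	 */

	/* Fixed shape: the increment lives in the for-header, so continue is safe. */
	for (phy_index = 0; phy_index < MAX_PHYS; phy_index++) {
		if (!(phy_mask & (1 << phy_index)))
			continue;
		printf("validating phy %d\n", phy_index);
	}
	return 0;
}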
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index ce97cde3b41c..f4d988dd1e9d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1124,12 +1124,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
goto fail_fw_init;
}
- ret = 0;
+ return 0;
fail_fw_init:
dev_err(&instance->pdev->dev,
- "Init cmd return status %s for SCSI host %d\n",
- ret ? "FAILED" : "SUCCESS", instance->host->host_no);
+ "Init cmd return status FAILED for SCSI host %d\n",
+ instance->host->host_no);
return ret;
}
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index c105a2e48ac1..cabb6af60fb8 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -383,11 +383,16 @@ struct qedf_ctx {
u32 flogi_failed;
/* Used for fc statistics */
+ struct mutex stats_mutex;
u64 input_requests;
u64 output_requests;
u64 control_requests;
u64 packet_aborts;
u64 alloc_failures;
+ u8 lun_resets;
+ u8 target_resets;
+ u8 task_set_fulls;
+ u8 busy;
};
struct io_bdt {
@@ -496,7 +501,9 @@ extern int qedf_post_io_req(struct qedf_rport *fcport,
extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern int qedf_send_flogi(struct qedf_ctx *qedf);
+extern void qedf_get_protocol_tlv_data(void *dev, void *data);
extern void qedf_fp_io_handler(struct work_struct *work);
+extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
#define FCOE_WORD_TO_BYTE 4
#define QEDF_MAX_TASK_NUM 0xFFFF
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index c539a7ae3a7e..5789ce185923 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -439,7 +439,6 @@ qedf_dbg_offload_stats_open(struct inode *inode, struct file *file)
return single_open(file, qedf_offload_stats_show, qedf);
}
-
const struct file_operations qedf_dbg_fops[] = {
qedf_dbg_fileops(qedf, fp_int),
qedf_dbg_fileops_seq(qedf, io_trace),
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 773558fc0697..16d1a21cdff9 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -23,6 +23,7 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
struct fip_vlan *vlan;
#define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS;
+ unsigned long flags = 0;
skb = dev_alloc_skb(sizeof(struct fip_vlan));
if (!skb)
@@ -65,7 +66,9 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
kfree_skb(skb);
return;
}
- qed_ops->ll2->start_xmit(qedf->cdev, skb);
+
+ set_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &flags);
+ qed_ops->ll2->start_xmit(qedf->cdev, skb, flags);
}
static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
@@ -139,7 +142,7 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb->len, false);
- qed_ops->ll2->start_xmit(qedf->cdev, skb);
+ qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
}
/* Process incoming FIP frames. */
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 50a50c4249d0..3fe579d0f1a8 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1200,6 +1200,12 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
fcport->retry_delay_timestamp =
jiffies + (qualifier * HZ / 10);
}
+ /* Record stats */
+ if (io_req->cdb_status ==
+ SAM_STAT_TASK_SET_FULL)
+ qedf->task_set_fulls++;
+ else
+ qedf->busy++;
}
}
if (io_req->fcp_resid)
@@ -1866,6 +1872,11 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
goto reset_tmf_err;
}
+ if (tm_flags == FCP_TMF_LUN_RESET)
+ qedf->lun_resets++;
+ else if (tm_flags == FCP_TMF_TGT_RESET)
+ qedf->target_resets++;
+
/* Initialize rest of io_req fields */
io_req->sc_cmd = sc_cmd;
io_req->fcport = fcport;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 284ccb566b19..d3f73d8d7738 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -566,6 +566,8 @@ static struct qed_fcoe_cb_ops qedf_cb_ops = {
{
.link_update = qedf_link_update,
.dcbx_aen = qedf_dcbx_handler,
+ .get_generic_tlv_data = qedf_get_generic_tlv_data,
+ .get_protocol_tlv_data = qedf_get_protocol_tlv_data,
}
};
@@ -994,7 +996,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
if (qedf_dump_frames)
print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
1, skb->data, skb->len, false);
- qed_ops->ll2->start_xmit(qedf->cdev, skb);
+ qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
return 0;
}
@@ -1746,6 +1748,8 @@ static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
goto out;
}
+ mutex_lock(&qedf->stats_mutex);
+
/* Query firmware for offload stats */
qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
@@ -1779,6 +1783,7 @@ static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
+ mutex_unlock(&qedf->stats_mutex);
kfree(fw_fcoe_stats);
out:
return qedf_stats;
@@ -2948,6 +2953,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->stop_io_on_error = false;
pci_set_drvdata(pdev, qedf);
init_completion(&qedf->fipvlan_compl);
+ mutex_init(&qedf->stats_mutex);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, "
@@ -3393,6 +3399,104 @@ static void qedf_remove(struct pci_dev *pdev)
}
/*
+ * Protocol TLV handler
+ */
+void qedf_get_protocol_tlv_data(void *dev, void *data)
+{
+ struct qedf_ctx *qedf = dev;
+ struct qed_mfw_tlv_fcoe *fcoe = data;
+ struct fc_lport *lport = qedf->lport;
+ struct Scsi_Host *host = lport->host;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(host);
+ struct fc_host_statistics *hst;
+
+ /* Force a refresh of the fc_host stats including offload stats */
+ hst = qedf_fc_get_host_stats(host);
+
+ fcoe->qos_pri_set = true;
+ fcoe->qos_pri = 3; /* Hard coded to 3 in driver */
+
+ fcoe->ra_tov_set = true;
+ fcoe->ra_tov = lport->r_a_tov;
+
+ fcoe->ed_tov_set = true;
+ fcoe->ed_tov = lport->e_d_tov;
+
+ fcoe->npiv_state_set = true;
+ fcoe->npiv_state = 1; /* NPIV always enabled */
+
+ fcoe->num_npiv_ids_set = true;
+ fcoe->num_npiv_ids = fc_host->npiv_vports_inuse;
+
+ /* Certain attributes we only want to set if we've selected an FCF */
+ if (qedf->ctlr.sel_fcf) {
+ fcoe->switch_name_set = true;
+ u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
+ }
+
+ fcoe->port_state_set = true;
+ /* For qedf we're either link down or fabric attach */
+ if (lport->link_up)
+ fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC;
+ else
+ fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE;
+
+ fcoe->link_failures_set = true;
+ fcoe->link_failures = (u16)hst->link_failure_count;
+
+ fcoe->fcoe_txq_depth_set = true;
+ fcoe->fcoe_rxq_depth_set = true;
+ fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS;
+ fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS;
+
+ fcoe->fcoe_rx_frames_set = true;
+ fcoe->fcoe_rx_frames = hst->rx_frames;
+
+ fcoe->fcoe_tx_frames_set = true;
+ fcoe->fcoe_tx_frames = hst->tx_frames;
+
+ fcoe->fcoe_rx_bytes_set = true;
+ fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000;
+
+ fcoe->fcoe_tx_bytes_set = true;
+ fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000;
+
+ fcoe->crc_count_set = true;
+ fcoe->crc_count = hst->invalid_crc_count;
+
+ fcoe->tx_abts_set = true;
+ fcoe->tx_abts = hst->fcp_packet_aborts;
+
+ fcoe->tx_lun_rst_set = true;
+ fcoe->tx_lun_rst = qedf->lun_resets;
+
+ fcoe->abort_task_sets_set = true;
+ fcoe->abort_task_sets = qedf->packet_aborts;
+
+ fcoe->scsi_busy_set = true;
+ fcoe->scsi_busy = qedf->busy;
+
+ fcoe->scsi_tsk_full_set = true;
+ fcoe->scsi_tsk_full = qedf->task_set_fulls;
+}
+
+/* Generic TLV data callback */
+void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
+{
+ struct qedf_ctx *qedf;
+
+ if (!dev) {
+ QEDF_INFO(NULL, QEDF_LOG_EVT,
+ "dev is NULL so ignoring get_generic_tlv_data request.\n");
+ return;
+ }
+ qedf = (struct qedf_ctx *)dev;
+
+ memset(data, 0, sizeof(struct qed_generic_tlvs));
+ ether_addr_copy(data->mac[0], qedf->mac);
+}
+
+/*
* Module Init/Remove
*/
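
[Annotation] The new qedf_get_protocol_tlv_data() above fills a management-firmware TLV structure in which every reported value has a companion "*_set" flag; only flagged fields are consumed. A minimal sketch of that value-plus-flag reporting pattern (illustrative struct and field names, not the real qed_mfw_tlv_fcoe layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fcoe_tlv_report {
	bool     ra_tov_set;
	uint32_t ra_tov;
	bool     crc_count_set;
	uint32_t crc_count;
	bool     scsi_busy_set;
	uint8_t  scsi_busy;
};

struct driver_stats {
	uint32_t r_a_tov;
	uint32_t invalid_crc_count;
	uint8_t  busy;
};

static void fill_tlv(const struct driver_stats *st, struct fcoe_tlv_report *tlv)
{
	memset(tlv, 0, sizeof(*tlv));   /* fields left unset are ignored */

	tlv->ra_tov_set = true;
	tlv->ra_tov = st->r_a_tov;

	tlv->crc_count_set = true;
	tlv->crc_count = st->invalid_crc_count;

	tlv->scsi_busy_set = true;
	tlv->scsi_busy = st->busy;
}

int main(void)
{
	struct driver_stats st = { .r_a_tov = 10000, .invalid_crc_count = 3, .busy = 1 };
	struct fcoe_tlv_report tlv;

	fill_tlv(&st, &tlv);
	if (tlv.crc_count_set)          /* consumer trusts only flagged fields */
		printf("crc_count=%u\n", tlv.crc_count);
	return 0;
}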
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index b8b22ce60ecc..fc3babc15fa3 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -353,6 +353,9 @@ struct qedi_ctx {
#define IPV6_LEN 41
#define IPV4_LEN 17
struct iscsi_boot_kset *boot_kset;
+
+ /* Used for iscsi statistics */
+ struct mutex stats_lock;
};
struct qedi_work {
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 7ec7f6e00fb8..ff21aa1fd939 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -1150,7 +1150,7 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
if (vlanid)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
- rc = qedi_ops->ll2->start_xmit(cdev, skb);
+ rc = qedi_ops->ll2->start_xmit(cdev, skb, 0);
if (rc) {
QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n",
rc);
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index ea1315189922..11260776212f 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -223,6 +223,12 @@ struct qedi_work_map {
struct work_struct *ptr_tmf_work;
};
+struct qedi_boot_target {
+ char ip_addr[64];
+ char iscsi_name[255];
+ u32 ipv6_en;
+};
+
#define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16)))
#define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 4da3592aec0f..32ee7f62fef9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -55,6 +55,7 @@ static void qedi_free_global_queues(struct qedi_ctx *qedi);
static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
+static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
@@ -879,6 +880,201 @@ static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
kfree(qedi->global_queues);
}
+static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
+ struct qedi_boot_target *tgt, u8 index)
+{
+ u32 ipv6_en;
+
+ ipv6_en = !!(block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
+
+ snprintf(tgt->iscsi_name, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+ block->target[index].target_name.byte);
+
+ tgt->ipv6_en = ipv6_en;
+
+ if (ipv6_en)
+ snprintf(tgt->ip_addr, IPV6_LEN, "%pI6\n",
+ block->target[index].ipv6_addr.byte);
+ else
+ snprintf(tgt->ip_addr, IPV4_LEN, "%pI4\n",
+ block->target[index].ipv4_addr.byte);
+}
+
+static int qedi_find_boot_info(struct qedi_ctx *qedi,
+ struct qed_mfw_tlv_iscsi *iscsi,
+ struct nvm_iscsi_block *block)
+{
+ struct qedi_boot_target *pri_tgt = NULL, *sec_tgt = NULL;
+ u32 pri_ctrl_flags = 0, sec_ctrl_flags = 0, found = 0;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct qedi_conn *qedi_conn;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ char ep_ip_addr[64];
+ int i, ret = 0;
+
+ pri_ctrl_flags = !!(block->target[0].ctrl_flags &
+ NVM_ISCSI_CFG_TARGET_ENABLED);
+ if (pri_ctrl_flags) {
+ pri_tgt = kzalloc(sizeof(*pri_tgt), GFP_KERNEL);
+ if (!pri_tgt)
+ return -1;
+ qedi_get_boot_tgt_info(block, pri_tgt, 0);
+ }
+
+ sec_ctrl_flags = !!(block->target[1].ctrl_flags &
+ NVM_ISCSI_CFG_TARGET_ENABLED);
+ if (sec_ctrl_flags) {
+ sec_tgt = kzalloc(sizeof(*sec_tgt), GFP_KERNEL);
+ if (!sec_tgt) {
+ ret = -1;
+ goto free_tgt;
+ }
+ qedi_get_boot_tgt_info(block, sec_tgt, 1);
+ }
+
+ for (i = 0; i < qedi->max_active_conns; i++) {
+ qedi_conn = qedi_get_conn_from_id(qedi, i);
+ if (!qedi_conn)
+ continue;
+
+ if (qedi_conn->ep->ip_type == TCP_IPV4)
+ snprintf(ep_ip_addr, IPV4_LEN, "%pI4\n",
+ qedi_conn->ep->dst_addr);
+ else
+ snprintf(ep_ip_addr, IPV6_LEN, "%pI6\n",
+ qedi_conn->ep->dst_addr);
+
+ cls_conn = qedi_conn->cls_conn;
+ conn = cls_conn->dd_data;
+ cls_sess = iscsi_conn_to_session(cls_conn);
+ sess = cls_sess->dd_data;
+
+ if (pri_ctrl_flags) {
+ if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
+ !strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (sec_ctrl_flags) {
+ if (!strcmp(sec_tgt->iscsi_name, sess->targetname) &&
+ !strcmp(sec_tgt->ip_addr, ep_ip_addr)) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
+ if (found) {
+ if (conn->hdrdgst_en) {
+ iscsi->header_digest_set = true;
+ iscsi->header_digest = 1;
+ }
+
+ if (conn->datadgst_en) {
+ iscsi->data_digest_set = true;
+ iscsi->data_digest = 1;
+ }
+ iscsi->boot_taget_portal_set = true;
+ iscsi->boot_taget_portal = sess->tpgt;
+
+ } else {
+ ret = -1;
+ }
+
+ if (sec_ctrl_flags)
+ kfree(sec_tgt);
+free_tgt:
+ if (pri_ctrl_flags)
+ kfree(pri_tgt);
+
+ return ret;
+}
+
+static void qedi_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
+{
+ struct qedi_ctx *qedi;
+
+ if (!dev) {
+ QEDI_INFO(NULL, QEDI_LOG_EVT,
+ "dev is NULL so ignoring get_generic_tlv_data request.\n");
+ return;
+ }
+ qedi = (struct qedi_ctx *)dev;
+
+ memset(data, 0, sizeof(struct qed_generic_tlvs));
+ ether_addr_copy(data->mac[0], qedi->mac);
+}
+
+/*
+ * Protocol TLV handler
+ */
+static void qedi_get_protocol_tlv_data(void *dev, void *data)
+{
+ struct qed_mfw_tlv_iscsi *iscsi = data;
+ struct qed_iscsi_stats *fw_iscsi_stats;
+ struct nvm_iscsi_block *block = NULL;
+ u32 chap_en = 0, mchap_en = 0;
+ struct qedi_ctx *qedi = dev;
+ int rval = 0;
+
+ fw_iscsi_stats = kmalloc(sizeof(*fw_iscsi_stats), GFP_KERNEL);
+ if (!fw_iscsi_stats) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Could not allocate memory for fw_iscsi_stats.\n");
+ goto exit_get_data;
+ }
+
+ mutex_lock(&qedi->stats_lock);
+ /* Query firmware for offload stats */
+ qedi_ops->get_stats(qedi->cdev, fw_iscsi_stats);
+ mutex_unlock(&qedi->stats_lock);
+
+ iscsi->rx_frames_set = true;
+ iscsi->rx_frames = fw_iscsi_stats->iscsi_rx_packet_cnt;
+ iscsi->rx_bytes_set = true;
+ iscsi->rx_bytes = fw_iscsi_stats->iscsi_rx_bytes_cnt;
+ iscsi->tx_frames_set = true;
+ iscsi->tx_frames = fw_iscsi_stats->iscsi_tx_packet_cnt;
+ iscsi->tx_bytes_set = true;
+ iscsi->tx_bytes = fw_iscsi_stats->iscsi_tx_bytes_cnt;
+ iscsi->frame_size_set = true;
+ iscsi->frame_size = qedi->ll2_mtu;
+ block = qedi_get_nvram_block(qedi);
+ if (block) {
+ chap_en = !!(block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_CHAP_ENABLED);
+ mchap_en = !!(block->generic.ctrl_flags &
+ NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED);
+
+ iscsi->auth_method_set = (chap_en || mchap_en) ? true : false;
+ iscsi->auth_method = 1;
+ if (chap_en)
+ iscsi->auth_method = 2;
+ if (mchap_en)
+ iscsi->auth_method = 3;
+
+ iscsi->tx_desc_size_set = true;
+ iscsi->tx_desc_size = QEDI_SQ_SIZE;
+ iscsi->rx_desc_size_set = true;
+ iscsi->rx_desc_size = QEDI_CQ_SIZE;
+
+ /* tpgt, hdr digest, data digest */
+ rval = qedi_find_boot_info(qedi, iscsi, block);
+ if (rval)
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Boot target not set");
+ }
+
+ kfree(fw_iscsi_stats);
+exit_get_data:
+ return;
+}
+
static void qedi_link_update(void *dev, struct qed_link_output *link)
{
struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
@@ -896,6 +1092,8 @@ static void qedi_link_update(void *dev, struct qed_link_output *link)
static struct qed_iscsi_cb_ops qedi_cb_ops = {
{
.link_update = qedi_link_update,
+ .get_protocol_tlv_data = qedi_get_protocol_tlv_data,
+ .get_generic_tlv_data = qedi_get_generic_tlv_data,
}
};
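
[Annotation] The qedi TLV handler above serializes its firmware stats query with the new stats_lock mutex so concurrent TLV requests do not interleave queries against the same device. A trivial user-space sketch of taking a consistent snapshot under a mutex (illustrative, not the qedi code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t stats_lock;
	uint64_t rx_frames;
	uint64_t tx_frames;
};

struct stats_snapshot {
	uint64_t rx_frames;
	uint64_t tx_frames;
};

static void get_stats(struct ctx *c, struct stats_snapshot *out)
{
	pthread_mutex_lock(&c->stats_lock);
	out->rx_frames = c->rx_frames;  /* both counters read under one lock */
	out->tx_frames = c->tx_frames;
	pthread_mutex_unlock(&c->stats_lock);
}

int main(void)
{
	struct ctx c = { .stats_lock = PTHREAD_MUTEX_INITIALIZER,
			 .rx_frames = 100, .tx_frames = 42 };
	struct stats_snapshot s;

	get_stats(&c, &s);
	printf("rx=%llu tx=%llu\n",
	       (unsigned long long)s.rx_frames, (unsigned long long)s.tx_frames);
	return 0;
}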
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 2288757b5c9e..9e914f9c3ffb 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3794,6 +3794,7 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* CT_IU preamble */
@@ -3812,7 +3813,6 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla24xx_async_gffid_sp_done;
rval = qla2x00_start_sp(sp);
@@ -4230,6 +4230,8 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->name = "gnnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
sp->gen2 = fc4_type;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
@@ -4246,7 +4248,6 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_gpnft_gnnft_sp_done;
rval = qla2x00_start_sp(sp);
@@ -4370,6 +4371,8 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->name = "gpnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
sp->gen2 = fc4_type;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
rspsz = sizeof(struct ct_sns_gpnft_rsp) +
@@ -4385,7 +4388,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_gpnft_gnnft_sp_done;
rval = qla2x00_start_sp(sp);
@@ -4495,6 +4497,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* CT_IU preamble */
@@ -4516,7 +4519,6 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_gnnid_sp_done;
rval = qla2x00_start_sp(sp);
@@ -4632,6 +4634,7 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* CT_IU preamble */
@@ -4653,7 +4656,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_gfpnid_sp_done;
rval = qla2x00_start_sp(sp);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 8aeb0ed524a1..8f55dd44adae 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -183,10 +183,11 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
sp->name = "login";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
sp->done = qla2x00_async_login_sp_done;
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
@@ -245,10 +246,11 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->type = SRB_LOGOUT_CMD;
sp->name = "logout";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
sp->done = qla2x00_async_logout_sp_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -307,10 +309,11 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->type = SRB_PRLO_CMD;
sp->name = "prlo";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
sp->done = qla2x00_async_prlo_sp_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -412,10 +415,11 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
sp->type = SRB_ADISC_CMD;
sp->name = "adisc";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
sp->done = qla2x00_async_adisc_sp_done;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
@@ -745,6 +749,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ mbx = &sp->u.iocb_cmd;
+ mbx->timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
mb = sp->u.iocb_cmd.u.mbx.out_mb;
@@ -757,9 +763,6 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
mb[8] = vha->gnl.size;
mb[9] = vha->vp_idx;
- mbx = &sp->u.iocb_cmd;
- mbx->timeout = qla2x00_async_iocb_timeout;
-
sp->done = qla24xx_async_gnl_sp_done;
rval = qla2x00_start_sp(sp);
@@ -887,10 +890,11 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->type = SRB_PRLI_CMD;
sp->name = "prli";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
sp->done = qla2x00_async_prli_sp_done;
lio->u.logio.flags = 0;
@@ -955,6 +959,9 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
sp->name = "gpdb";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+
+ mbx = &sp->u.iocb_cmd;
+ mbx->timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -974,8 +981,6 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
mb[9] = vha->vp_idx;
mb[10] = opt;
- mbx = &sp->u.iocb_cmd;
- mbx->timeout = qla2x00_async_iocb_timeout;
mbx->u.mbx.in = (void *)pd;
mbx->u.mbx.in_dma = pd_dma;
@@ -1490,13 +1495,15 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
tm_iocb = &sp->u.iocb_cmd;
sp->type = SRB_TM_CMD;
sp->name = "tmf";
+
+ tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
+ init_completion(&tm_iocb->u.tmf.comp);
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+
tm_iocb->u.tmf.flags = flags;
tm_iocb->u.tmf.lun = lun;
tm_iocb->u.tmf.data = tag;
sp->done = qla2x00_tmf_sp_done;
- tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
- init_completion(&tm_iocb->u.tmf.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -1550,8 +1557,8 @@ qla24xx_abort_sp_done(void *ptr, int res)
srb_t *sp = ptr;
struct srb_iocb *abt = &sp->u.iocb_cmd;
- del_timer(&sp->u.iocb_cmd.timer);
- complete(&abt->u.abt.comp);
+ if (del_timer(&sp->u.iocb_cmd.timer))
+ complete(&abt->u.abt.comp);
}
int
@@ -1570,7 +1577,11 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
+
+ abt_iocb->timeout = qla24xx_abort_iocb_timeout;
+ init_completion(&abt_iocb->u.abt.comp);
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
if (vha->flags.qpairs_available && cmd_sp->qpair)
@@ -1580,8 +1591,6 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
sp->done = qla24xx_abort_sp_done;
- abt_iocb->timeout = qla24xx_abort_iocb_timeout;
- init_completion(&abt_iocb->u.abt.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index b7a05aebf065..37ae0f6d8ae5 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -272,13 +272,13 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
- add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
init_completion(&sp->comp);
if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
if (sp->type == SRB_ELS_DCMD)
init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
+ add_timer(&sp->u.iocb_cmd.timer);
}
static inline int
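
[Annotation] The qla2xxx hunks above all enforce one ordering: the timeout callback, completions and other state a timer expiry may touch are set before qla2x00_init_timer() arms the timer (add_timer() was moved to the end of the helper). A user-space sketch of the same init-then-arm discipline using POSIX timers (illustrative, not the driver code; link with -lrt):

#define _POSIX_C_SOURCE 199309L
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct srb {
	void (*timeout)(struct srb *sp);   /* must be valid before arming */
	timer_t timer;
};

static void my_timeout(struct srb *sp)
{
	(void)sp;
	puts("command timed out");
}

static void expiry(union sigval sv)
{
	struct srb *sp = sv.sival_ptr;

	if (sp->timeout)                   /* set before the timer was armed */
		sp->timeout(sp);
}

int main(void)
{
	struct srb sp = { 0 };
	struct sigevent sev = {
		.sigev_notify = SIGEV_THREAD,
		.sigev_notify_function = expiry,
		.sigev_value.sival_ptr = &sp,
	};
	struct itimerspec its = { .it_value.tv_nsec = 100 * 1000 * 1000 };

	/* 1. initialize everything the expiry path depends on ... */
	sp.timeout = my_timeout;
	/* 2. ... only then create and arm the timer. */
	timer_create(CLOCK_MONOTONIC, &sev, &sp.timer);
	timer_settime(sp.timer, 0, &its, NULL);

	sleep(1);                          /* let the timer fire */
	return 0;
}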
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index f74ff7b550b6..a91cca52b5d5 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2460,8 +2460,8 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
+ qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
sp->done = qla2x00_els_dcmd_sp_done;
sp->free = qla2x00_els_dcmd_sp_free;
@@ -2658,8 +2658,11 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+
elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
+ init_completion(&elsio->u.els_plogi.comp);
+ qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+
sp->done = qla2x00_els_dcmd2_sp_done;
sp->free = qla2x00_els_dcmd2_sp_free;
@@ -2696,7 +2699,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
- init_completion(&elsio->u.els_plogi.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7cacdc3408fa..a3dc83f9444d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2174,7 +2174,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
0x10, 0x1);
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
@@ -2184,7 +2184,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
0x10, 0x3);
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
@@ -2194,7 +2194,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
0x10, 0x2);
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
@@ -2347,7 +2347,7 @@ done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
/* Always return DID_OK, bsg will send the vendor specific response
* in this case only */
- sp->done(sp, DID_OK << 6);
+ sp->done(sp, DID_OK << 16);
}
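
[Annotation] The qla_isr.c hunks above rely on the midlayer's result-word layout: the SAM status lives unshifted in bits 0-7, the host byte at bits 16-23 and the driver byte at bits 24-31, which is why SAM_STAT_CHECK_CONDITION must not be shifted and DID_OK belongs at bit 16, not bit 6. A small sketch of that packing (constants follow the usual Linux definitions, shown here only for illustration):

#include <stdio.h>

#define SAM_STAT_CHECK_CONDITION 0x02
#define DID_ABORT                0x05
#define DRIVER_SENSE             0x08

#define make_result(drv, host, msg, status) \
	(((drv) << 24) | ((host) << 16) | ((msg) << 8) | (status))

#define status_byte_of(result) ((result) & 0xff)
#define host_byte_of(result)   (((result) >> 16) & 0xff)

int main(void)
{
	/* Status goes in the low byte unshifted; the host code sits at bit 16. */
	unsigned int result = make_result(DRIVER_SENSE, DID_ABORT, 0,
					  SAM_STAT_CHECK_CONDITION);

	printf("status=0x%02x host=0x%02x\n",
	       status_byte_of(result), host_byte_of(result));
	return 0;
}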
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 5db0262d5c94..d8a36c13aeda 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -6023,14 +6023,14 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
sp->type = SRB_MB_IOCB;
sp->name = mb_to_str(mcp->mb[0]);
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
-
c = &sp->u.iocb_cmd;
c->timeout = qla2x00_async_iocb_timeout;
init_completion(&c->u.mbx.comp);
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
+
sp->done = qla2x00_async_mb_sp_done;
rval = qla2x00_start_sp(sp);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index da85cd89639f..f6f0a759a7c2 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -929,8 +929,8 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
sp->type = SRB_CTRL_VP;
sp->name = "ctrl_vp";
sp->done = qla_ctrlvp_sp_done;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 7113acf42ff3..521a51370554 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1821,9 +1821,11 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
sp->type = SRB_FXIOCB_DCMD;
sp->name = "fxdisc";
- qla2x00_init_timer(sp, FXDISC_TIMEOUT);
fdisc = &sp->u.iocb_cmd;
+ fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
+ qla2x00_init_timer(sp, FXDISC_TIMEOUT);
+
switch (fx_type) {
case FXDISC_GET_CONFIG_INFO:
fdisc->u.fxiocb.flags =
@@ -1924,7 +1926,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
goto done_unmap_req;
}
- fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
sp->done = qla2x00_fxdisc_sp_done;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2bbf0bff0da0..15eaa6dded04 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -470,9 +470,6 @@ fail_req_map:
static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
- if (!ha->req_q_map)
- return;
-
if (IS_QLAFX00(ha)) {
if (req && req->ring_fx00)
dma_free_coherent(&ha->pdev->dev,
@@ -483,17 +480,14 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
(req->length + 1) * sizeof(request_t),
req->ring, req->dma);
- if (req) {
+ if (req)
kfree(req->outstanding_cmds);
- kfree(req);
- }
+
+ kfree(req);
}
static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
- if (!ha->rsp_q_map)
- return;
-
if (IS_QLAFX00(ha)) {
if (rsp && rsp->ring_fx00)
dma_free_coherent(&ha->pdev->dev,
@@ -504,8 +498,7 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
(rsp->length + 1) * sizeof(response_t),
rsp->ring, rsp->dma);
}
- if (rsp)
- kfree(rsp);
+ kfree(rsp);
}
static void qla2x00_free_queues(struct qla_hw_data *ha)
@@ -3106,7 +3099,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto probe_failed;
/* Alloc arrays of request and response ring ptrs */
- if (qla2x00_alloc_queues(ha, req, rsp)) {
+ ret = qla2x00_alloc_queues(ha, req, rsp);
+ if (ret) {
ql_log(ql_log_fatal, base_vha, 0x003d,
"Failed to allocate memory for queue pointers..."
"aborting.\n");
@@ -3407,8 +3401,15 @@ probe_failed:
}
qla2x00_free_device(base_vha);
-
scsi_host_put(base_vha->host);
+ /*
+ * Need to NULL out local req/rsp after
+ * qla2x00_free_device => qla2x00_free_queues frees
+ * what these are pointing to. Or else we'll
+ * fall over below in qla2x00_free_req/rsp_que.
+ */
+ req = NULL;
+ rsp = NULL;
probe_hw_failed:
qla2x00_mem_free(ha);
@@ -4114,6 +4115,7 @@ fail_npiv_info:
(*rsp)->dma = 0;
fail_rsp_ring:
kfree(*rsp);
+ *rsp = NULL;
fail_rsp:
dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
sizeof(request_t), (*req)->ring, (*req)->dma);
@@ -4121,6 +4123,7 @@ fail_rsp:
(*req)->dma = 0;
fail_req_ring:
kfree(*req);
+ *req = NULL;
fail_req:
dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
ha->ct_sns, ha->ct_sns_dma);
@@ -4508,16 +4511,11 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
ha->init_cb, ha->init_cb_dma);
- if (ha->optrom_buffer)
- vfree(ha->optrom_buffer);
- if (ha->nvram)
- kfree(ha->nvram);
- if (ha->npiv_info)
- kfree(ha->npiv_info);
- if (ha->swl)
- kfree(ha->swl);
- if (ha->loop_id_map)
- kfree(ha->loop_id_map);
+ vfree(ha->optrom_buffer);
+ kfree(ha->nvram);
+ kfree(ha->npiv_info);
+ kfree(ha->swl);
+ kfree(ha->loop_id_map);
ha->srb_mempool = NULL;
ha->ctx_mempool = NULL;
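
[Annotation] The qla_os.c cleanup hunks above drop the "if (ptr)" guards because kfree() and vfree() accept NULL, and the error paths now clear req/rsp (and *req/*rsp) after freeing so a later shared cleanup path cannot free them again. A user-space analogue of both points (illustrative, not the driver code):

#include <stdio.h>
#include <stdlib.h>

struct queue {
	void *ring;
};

static void free_queue(struct queue **qp)
{
	struct queue *q = *qp;

	if (q)
		free(q->ring);
	free(q);          /* no "if (q)" needed: free(NULL) is a no-op */
	*qp = NULL;       /* later cleanup sees NULL, not a stale pointer */
}

int main(void)
{
	struct queue *q = calloc(1, sizeof(*q));

	if (q)
		q->ring = malloc(64);

	free_queue(&q);   /* normal teardown */
	free_queue(&q);   /* error-path re-entry: harmless because q is NULL */
	puts("double teardown handled safely");
	return 0;
}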
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 5546ac9c3d9d..025dc2d3f3de 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -664,10 +664,10 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
sp->type = type;
sp->name = "nack";
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
sp->u.iocb_cmd.u.nack.ntfy = ntfy;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_nack_sp_done;
rval = qla2x00_start_sp(sp);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 9ef5e3b810f6..656c98e116a9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -234,11 +234,13 @@ static const char *sdebug_version_date = "20180128";
#define F_INV_OP 0x200
#define F_FAKE_RW 0x400
#define F_M_ACCESS 0x800 /* media access */
-#define F_LONG_DELAY 0x1000
+#define F_SSU_DELAY 0x1000
+#define F_SYNC_DELAY 0x2000
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
+#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
#define SDEBUG_MAX_PARTS 4
@@ -510,7 +512,7 @@ static const struct opcode_info_t release_iarr[] = {
};
static const struct opcode_info_t sync_cache_iarr[] = {
- {0, 0x91, 0, F_LONG_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
+ {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
{16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
};
@@ -553,7 +555,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
resp_write_dt0, write_iarr, /* WRITE(16) */
{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
- {0, 0x1b, 0, F_LONG_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
+ {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
{6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
@@ -606,7 +608,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
0, 0, 0, 0, 0} },
- {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_LONG_DELAY | F_M_ACCESS,
+ {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
resp_sync_cache, sync_cache_iarr,
{10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* SYNC_CACHE (10) */
@@ -667,6 +669,7 @@ static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
+static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static unsigned int sdebug_store_sectors;
@@ -1607,6 +1610,7 @@ static int resp_start_stop(struct scsi_cmnd *scp,
{
unsigned char *cmd = scp->cmnd;
int power_cond, stop;
+ bool changing;
power_cond = (cmd[4] & 0xf0) >> 4;
if (power_cond) {
@@ -1614,8 +1618,12 @@ static int resp_start_stop(struct scsi_cmnd *scp,
return check_condition_result;
}
stop = !(cmd[4] & 1);
+ changing = atomic_read(&devip->stopped) == !stop;
atomic_xchg(&devip->stopped, stop);
- return (cmd[1] & 0x1) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */
+ if (!changing || cmd[1] & 0x1) /* state unchanged or IMMED set */
+ return SDEG_RES_IMMED_MASK;
+ else
+ return 0;
}
static sector_t get_sdebug_capacity(void)
@@ -2473,6 +2481,7 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
if (do_write) {
sdb = scsi_out(scmd);
dir = DMA_TO_DEVICE;
+ write_since_sync = true;
} else {
sdb = scsi_in(scmd);
dir = DMA_FROM_DEVICE;
@@ -3583,6 +3592,7 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
static int resp_sync_cache(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
+ int res = 0;
u64 lba;
u32 num_blocks;
u8 *cmd = scp->cmnd;
@@ -3598,7 +3608,11 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
return check_condition_result;
}
- return (cmd[1] & 0x2) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */
+ if (!write_since_sync || cmd[1] & 0x2)
+ res = SDEG_RES_IMMED_MASK;
+ else /* delay if write_since_sync and IMMED clear */
+ write_since_sync = false;
+ return res;
}
#define RL_BUCKET_ELEMS 8
@@ -5777,13 +5791,14 @@ fini:
return schedule_resp(scp, devip, errsts, pfp, 0, 0);
else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
/*
- * If any delay is active, want F_LONG_DELAY to be at least 1
+ * If any delay is active, for F_SSU_DELAY want at least 1
* second and if sdebug_jdelay>0 want a long delay of that
- * many seconds.
+ * many seconds; for F_SYNC_DELAY want 1/20 of that.
*/
int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
+ int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
- jdelay = mult_frac(USER_HZ * jdelay, HZ, USER_HZ);
+ jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
} else
return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
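
[Annotation] The scsi_debug hunks above split F_LONG_DELAY into F_SSU_DELAY and F_SYNC_DELAY, so SYNCHRONIZE CACHE gets roughly 1/20 of the configured long delay (and only when there has been a write since the last sync). The jiffies value comes from a mult_frac()-style scaled division; a sketch of that arithmetic with illustrative constants:

#include <stdio.h>

#define USER_HZ 100
#define HZ      250

/* Mimics the kernel helper: x * numer / denom without an overflowing product. */
static unsigned long mult_frac(unsigned long x, unsigned long numer,
			       unsigned long denom)
{
	unsigned long quot = x / denom;
	unsigned long rem  = x % denom;

	return quot * numer + (rem * numer) / denom;
}

int main(void)
{
	int jdelay_secs = 5;                   /* configured long delay */
	int ssu_denom = 1, sync_denom = 20;    /* F_SSU_DELAY vs F_SYNC_DELAY */

	unsigned long ssu_jiffies =
		mult_frac(USER_HZ * jdelay_secs, HZ, ssu_denom * USER_HZ);
	unsigned long sync_jiffies =
		mult_frac(USER_HZ * jdelay_secs, HZ, sync_denom * USER_HZ);

	printf("START STOP UNIT delay:   %lu jiffies\n", ssu_jiffies);   /* 1250 */
	printf("SYNCHRONIZE CACHE delay: %lu jiffies\n", sync_jiffies);  /* 62 */
	return 0;
}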
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index e5370d718058..dd107dc4db0e 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -202,7 +202,7 @@ static struct {
{"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN},
{"MegaRAID", "LD", NULL, BLIST_FORCELUN},
{"MICROP", "4110", NULL, BLIST_NOTQ},
- {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC},
+ {"MSFT", "Virtual HD", NULL, BLIST_MAX_1024 | BLIST_NO_RSOC},
{"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
{"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
{"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index b88b5dbbc444..188f30572aa1 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -112,6 +112,9 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name)
{
struct scsi_device_handler *dh;
+ if (!name || strlen(name) == 0)
+ return NULL;
+
dh = __scsi_dh_lookup(name);
if (!dh) {
request_module("scsi_dh_%s", name);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0dfec0dedd5e..e9b4f279d29c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -723,18 +723,25 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
}
/**
- * __scsi_error_from_host_byte - translate SCSI error code into errno
- * @cmd: SCSI command (unused)
+ * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
+ * @cmd: SCSI command
* @result: scsi error code
*
- * Translate SCSI error code into block errors.
+ * Translate a SCSI result code into a blk_status_t value. May reset the host
+ * byte of @cmd->result.
*/
-static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd,
- int result)
+static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
{
switch (host_byte(result)) {
case DID_OK:
- return BLK_STS_OK;
+ /*
+ * Also check the other bytes than the status byte in result
+ * to handle the case when a SCSI LLD sets result to
+ * DRIVER_SENSE << 24 without setting SAM_STAT_CHECK_CONDITION.
+ */
+ if (scsi_status_is_good(result) && (result & ~0xff) == 0)
+ return BLK_STS_OK;
+ return BLK_STS_IOERR;
case DID_TRANSPORT_FAILFAST:
return BLK_STS_TRANSPORT;
case DID_TARGET_FAILURE:
@@ -812,10 +819,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
SCSI_SENSE_BUFFERSIZE);
}
if (!sense_deferred)
- error = __scsi_error_from_host_byte(cmd, result);
+ error = scsi_result_to_blk_status(cmd, result);
}
/*
- * __scsi_error_from_host_byte may have reset the host_byte
+ * scsi_result_to_blk_status may have reset the host_byte
*/
scsi_req(req)->result = cmd->result;
scsi_req(req)->resid_len = scsi_get_resid(cmd);
@@ -837,7 +844,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* good_bytes != blk_rq_bytes(req) as the signal for an error.
* This sets the error explicitly for the problem case.
*/
- error = __scsi_error_from_host_byte(cmd, result);
+ error = scsi_result_to_blk_status(cmd, result);
}
/* no bidi support for !blk_rq_is_passthrough yet */
@@ -907,7 +914,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (result == 0)
goto requeue;
- error = __scsi_error_from_host_byte(cmd, result);
+ error = scsi_result_to_blk_status(cmd, result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f4b52b44b966..65f6c94f2e9b 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2322,6 +2322,12 @@ iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
return nlmsg_multicast(nls, skb, 0, group, gfp);
}
+static int
+iscsi_unicast_skb(struct sk_buff *skb, u32 portid)
+{
+ return nlmsg_unicast(nls, skb, portid);
+}
+
int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size)
{
@@ -2524,14 +2530,11 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
static int
-iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
- void *payload, int size)
+iscsi_if_send_reply(u32 portid, int type, void *payload, int size)
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
int len = nlmsg_total_size(size);
- int flags = multi ? NLM_F_MULTI : 0;
- int t = done ? NLMSG_DONE : type;
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
@@ -2539,10 +2542,9 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
return -ENOMEM;
}
- nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
- nlh->nlmsg_flags = flags;
+ nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0);
memcpy(nlmsg_data(nlh), payload, size);
- return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
+ return iscsi_unicast_skb(skb, portid);
}
static int
@@ -3470,6 +3472,7 @@ static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
int err = 0;
+ u32 portid;
struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_transport *transport = NULL;
struct iscsi_internal *priv;
@@ -3490,10 +3493,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
if (!try_module_get(transport->owner))
return -EINVAL;
+ portid = NETLINK_CB(skb).portid;
+
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_CREATE_SESSION:
err = iscsi_if_create_session(priv, ep, ev,
- NETLINK_CB(skb).portid,
+ portid,
ev->u.c_session.initial_cmdsn,
ev->u.c_session.cmds_max,
ev->u.c_session.queue_depth);
@@ -3506,7 +3511,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
}
err = iscsi_if_create_session(priv, ep, ev,
- NETLINK_CB(skb).portid,
+ portid,
ev->u.c_bound_session.initial_cmdsn,
ev->u.c_bound_session.cmds_max,
ev->u.c_bound_session.queue_depth);
@@ -3664,6 +3669,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
static void
iscsi_if_rx(struct sk_buff *skb)
{
+ u32 portid = NETLINK_CB(skb).portid;
+
mutex_lock(&rx_queue_mutex);
while (skb->len >= NLMSG_HDRLEN) {
int err;
@@ -3699,8 +3706,8 @@ iscsi_if_rx(struct sk_buff *skb)
break;
if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
break;
- err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+ err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
+ ev, sizeof(*ev));
} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
skb_pull(skb, rlen);
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a6201e696ab9..9421d9877730 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2121,6 +2121,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
break; /* standby */
if (sshdr.asc == 4 && sshdr.ascq == 0xc)
break; /* unavailable */
+ if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
+ break; /* sanitize in progress */
/*
* Issue command to spin up drive when not ready
*/
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 41df75eea57b..210407cd2341 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -400,8 +400,10 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
*
* Check that all zones of the device are equal. The last zone can however
* be smaller. The zone size must also be a power of two number of LBAs.
+ *
+ * Returns the zone size in bytes upon success or an error code upon failure.
*/
-static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
{
u64 zone_blocks = 0;
sector_t block = 0;
@@ -412,8 +414,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
int ret;
u8 same;
- sdkp->zone_blocks = 0;
-
/* Get a buffer */
buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
if (!buf)
@@ -445,16 +445,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
/* Parse zone descriptors */
while (rec < buf + buf_len) {
- zone_blocks = get_unaligned_be64(&rec[8]);
- if (sdkp->zone_blocks == 0) {
- sdkp->zone_blocks = zone_blocks;
- } else if (zone_blocks != sdkp->zone_blocks &&
- (block + zone_blocks < sdkp->capacity
- || zone_blocks > sdkp->zone_blocks)) {
- zone_blocks = 0;
+ u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
+
+ if (zone_blocks == 0) {
+ zone_blocks = this_zone_blocks;
+ } else if (this_zone_blocks != zone_blocks &&
+ (block + this_zone_blocks < sdkp->capacity
+ || this_zone_blocks > zone_blocks)) {
+ this_zone_blocks = 0;
goto out;
}
- block += zone_blocks;
+ block += this_zone_blocks;
rec += 64;
}
@@ -467,8 +468,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
} while (block < sdkp->capacity);
- zone_blocks = sdkp->zone_blocks;
-
out:
if (!zone_blocks) {
if (sdkp->first_scan)
@@ -488,8 +487,7 @@ out:
"Zone size too large\n");
ret = -ENODEV;
} else {
- sdkp->zone_blocks = zone_blocks;
- sdkp->zone_shift = ilog2(zone_blocks);
+ ret = zone_blocks;
}
out_free:
@@ -500,15 +498,14 @@ out_free:
/**
* sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
- * @sdkp: The disk of the bitmap
+ * @nr_zones: Number of zones to allocate space for.
+ * @numa_node: NUMA node to allocate the memory from.
*/
-static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
+static inline unsigned long *
+sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
{
- struct request_queue *q = sdkp->disk->queue;
-
- return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones)
- * sizeof(unsigned long),
- GFP_KERNEL, q->node);
+ return kzalloc_node(BITS_TO_LONGS(nr_zones) * sizeof(unsigned long),
+ GFP_KERNEL, numa_node);
}
/**
@@ -516,6 +513,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
* @sdkp: disk used
* @buf: report reply buffer
* @buflen: length of @buf
+ * @zone_shift: logarithm base 2 of the number of blocks in a zone
* @seq_zones_bitmap: bitmap of sequential zones to set
*
* Parse reported zone descriptors in @buf to identify sequential zones and
@@ -525,7 +523,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
* Return the LBA after the last zone reported.
*/
static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
- unsigned int buflen,
+ unsigned int buflen, u32 zone_shift,
unsigned long *seq_zones_bitmap)
{
sector_t lba, next_lba = sdkp->capacity;
@@ -544,7 +542,7 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
if (type != ZBC_ZONE_TYPE_CONV &&
cond != ZBC_ZONE_COND_READONLY &&
cond != ZBC_ZONE_COND_OFFLINE)
- set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap);
+ set_bit(lba >> zone_shift, seq_zones_bitmap);
next_lba = lba + get_unaligned_be64(&rec[8]);
rec += 64;
}
@@ -553,12 +551,16 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
}
/**
- * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap.
+ * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
* @sdkp: target disk
+ * @zone_shift: logarithm base 2 of the number of blocks in a zone
+ * @nr_zones: number of zones to set up a seq zone bitmap for
*
* Allocate a zone bitmap and initialize it by identifying sequential zones.
*/
-static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
+static unsigned long *
+sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
+ u32 nr_zones)
{
struct request_queue *q = sdkp->disk->queue;
unsigned long *seq_zones_bitmap;
@@ -566,9 +568,9 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
unsigned char *buf;
int ret = -ENOMEM;
- seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp);
+ seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
if (!seq_zones_bitmap)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
if (!buf)
@@ -579,7 +581,7 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
if (ret)
goto out;
lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
- seq_zones_bitmap);
+ zone_shift, seq_zones_bitmap);
}
if (lba != sdkp->capacity) {
@@ -591,12 +593,9 @@ out:
kfree(buf);
if (ret) {
kfree(seq_zones_bitmap);
- return ret;
+ return ERR_PTR(ret);
}
-
- q->seq_zones_bitmap = seq_zones_bitmap;
-
- return 0;
+ return seq_zones_bitmap;
}
static void sd_zbc_cleanup(struct scsi_disk *sdkp)
@@ -612,44 +611,64 @@ static void sd_zbc_cleanup(struct scsi_disk *sdkp)
q->nr_zones = 0;
}
-static int sd_zbc_setup(struct scsi_disk *sdkp)
+static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
{
struct request_queue *q = sdkp->disk->queue;
+ u32 zone_shift = ilog2(zone_blocks);
+ u32 nr_zones;
int ret;
- /* READ16/WRITE16 is mandatory for ZBC disks */
- sdkp->device->use_16_for_rw = 1;
- sdkp->device->use_10_for_rw = 0;
-
/* chunk_sectors indicates the zone size */
- blk_queue_chunk_sectors(sdkp->disk->queue,
- logical_to_sectors(sdkp->device, sdkp->zone_blocks));
- sdkp->nr_zones =
- round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
+ blk_queue_chunk_sectors(q,
+ logical_to_sectors(sdkp->device, zone_blocks));
+ nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
/*
* Initialize the device request queue information if the number
* of zones changed.
*/
- if (sdkp->nr_zones != q->nr_zones) {
-
- sd_zbc_cleanup(sdkp);
-
- q->nr_zones = sdkp->nr_zones;
- if (sdkp->nr_zones) {
- q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp);
- if (!q->seq_zones_wlock) {
+ if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
+ unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
+ size_t zone_bitmap_size;
+
+ if (nr_zones) {
+ seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
+ q->node);
+ if (!seq_zones_wlock) {
ret = -ENOMEM;
goto err;
}
- ret = sd_zbc_setup_seq_zones_bitmap(sdkp);
- if (ret) {
- sd_zbc_cleanup(sdkp);
+ seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
+ zone_shift, nr_zones);
+ if (IS_ERR(seq_zones_bitmap)) {
+ ret = PTR_ERR(seq_zones_bitmap);
+ kfree(seq_zones_wlock);
goto err;
}
}
-
+ zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
+ sizeof(unsigned long);
+ blk_mq_freeze_queue(q);
+ if (q->nr_zones != nr_zones) {
+ /* READ16/WRITE16 is mandatory for ZBC disks */
+ sdkp->device->use_16_for_rw = 1;
+ sdkp->device->use_10_for_rw = 0;
+
+ sdkp->zone_blocks = zone_blocks;
+ sdkp->zone_shift = zone_shift;
+ sdkp->nr_zones = nr_zones;
+ q->nr_zones = nr_zones;
+ swap(q->seq_zones_wlock, seq_zones_wlock);
+ swap(q->seq_zones_bitmap, seq_zones_bitmap);
+ } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
+ zone_bitmap_size) != 0) {
+ memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
+ zone_bitmap_size);
+ }
+ blk_mq_unfreeze_queue(q);
+ kfree(seq_zones_wlock);
+ kfree(seq_zones_bitmap);
}
return 0;
@@ -661,6 +680,7 @@ err:
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
+ int64_t zone_blocks;
int ret;
if (!sd_is_zoned(sdkp))
@@ -697,12 +717,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
* Check zone size: only devices with a constant zone size (except
* an eventual last runt zone) that is a power of 2 are supported.
*/
- ret = sd_zbc_check_zone_size(sdkp);
- if (ret)
+ zone_blocks = sd_zbc_check_zone_size(sdkp);
+ ret = -EFBIG;
+ if (zone_blocks != (u32)zone_blocks)
+ goto err;
+ ret = zone_blocks;
+ if (ret < 0)
goto err;
/* The drive satisfies the kernel restrictions: set it up */
- ret = sd_zbc_setup(sdkp);
+ ret = sd_zbc_setup(sdkp, zone_blocks);
if (ret)
goto err;
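
[Annotation] With the sd_zbc changes above, sd_zbc_check_zone_size() returns the zone size (or a negative error) as a signed 64-bit value, and the caller rejects anything that does not fit in 32 bits via "zone_blocks != (u32)zone_blocks" before passing it on. A tiny sketch of that cast-and-compare fit check (illustrative values):

#include <stdint.h>
#include <stdio.h>

/* Non-negative and unchanged by truncation to 32 bits -> usable as a u32. */
static int fits_in_u32(int64_t v)
{
	return v >= 0 && v == (int64_t)(uint32_t)v;
}

int main(void)
{
	int64_t ok_zone   = 0x80000;         /* 524288 blocks: fits */
	int64_t huge_zone = 0x180000000LL;   /* > 32 bits: would be rejected (-EFBIG) */

	printf("%lld fits: %d\n", (long long)ok_zone, fits_in_u32(ok_zone));
	printf("%lld fits: %d\n", (long long)huge_zone, fits_in_u32(huge_zone));
	return 0;
}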
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0cf25d789d05..3f3cb72e0c0c 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -587,18 +587,28 @@ out:
static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
{
- struct scsi_cd *cd = scsi_cd(disk);
+ unsigned int ret = 0;
+ struct scsi_cd *cd;
- if (atomic_read(&cd->device->disk_events_disable_depth))
+ cd = scsi_cd_get(disk);
+ if (!cd)
return 0;
- return cdrom_check_events(&cd->cdi, clearing);
+ if (!atomic_read(&cd->device->disk_events_disable_depth))
+ ret = cdrom_check_events(&cd->cdi, clearing);
+
+ scsi_cd_put(cd);
+ return ret;
}
static int sr_block_revalidate_disk(struct gendisk *disk)
{
- struct scsi_cd *cd = scsi_cd(disk);
struct scsi_sense_hdr sshdr;
+ struct scsi_cd *cd;
+
+ cd = scsi_cd_get(disk);
+ if (!cd)
+ return -ENXIO;
/* if the unit is not ready, nothing more to do */
if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
@@ -607,6 +617,7 @@ static int sr_block_revalidate_disk(struct gendisk *disk)
sr_cd_check(&cd->cdi);
get_sectorsize(cd);
out:
+ scsi_cd_put(cd);
return 0;
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8c51d628b52e..a2ec0bc9e9fa 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1722,11 +1722,14 @@ static int storvsc_probe(struct hv_device *device,
max_targets = STORVSC_MAX_TARGETS;
max_channels = STORVSC_MAX_CHANNELS;
/*
- * On Windows8 and above, we support sub-channels for storage.
+ * On Windows8 and above, we support sub-channels for storage
+ * on SCSI and FC controllers.
* The number of sub-channels offered is based on the number of
* VCPUs in the guest.
*/
- max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
+ if (!dev_is_ide)
+ max_sub_channels =
+ (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
}
scsi_driver.can_queue = (max_outstanding_req_per_channel *
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index c5b1bf1cadcb..00e79057f870 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -276,6 +276,35 @@ static inline void ufshcd_remove_non_printable(char *val)
*val = ' ';
}
+static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
+}
+
+static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
+}
+
+static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+ const char *str)
+{
+ struct utp_task_req_desc *descp;
+ struct utp_upiu_task_req *task_req;
+ int off = (int)tag - hba->nutrs;
+
+ descp = &hba->utmrdl_base_addr[off];
+ task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
+ &task_req->input_param1);
+}
+
static void ufshcd_add_command_trace(struct ufs_hba *hba,
unsigned int tag, const char *str)
{
@@ -285,6 +314,9 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp;
int transfer_len = -1;
+ /* trace UPIU also */
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
+
if (!trace_ufshcd_command_enabled())
return;
@@ -2550,6 +2582,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
+ ufshcd_add_query_upiu_trace(hba, tag, "query_send");
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2559,6 +2592,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+ ufshcd_add_query_upiu_trace(hba, tag,
+ err ? "query_complete_err" : "query_complete");
+
out_put_tag:
ufshcd_put_dev_cmd_tag(hba, tag);
wake_up(&hba->dev_cmd.tag_wq);
@@ -5443,11 +5479,14 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
spin_unlock_irqrestore(host->host_lock, flags);
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
+
/* wait until the task management command is completed */
err = wait_event_timeout(hba->tm_wq,
test_bit(free_slot, &hba->tm_condition),
msecs_to_jiffies(TM_CMD_TIMEOUT));
if (!err) {
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function);
if (ufshcd_clear_tm_cmd(hba, free_slot))
@@ -5456,6 +5495,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
err = -ETIMEDOUT;
} else {
err = ufshcd_task_req_compl(hba, free_slot, tm_response);
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
}
clear_bit(free_slot, &hba->tm_condition);
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c374e3b5c678..777e5f1e52d1 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -609,7 +609,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
break;
case BTSTAT_ABORTQUEUE:
- cmd->result = (DID_ABORT << 16);
+ cmd->result = (DID_BUS_BUSY << 16);
break;
case BTSTAT_SCSIPARITY:
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index 884419c37e84..457ea1f8db30 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -183,7 +183,7 @@ static u16 slim_slicesize(int code)
0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
};
- clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
+ code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
return sizetocode[code - 1];
}
diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c
index fe96a8b956fb..f7ed1187518b 100644
--- a/drivers/soc/bcm/raspberrypi-power.c
+++ b/drivers/soc/bcm/raspberrypi-power.c
@@ -45,7 +45,7 @@ struct rpi_power_domains {
struct rpi_power_domain_packet {
u32 domain;
u32 on;
-} __packet;
+};
/*
* Asks the firmware to enable or disable power on a specific power
diff --git a/drivers/soc/samsung/pm_domains.c b/drivers/soc/samsung/pm_domains.c
index b6a436594a19..caf45cf7aa8e 100644
--- a/drivers/soc/samsung/pm_domains.c
+++ b/drivers/soc/samsung/pm_domains.c
@@ -147,6 +147,12 @@ static __init const char *exynos_get_domain_name(struct device_node *node)
return kstrdup_const(name, GFP_KERNEL);
}
+static const char *soc_force_no_clk[] = {
+ "samsung,exynos5250-clock",
+ "samsung,exynos5420-clock",
+ "samsung,exynos5800-clock",
+};
+
static __init int exynos4_pm_init_power_domain(void)
{
struct device_node *np;
@@ -183,6 +189,11 @@ static __init int exynos4_pm_init_power_domain(void)
pd->pd.power_on = exynos_pd_power_on;
pd->local_pwr_cfg = pm_domain_cfg->local_pwr_cfg;
+ for (i = 0; i < ARRAY_SIZE(soc_force_no_clk); i++)
+ if (of_find_compatible_node(NULL, NULL,
+ soc_force_no_clk[i]))
+ goto no_clk;
+
for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
char clk_name[8];
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 1596d35498c5..6573152ce893 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
{
- if (!has_bspi(qspi) || (qspi->bspi_enabled))
+ if (!has_bspi(qspi))
return;
qspi->bspi_enabled = 1;
@@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
{
- if (!has_bspi(qspi) || (!qspi->bspi_enabled))
+ if (!has_bspi(qspi))
return;
qspi->bspi_enabled = 0;
@@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
{
- u32 data = 0;
+ u32 rd = 0;
+ u32 wr = 0;
- if (qspi->curr_cs == cs)
- return;
if (qspi->base[CHIP_SELECT]) {
- data = bcm_qspi_read(qspi, CHIP_SELECT, 0);
- data = (data & ~0xff) | (1 << cs);
- bcm_qspi_write(qspi, CHIP_SELECT, 0, data);
+ rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
+ wr = (rd & ~0xff) | (1 << cs);
+ if (rd == wr)
+ return;
+ bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
usleep_range(10, 20);
}
+
+ dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
qspi->curr_cs = cs;
}
@@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
}
mspi_cdram = MSPI_CDRAM_CONT_BIT;
- mspi_cdram |= (~(1 << spi->chip_select) &
- MSPI_CDRAM_PCS);
+
+ if (has_bspi(qspi))
+ mspi_cdram &= ~1;
+ else
+ mspi_cdram |= (~(1 << spi->chip_select) &
+ MSPI_CDRAM_PCS);
+
mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
MSPI_CDRAM_BITSE_BIT);
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 1431cb98fe40..3094d818cf06 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -184,6 +184,11 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
irqreturn_t ret = IRQ_NONE;
+ /* IRQ may be shared, so return if our interrupts are disabled */
+ if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
+ (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
+ return ret;
+
/* check if we have data to read */
while (bs->rx_len &&
(!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 5c9516ae4942..4a001634023e 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -313,6 +313,14 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
(xspi->tx_bytes > 0)) {
+
+ /* When the xspi is busy, bytes may fail to be sent and the SPI
+ * controller may not work reliably, so add a one-byte delay
+ */
+ if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
+ CDNS_SPI_IXR_TXFULL)
+ usleep_range(10, 20);
+
if (xspi->txbuf)
cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
else
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 6f57592a7f95..a056ee88a960 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1701,7 +1701,7 @@ static struct platform_driver spi_imx_driver = {
};
module_platform_driver(spi_imx_driver);
-MODULE_DESCRIPTION("SPI Master Controller driver");
+MODULE_DESCRIPTION("SPI Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 513ec6c6e25b..0ae7defd3492 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -38,7 +38,7 @@ struct driver_data {
/* SSP register addresses */
void __iomem *ioaddr;
- u32 ssdr_physical;
+ phys_addr_t ssdr_physical;
/* SSP masks*/
u32 dma_cr1;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index ae086aab57d5..8171eedbfc90 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -283,6 +283,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
}
k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
+ brps = min_t(int, brps, 32);
scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
sh_msiof_write(p, TSCR, scr);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 16cab40156ca..aeab05f682d9 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -1799,7 +1799,7 @@ static int imx_csi_probe(struct platform_device *pdev)
priv->dev->of_node = pdata->of_node;
pinctrl = devm_pinctrl_get_select_default(priv->dev);
if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(priv->vdev);
+ ret = PTR_ERR(pinctrl);
dev_dbg(priv->dev,
"devm_pinctrl_get_select_default() failed: %d\n", ret);
if (ret != -ENODEV)
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 6b5300ca44a6..885f5fcead77 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -1390,7 +1390,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
}
if (hif_drv->usr_conn_req.ies) {
- conn_info.req_ies = kmemdup(conn_info.req_ies,
+ conn_info.req_ies = kmemdup(hif_drv->usr_conn_req.ies,
hif_drv->usr_conn_req.ies_len,
GFP_KERNEL);
if (conn_info.req_ies)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 07c814c42648..60429011292a 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -427,8 +427,8 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct scatterlist *sg = &cmd->t_data_sg[0];
- unsigned char *buf, zero = 0x00, *p = &zero;
- int rc, ret;
+ unsigned char *buf, *not_zero;
+ int ret;
buf = kmap(sg_page(sg)) + sg->offset;
if (!buf)
@@ -437,10 +437,10 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
* Fall back to block_execute_write_same() slow-path if
* incoming WRITE_SAME payload does not contain zeros.
*/
- rc = memcmp(buf, p, cmd->data_length);
+ not_zero = memchr_inv(buf, 0x00, cmd->data_length);
kunmap(sg_page(sg));
- if (rc)
+ if (not_zero)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = blkdev_issue_zeroout(bdev,
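The change above replaces a memcmp() against a single zero byte with memchr_inv(), which scans the whole payload and returns the first byte that is not the expected value. A rough userspace equivalent of that check, assuming a plain loop in place of the kernel helper:

#include <stddef.h>
#include <stdio.h>

static const void *first_mismatch(const void *buf, int c, size_t len)
{
	const unsigned char *p = buf;
	size_t i;

	for (i = 0; i < len; i++)
		if (p[i] != (unsigned char)c)
			return p + i;
	return NULL;	/* every byte matched the fill value */
}

int main(void)
{
	unsigned char payload[512] = { 0 };

	payload[100] = 0xff;	/* not a zero fill: the slow path would be taken */
	printf("all zero: %s\n",
	       first_mismatch(payload, 0, sizeof(payload)) ? "no" : "yes");
	return 0;
}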
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 0d99b242e82e..6cb933ecc084 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -890,6 +890,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
bytes = min(bytes, data_len);
if (!bio) {
+new_bio:
nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
nr_pages -= nr_vecs;
/*
@@ -931,6 +932,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
* be allocated with pscsi_get_bio() above.
*/
bio = NULL;
+ goto new_bio;
}
data_len -= bytes;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 0124a91c8d71..dd46b758852a 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -238,6 +238,17 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
if (IS_ERR(shm))
return PTR_ERR(shm);
+ /*
+ * Ensure offset + size does not overflow offset
+ * and does not overflow the size of the referred
+ * shared memory object.
+ */
+ if ((ip.a + ip.b) < ip.a ||
+ (ip.a + ip.b) > shm->size) {
+ tee_shm_put(shm);
+ return -EINVAL;
+ }
+
params[n].u.memref.shm_offs = ip.a;
params[n].u.memref.size = ip.b;
params[n].u.memref.shm = shm;
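The added check rejects memref parameters whose offset plus size wraps around or extends past the shared-memory object. The same guard in isolation, as a small standalone sketch (hypothetical types, not the TEE uapi):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool memref_in_bounds(uint64_t off, uint64_t size, uint64_t shm_size)
{
	if (off + size < off)		/* unsigned wrap-around */
		return false;
	return off + size <= shm_size;	/* must stay within the object */
}

int main(void)
{
	printf("%d\n", memref_in_bounds(16, 32, 4096));		/* 1: fits    */
	printf("%d\n", memref_in_bounds(UINT64_MAX, 2, 4096));	/* 0: wraps   */
	printf("%d\n", memref_in_bounds(4090, 32, 4096));	/* 0: too big */
	return 0;
}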
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 556960a1bab3..07d3be6f0780 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -360,9 +360,10 @@ int tee_shm_get_fd(struct tee_shm *shm)
if (!(shm->flags & TEE_SHM_DMA_BUF))
return -EINVAL;
+ get_dma_buf(shm->dmabuf);
fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
- if (fd >= 0)
- get_dma_buf(shm->dmabuf);
+ if (fd < 0)
+ dma_buf_put(shm->dmabuf);
return fd;
}
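The tee_shm_get_fd() fix takes the dma-buf reference before the file descriptor is created, and drops it again if fd allocation fails, instead of taking it only after the fd already exists. A toy sketch of that reference-before-publish ordering, using a made-up refcount rather than the dma-buf API:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

struct object { atomic_int refs; };

static void obj_get(struct object *o) { atomic_fetch_add(&o->refs, 1); }
static void obj_put(struct object *o) { atomic_fetch_sub(&o->refs, 1); }

/* Pretend this hands the object to another owner and may fail. */
static int publish_handle(struct object *o)
{
	(void)o;
	return -EMFILE;		/* simulate failure for the example */
}

static int object_get_handle(struct object *o)
{
	int fd;

	obj_get(o);			/* take the reference first ...   */
	fd = publish_handle(o);		/* ... then publish the handle    */
	if (fd < 0)
		obj_put(o);		/* undo the reference on failure  */
	return fd;
}

int main(void)
{
	struct object o = { .refs = 1 };

	printf("handle: %d, refs: %d\n", object_get_handle(&o),
	       atomic_load(&o.refs));
	return 0;
}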
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index b6adc54b96f1..82979880f985 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -15,6 +15,13 @@ menuconfig THERMAL
if THERMAL
+config THERMAL_STATISTICS
+ bool "Thermal state transition statistics"
+ help
+ Export thermal state transition statistics information through sysfs.
+
+ If in doubt, say N.
+
config THERMAL_EMERGENCY_POWEROFF_DELAY_MS
int "Emergency poweroff delay in milli-seconds"
depends on THERMAL
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index a67781b7a0b2..ee3a215b333a 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -637,6 +637,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+ data->irq_enabled = true;
+ data->mode = THERMAL_DEVICE_ENABLED;
+
ret = devm_request_threaded_irq(&pdev->dev, data->irq,
imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
0, "imx_thermal", data);
@@ -649,9 +652,6 @@ static int imx_thermal_probe(struct platform_device *pdev)
return ret;
}
- data->irq_enabled = true;
- data->mode = THERMAL_DEVICE_ENABLED;
-
return 0;
}
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index 8a7f24dd9315..0c19fcd56a0d 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -194,6 +194,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
return -EFAULT;
}
+ priv->priv = obj;
obj->max_state = p->package.count - 1;
obj->cdev =
thermal_cooling_device_register(acpi_device_bid(priv->adev),
@@ -201,8 +202,6 @@ static int int3403_cdev_add(struct int3403_priv *priv)
if (IS_ERR(obj->cdev))
result = PTR_ERR(obj->cdev);
- priv->priv = obj;
-
kfree(buf.pointer);
/* TODO: add ACPI notification support */
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index ed805c7c5ace..ac83f721db24 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -185,6 +185,7 @@
* @regulator: pointer to the TMU regulator structure.
* @reg_conf: pointer to structure to register with core thermal.
* @ntrip: number of supported trip points.
+ * @enabled: current status of TMU device
* @tmu_initialize: SoC specific TMU initialization method
* @tmu_control: SoC specific TMU control method
* @tmu_read: SoC specific TMU temperature read method
@@ -205,6 +206,7 @@ struct exynos_tmu_data {
struct regulator *regulator;
struct thermal_zone_device *tzd;
unsigned int ntrip;
+ bool enabled;
int (*tmu_initialize)(struct platform_device *pdev);
void (*tmu_control)(struct platform_device *pdev, bool on);
@@ -398,6 +400,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
mutex_lock(&data->lock);
clk_enable(data->clk);
data->tmu_control(pdev, on);
+ data->enabled = on;
clk_disable(data->clk);
mutex_unlock(&data->lock);
}
@@ -889,19 +892,24 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
static int exynos_get_temp(void *p, int *temp)
{
struct exynos_tmu_data *data = p;
+ int value, ret = 0;
- if (!data || !data->tmu_read)
+ if (!data || !data->tmu_read || !data->enabled)
return -EINVAL;
mutex_lock(&data->lock);
clk_enable(data->clk);
- *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
+ value = data->tmu_read(data);
+ if (value < 0)
+ ret = value;
+ else
+ *temp = code_to_temp(data, value) * MCELSIUS;
clk_disable(data->clk);
mutex_unlock(&data->lock);
- return 0;
+ return ret;
}
#ifdef CONFIG_THERMAL_EMULATION
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 2b1b0ba393a4..d64325e078db 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -972,8 +972,8 @@ __thermal_cooling_device_register(struct device_node *np,
cdev->ops = ops;
cdev->updated = false;
cdev->device.class = &thermal_class;
- thermal_cooling_device_setup_sysfs(cdev);
cdev->devdata = devdata;
+ thermal_cooling_device_setup_sysfs(cdev);
dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
result = device_register(&cdev->device);
if (result) {
@@ -1106,6 +1106,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
ida_simple_remove(&thermal_cdev_ida, cdev->id);
device_unregister(&cdev->device);
+ thermal_cooling_device_destroy_sysfs(cdev);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 27e3b1df7360..5e4150261500 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -73,6 +73,7 @@ int thermal_build_list_of_policies(char *buf);
int thermal_zone_create_device_groups(struct thermal_zone_device *, int);
void thermal_zone_destroy_device_groups(struct thermal_zone_device *);
void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *);
+void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev);
/* used only at binding time */
ssize_t
thermal_cooling_device_trip_point_show(struct device *,
@@ -84,6 +85,15 @@ ssize_t thermal_cooling_device_weight_store(struct device *,
struct device_attribute *,
const char *, size_t);
+#ifdef CONFIG_THERMAL_STATISTICS
+void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
+ unsigned long new_state);
+#else
+static inline void
+thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
+ unsigned long new_state) {}
+#endif /* CONFIG_THERMAL_STATISTICS */
+
#ifdef CONFIG_THERMAL_GOV_STEP_WISE
int thermal_gov_step_wise_register(void);
void thermal_gov_step_wise_unregister(void);
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index 8cdf75adcce1..eb03d7e099bb 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -187,7 +187,10 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
if (instance->target > target)
target = instance->target;
}
- cdev->ops->set_cur_state(cdev, target);
+
+ if (!cdev->ops->set_cur_state(cdev, target))
+ thermal_cooling_device_stats_update(cdev, target);
+
cdev->updated = true;
mutex_unlock(&cdev->lock);
trace_cdev_update(cdev, target);
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index ba81c9080f6e..23b5e0a709b0 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/jiffies.h>
#include "thermal_core.h"
@@ -721,6 +722,7 @@ thermal_cooling_device_cur_state_store(struct device *dev,
result = cdev->ops->set_cur_state(cdev, state);
if (result)
return result;
+ thermal_cooling_device_stats_update(cdev, state);
return count;
}
@@ -745,14 +747,237 @@ static const struct attribute_group cooling_device_attr_group = {
static const struct attribute_group *cooling_device_attr_groups[] = {
&cooling_device_attr_group,
+ NULL, /* Space allocated for cooling_device_stats_attr_group */
NULL,
};
+#ifdef CONFIG_THERMAL_STATISTICS
+struct cooling_dev_stats {
+ spinlock_t lock;
+ unsigned int total_trans;
+ unsigned long state;
+ unsigned long max_states;
+ ktime_t last_time;
+ ktime_t *time_in_state;
+ unsigned int *trans_table;
+};
+
+static void update_time_in_state(struct cooling_dev_stats *stats)
+{
+ ktime_t now = ktime_get(), delta;
+
+ delta = ktime_sub(now, stats->last_time);
+ stats->time_in_state[stats->state] =
+ ktime_add(stats->time_in_state[stats->state], delta);
+ stats->last_time = now;
+}
+
+void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
+ unsigned long new_state)
+{
+ struct cooling_dev_stats *stats = cdev->stats;
+
+ spin_lock(&stats->lock);
+
+ if (stats->state == new_state)
+ goto unlock;
+
+ update_time_in_state(stats);
+ stats->trans_table[stats->state * stats->max_states + new_state]++;
+ stats->state = new_state;
+ stats->total_trans++;
+
+unlock:
+ spin_unlock(&stats->lock);
+}
+
+static ssize_t
+thermal_cooling_device_total_trans_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ struct cooling_dev_stats *stats = cdev->stats;
+ int ret;
+
+ spin_lock(&stats->lock);
+ ret = sprintf(buf, "%u\n", stats->total_trans);
+ spin_unlock(&stats->lock);
+
+ return ret;
+}
+
+static ssize_t
+thermal_cooling_device_time_in_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ struct cooling_dev_stats *stats = cdev->stats;
+ ssize_t len = 0;
+ int i;
+
+ spin_lock(&stats->lock);
+ update_time_in_state(stats);
+
+ for (i = 0; i < stats->max_states; i++) {
+ len += sprintf(buf + len, "state%u\t%llu\n", i,
+ ktime_to_ms(stats->time_in_state[i]));
+ }
+ spin_unlock(&stats->lock);
+
+ return len;
+}
+
+static ssize_t
+thermal_cooling_device_reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ struct cooling_dev_stats *stats = cdev->stats;
+ int i, states = stats->max_states;
+
+ spin_lock(&stats->lock);
+
+ stats->total_trans = 0;
+ stats->last_time = ktime_get();
+ memset(stats->trans_table, 0,
+ states * states * sizeof(*stats->trans_table));
+
+ for (i = 0; i < stats->max_states; i++)
+ stats->time_in_state[i] = ktime_set(0, 0);
+
+ spin_unlock(&stats->lock);
+
+ return count;
+}
+
+static ssize_t
+thermal_cooling_device_trans_table_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ struct cooling_dev_stats *stats = cdev->stats;
+ ssize_t len = 0;
+ int i, j;
+
+ len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
+ len += snprintf(buf + len, PAGE_SIZE - len, " : ");
+ for (i = 0; i < stats->max_states; i++) {
+ if (len >= PAGE_SIZE)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len, "state%2u ", i);
+ }
+ if (len >= PAGE_SIZE)
+ return PAGE_SIZE;
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+
+ for (i = 0; i < stats->max_states; i++) {
+ if (len >= PAGE_SIZE)
+ break;
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "state%2u:", i);
+
+ for (j = 0; j < stats->max_states; j++) {
+ if (len >= PAGE_SIZE)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len, "%8u ",
+ stats->trans_table[i * stats->max_states + j]);
+ }
+ if (len >= PAGE_SIZE)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ }
+
+ if (len >= PAGE_SIZE) {
+ pr_warn_once("Thermal transition table exceeds PAGE_SIZE. Disabling\n");
+ return -EFBIG;
+ }
+ return len;
+}
+
+static DEVICE_ATTR(total_trans, 0444, thermal_cooling_device_total_trans_show,
+ NULL);
+static DEVICE_ATTR(time_in_state_ms, 0444,
+ thermal_cooling_device_time_in_state_show, NULL);
+static DEVICE_ATTR(reset, 0200, NULL, thermal_cooling_device_reset_store);
+static DEVICE_ATTR(trans_table, 0444,
+ thermal_cooling_device_trans_table_show, NULL);
+
+static struct attribute *cooling_device_stats_attrs[] = {
+ &dev_attr_total_trans.attr,
+ &dev_attr_time_in_state_ms.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_trans_table.attr,
+ NULL
+};
+
+static const struct attribute_group cooling_device_stats_attr_group = {
+ .attrs = cooling_device_stats_attrs,
+ .name = "stats"
+};
+
+static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
+{
+ struct cooling_dev_stats *stats;
+ unsigned long states;
+ int var;
+
+ if (cdev->ops->get_max_state(cdev, &states))
+ return;
+
+ states++; /* Total number of states is highest state + 1 */
+
+ var = sizeof(*stats);
+ var += sizeof(*stats->time_in_state) * states;
+ var += sizeof(*stats->trans_table) * states * states;
+
+ stats = kzalloc(var, GFP_KERNEL);
+ if (!stats)
+ return;
+
+ stats->time_in_state = (ktime_t *)(stats + 1);
+ stats->trans_table = (unsigned int *)(stats->time_in_state + states);
+ cdev->stats = stats;
+ stats->last_time = ktime_get();
+ stats->max_states = states;
+
+ spin_lock_init(&stats->lock);
+
+ /* Fill the empty slot left in cooling_device_attr_groups */
+ var = ARRAY_SIZE(cooling_device_attr_groups) - 2;
+ cooling_device_attr_groups[var] = &cooling_device_stats_attr_group;
+}
+
+static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
+{
+ kfree(cdev->stats);
+ cdev->stats = NULL;
+}
+
+#else
+
+static inline void
+cooling_device_stats_setup(struct thermal_cooling_device *cdev) {}
+static inline void
+cooling_device_stats_destroy(struct thermal_cooling_device *cdev) {}
+
+#endif /* CONFIG_THERMAL_STATISTICS */
+
void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *cdev)
{
+ cooling_device_stats_setup(cdev);
cdev->device.groups = cooling_device_attr_groups;
}
+void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev)
+{
+ cooling_device_stats_destroy(cdev);
+}
+
/* these helpers will be used only at the time of binding */
ssize_t
thermal_cooling_device_trip_point_show(struct device *dev,
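cooling_device_stats_setup() above sizes one allocation for the stats struct plus its two trailing arrays and then carves time_in_state and trans_table out of the same block, so a single kfree() tears everything down. A userspace sketch of the same layout, with plain integer types standing in for ktime_t:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_stats {
	unsigned long max_states;
	uint64_t *time_in_state;	/* states entries          */
	unsigned int *trans_table;	/* states * states entries */
};

static struct dev_stats *stats_alloc(unsigned long states)
{
	size_t sz = sizeof(struct dev_stats);
	struct dev_stats *stats;

	sz += sizeof(*stats->time_in_state) * states;
	sz += sizeof(*stats->trans_table) * states * states;

	stats = calloc(1, sz);		/* one zeroed block for everything */
	if (!stats)
		return NULL;

	/* Carve the trailing arrays out of the same allocation. */
	stats->time_in_state = (uint64_t *)(stats + 1);
	stats->trans_table = (unsigned int *)(stats->time_in_state + states);
	stats->max_states = states;
	return stats;
}

int main(void)
{
	struct dev_stats *stats = stats_alloc(4);

	if (!stats)
		return 1;
	/* record one transition from state 1 to state 3 */
	stats->trans_table[1 * stats->max_states + 3]++;
	printf("1->3: %u\n", stats->trans_table[1 * stats->max_states + 3]);
	free(stats);			/* one free releases the whole layout */
	return 0;
}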
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 3b3e1f6632d7..1dbe27c9946c 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -121,6 +121,9 @@ struct gsm_dlci {
struct mutex mutex;
/* Link layer */
+ int mode;
+#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */
+#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */
spinlock_t lock; /* Protects the internal state */
struct timer_list t1; /* Retransmit timer for SABM and UA */
int retries;
@@ -1364,7 +1367,13 @@ retry:
ctrl->data = data;
ctrl->len = clen;
gsm->pending_cmd = ctrl;
- gsm->cretries = gsm->n2;
+
+ /* If DLCI0 is in ADM mode skip retries, it won't respond */
+ if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
+ gsm->cretries = 1;
+ else
+ gsm->cretries = gsm->n2;
+
mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
gsm_control_transmit(gsm, ctrl);
spin_unlock_irqrestore(&gsm->control_lock, flags);
@@ -1472,6 +1481,7 @@ static void gsm_dlci_t1(struct timer_list *t)
if (debug & 8)
pr_info("DLCI %d opening in ADM mode.\n",
dlci->addr);
+ dlci->mode = DLCI_MODE_ADM;
gsm_dlci_open(dlci);
} else {
gsm_dlci_close(dlci);
@@ -2861,11 +2871,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
static int gsm_carrier_raised(struct tty_port *port)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ struct gsm_mux *gsm = dlci->gsm;
+
/* Not yet open so no carrier info */
if (dlci->state != DLCI_OPEN)
return 0;
if (debug & 2)
return 1;
+
+ /*
+ * Basic mode with control channel in ADM mode may not respond
+ * to CMD_MSC at all and modem_rx is empty.
+ */
+ if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
+ !dlci->modem_rx)
+ return 1;
+
return dlci->modem_rx & TIOCM_CD;
}
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index a24278380fec..22683393a0f2 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -169,7 +169,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
*/
int __init setup_earlycon(char *buf)
{
- const struct earlycon_id *match;
+ const struct earlycon_id **p_match;
if (!buf || !buf[0])
return -EINVAL;
@@ -177,7 +177,9 @@ int __init setup_earlycon(char *buf)
if (early_con.flags & CON_ENABLED)
return -EALREADY;
- for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+ for (p_match = __earlycon_table; p_match < __earlycon_table_end;
+ p_match++) {
+ const struct earlycon_id *match = *p_match;
size_t len = strlen(match->name);
if (strncmp(buf, match->name, len))
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 91f3a1a5cb7f..c2fc6bef7a6f 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -316,7 +316,7 @@ static u32 imx_uart_readl(struct imx_port *sport, u32 offset)
* differ from the value that was last written. As it only
* clears after being set, reread conditionally.
*/
- if (sport->ucr2 & UCR2_SRST)
+ if (!(sport->ucr2 & UCR2_SRST))
sport->ucr2 = readl(sport->port.membase + offset);
return sport->ucr2;
break;
@@ -1833,6 +1833,11 @@ static int imx_uart_rs485_config(struct uart_port *port,
rs485conf->flags &= ~SER_RS485_ENABLED;
if (rs485conf->flags & SER_RS485_ENABLED) {
+ /* Enable receiver if low-active RTS signal is requested */
+ if (sport->have_rtscts && !sport->have_rtsgpio &&
+ !(rs485conf->flags & SER_RS485_RTS_ON_SEND))
+ rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
/* disable transmitter */
ucr2 = imx_uart_readl(sport, UCR2);
if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
@@ -2265,6 +2270,18 @@ static int imx_uart_probe(struct platform_device *pdev)
(!sport->have_rtscts && !sport->have_rtsgpio))
dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
+ /*
+ * If using the i.MX UART RTS/CTS control then the RTS (CTS_B)
+ * signal cannot be set low during transmission in case the
+ * receiver is off (limitation of the i.MX UART IP).
+ */
+ if (sport->port.rs485.flags & SER_RS485_ENABLED &&
+ sport->have_rtscts && !sport->have_rtsgpio &&
+ (!(sport->port.rs485.flags & SER_RS485_RTS_ON_SEND) &&
+ !(sport->port.rs485.flags & SER_RS485_RX_DURING_TX)))
+ dev_err(&pdev->dev,
+ "low-active RTS not possible when receiver is off, enabling receiver\n");
+
imx_uart_rs485_config(&sport->port, &sport->port.rs485);
/* Disable interrupts before requesting them */
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 750e5645dc85..f503fab1e268 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -495,7 +495,6 @@ static void mvebu_uart_set_termios(struct uart_port *port,
termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
termios->c_cflag &= CREAD | CBAUD;
termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
- termios->c_lflag = old->c_lflag;
}
spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 65ff669373d4..a1b3eb04cb32 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1022,6 +1022,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
struct qcom_geni_serial_port *port;
struct uart_port *uport;
struct resource *res;
+ int irq;
if (pdev->dev.of_node)
line = of_alias_get_id(pdev->dev.of_node, "serial");
@@ -1061,11 +1062,12 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
- uport->irq = platform_get_irq(pdev, 0);
- if (uport->irq < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ %d\n", uport->irq);
- return uport->irq;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq);
+ return irq;
}
+ uport->irq = irq;
uport->private_data = &qcom_geni_console_driver;
platform_set_drvdata(pdev, port);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index abcb4d09a2d8..bd72dd843338 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1181,7 +1181,7 @@ static int __init cdns_early_console_setup(struct earlycon_device *device,
/* only set baud if specified on command line - otherwise
* assume it has been initialized by a boot loader.
*/
- if (device->baud) {
+ if (port->uartclk && device->baud) {
u32 cd = 0, bdiv = 0;
u32 mr;
int div8;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 63114ea35ec1..7c838b90a31d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2816,7 +2816,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
kref_init(&tty->kref);
tty->magic = TTY_MAGIC;
- tty_ldisc_init(tty);
+ if (tty_ldisc_init(tty)) {
+ kfree(tty);
+ return NULL;
+ }
tty->session = NULL;
tty->pgrp = NULL;
mutex_init(&tty->legacy_mutex);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 050f4d650891..fb7329ab2b37 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -176,12 +176,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
return ERR_CAST(ldops);
}
- ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
- if (ld == NULL) {
- put_ldops(ldops);
- return ERR_PTR(-ENOMEM);
- }
-
+ /*
+ * There is no way to handle allocation failure of only 16 bytes.
+ * Let's simplify error handling and save more memory.
+ */
+ ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
ld->ops = ldops;
ld->tty = tty;
@@ -527,19 +526,16 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld)
static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
{
/* There is an outstanding reference here so this is safe */
- old = tty_ldisc_get(tty, old->ops->num);
- WARN_ON(IS_ERR(old));
- tty->ldisc = old;
- tty_set_termios_ldisc(tty, old->ops->num);
- if (tty_ldisc_open(tty, old) < 0) {
- tty_ldisc_put(old);
+ if (tty_ldisc_failto(tty, old->ops->num) < 0) {
+ const char *name = tty_name(tty);
+
+ pr_warn("Falling back ldisc for %s.\n", name);
/* The traditional behaviour is to fall back to N_TTY; we
want to avoid falling back to N_NULL unless we have no
choice, to avoid the risk of breaking anything */
if (tty_ldisc_failto(tty, N_TTY) < 0 &&
tty_ldisc_failto(tty, N_NULL) < 0)
- panic("Couldn't open N_NULL ldisc for %s.",
- tty_name(tty));
+ panic("Couldn't open N_NULL ldisc for %s.", name);
}
}
@@ -824,12 +820,13 @@ EXPORT_SYMBOL_GPL(tty_ldisc_release);
* the tty structure is not completely set up when this call is made.
*/
-void tty_ldisc_init(struct tty_struct *tty)
+int tty_ldisc_init(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
if (IS_ERR(ld))
- panic("n_tty: init_tty");
+ return PTR_ERR(ld);
tty->ldisc = ld;
+ return 0;
}
/**
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index f695a7e8c314..c690d100adcd 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -19,7 +19,7 @@
* # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
* > /sys/bus/vmbus/drivers/uio_hv_generic/bind
*/
-
+#define DEBUG 1
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
@@ -94,10 +94,11 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
*/
static void hv_uio_channel_cb(void *context)
{
- struct hv_uio_private_data *pdata = context;
- struct hv_device *dev = pdata->device;
+ struct vmbus_channel *chan = context;
+ struct hv_device *hv_dev = chan->device_obj;
+ struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
- dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+ chan->inbound.ring_buffer->interrupt_mask = 1;
virt_mb();
uio_event_notify(&pdata->info);
@@ -121,78 +122,46 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
uio_event_notify(&pdata->info);
}
-/*
- * Handle fault when looking for sub channel ring buffer
- * Subchannel ring buffer is same as resource 0 which is main ring buffer
- * This is derived from uio_vma_fault
+/* Sysfs API to allow mmap of the ring buffers
+ * The ring buffer is allocated as contiguous memory by vmbus_open
*/
-static int hv_uio_vma_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- void *ring_buffer = vma->vm_private_data;
- struct page *page;
- void *addr;
-
- addr = ring_buffer + (vmf->pgoff << PAGE_SHIFT);
- page = virt_to_page(addr);
- get_page(page);
- vmf->page = page;
- return 0;
-}
-
-static const struct vm_operations_struct hv_uio_vm_ops = {
- .fault = hv_uio_vma_fault,
-};
-
-/* Sysfs API to allow mmap of the ring buffers */
static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct vmbus_channel *channel
= container_of(kobj, struct vmbus_channel, kobj);
- unsigned long requested_pages, actual_pages;
-
- if (vma->vm_end < vma->vm_start)
- return -EINVAL;
-
- /* only allow 0 for now */
- if (vma->vm_pgoff > 0)
- return -EINVAL;
+ struct hv_device *dev = channel->primary_channel->device_obj;
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
- requested_pages = vma_pages(vma);
- actual_pages = 2 * HV_RING_SIZE;
- if (requested_pages > actual_pages)
- return -EINVAL;
+ dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
+ q_idx, vma_pages(vma), vma->vm_pgoff);
- vma->vm_private_data = channel->ringbuffer_pages;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_ops = &hv_uio_vm_ops;
- return 0;
+ return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
+ channel->ringbuffer_pagecount << PAGE_SHIFT);
}
-static struct bin_attribute ring_buffer_bin_attr __ro_after_init = {
+static const struct bin_attribute ring_buffer_bin_attr = {
.attr = {
.name = "ring",
.mode = 0600,
- /* size is set at init time */
},
+ .size = 2 * HV_RING_SIZE * PAGE_SIZE,
.mmap = hv_uio_ring_mmap,
};
-/* Callback from VMBUS subystem when new channel created. */
+/* Callback from VMBUS subsystem when new channel created. */
static void
hv_uio_new_channel(struct vmbus_channel *new_sc)
{
struct hv_device *hv_dev = new_sc->primary_channel->device_obj;
struct device *device = &hv_dev->device;
- struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE;
int ret;
/* Create host communication ring */
ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0,
- hv_uio_channel_cb, pdata);
+ hv_uio_channel_cb, new_sc);
if (ret) {
dev_err(device, "vmbus_open subchannel failed: %d\n", ret);
return;
@@ -234,7 +203,7 @@ hv_uio_probe(struct hv_device *dev,
ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
HV_RING_SIZE * PAGE_SIZE, NULL, 0,
- hv_uio_channel_cb, pdata);
+ hv_uio_channel_cb, dev->channel);
if (ret)
goto fail;
@@ -326,6 +295,11 @@ hv_uio_probe(struct hv_device *dev,
vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
+ ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
+ if (ret)
+ dev_notice(&dev->device,
+ "sysfs create ring bin file failed; %d\n", ret);
+
hv_set_drvdata(dev, pdata);
return 0;
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 75f7fb151f71..987fc5ba6321 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -207,5 +207,6 @@ config USB_ULPI_BUS
config USB_ROLE_SWITCH
tristate
+ select USB_COMMON
endif # USB_SUPPORT
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index c821b4b9647e..7b5cb28ffb35 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -191,7 +191,9 @@ static const unsigned short full_speed_maxpacket_maxes[4] = {
static const unsigned short high_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_CONTROL] = 64,
[USB_ENDPOINT_XFER_ISOC] = 1024,
- [USB_ENDPOINT_XFER_BULK] = 512,
+
+ /* Bulk should be 512, but some devices use 1024: we will warn below */
+ [USB_ENDPOINT_XFER_BULK] = 1024,
[USB_ENDPOINT_XFER_INT] = 1024,
};
static const unsigned short super_speed_maxpacket_maxes[4] = {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 777036ae6367..0a42c5df3c0f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2262,7 +2262,8 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
hcd->state = HC_STATE_SUSPENDED;
if (!PMSG_IS_AUTO(msg))
- usb_phy_roothub_power_off(hcd->phy_roothub);
+ usb_phy_roothub_suspend(hcd->self.sysdev,
+ hcd->phy_roothub);
/* Did we race with a root-hub wakeup event? */
if (rhdev->do_remote_wakeup) {
@@ -2302,7 +2303,8 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
}
if (!PMSG_IS_AUTO(msg)) {
- status = usb_phy_roothub_power_on(hcd->phy_roothub);
+ status = usb_phy_roothub_resume(hcd->self.sysdev,
+ hcd->phy_roothub);
if (status)
return status;
}
@@ -2344,7 +2346,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
}
} else {
hcd->state = old_state;
- usb_phy_roothub_power_off(hcd->phy_roothub);
+ usb_phy_roothub_suspend(hcd->self.sysdev, hcd->phy_roothub);
dev_dbg(&rhdev->dev, "bus %s fail, err %d\n",
"resume", status);
if (status != -ESHUTDOWN)
@@ -2377,6 +2379,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (hcd->rh_registered) {
+ pm_wakeup_event(&hcd->self.root_hub->dev, 0);
set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
queue_work(pm_wq, &hcd->wakeup_work);
}
@@ -2758,12 +2761,16 @@ int usb_add_hcd(struct usb_hcd *hcd,
}
if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
- hcd->phy_roothub = usb_phy_roothub_init(hcd->self.sysdev);
+ hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
if (IS_ERR(hcd->phy_roothub)) {
retval = PTR_ERR(hcd->phy_roothub);
- goto err_phy_roothub_init;
+ goto err_phy_roothub_alloc;
}
+ retval = usb_phy_roothub_init(hcd->phy_roothub);
+ if (retval)
+ goto err_phy_roothub_alloc;
+
retval = usb_phy_roothub_power_on(hcd->phy_roothub);
if (retval)
goto err_usb_phy_roothub_power_on;
@@ -2936,7 +2943,7 @@ err_create_buf:
usb_phy_roothub_power_off(hcd->phy_roothub);
err_usb_phy_roothub_power_on:
usb_phy_roothub_exit(hcd->phy_roothub);
-err_phy_roothub_init:
+err_phy_roothub_alloc:
if (hcd->remove_phy && hcd->usb_phy) {
usb_phy_shutdown(hcd->usb_phy);
usb_put_phy(hcd->usb_phy);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index f6ea16e9f6bb..aa9968d90a48 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -653,12 +653,17 @@ void usb_wakeup_notification(struct usb_device *hdev,
unsigned int portnum)
{
struct usb_hub *hub;
+ struct usb_port *port_dev;
if (!hdev)
return;
hub = usb_hub_to_struct_hub(hdev);
if (hub) {
+ port_dev = hub->ports[portnum - 1];
+ if (port_dev && port_dev->child)
+ pm_wakeup_event(&port_dev->child->dev, 0);
+
set_bit(portnum, hub->wakeup_bits);
kick_hub_wq(hub);
}
@@ -3434,8 +3439,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
/* Skip the initial Clear-Suspend step for a remote wakeup */
status = hub_port_status(hub, port1, &portstatus, &portchange);
- if (status == 0 && !port_is_suspended(hub, portstatus))
+ if (status == 0 && !port_is_suspended(hub, portstatus)) {
+ if (portchange & USB_PORT_STAT_C_SUSPEND)
+ pm_wakeup_event(&udev->dev, 0);
goto SuspendCleared;
+ }
/* see 7.1.7.7; affects power usage, but not budgeting */
if (hub_is_superspeed(hub->hdev))
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index 09b7c43c0ea4..9879767452a2 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -19,19 +19,6 @@ struct usb_phy_roothub {
struct list_head list;
};
-static struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
-{
- struct usb_phy_roothub *roothub_entry;
-
- roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
- if (!roothub_entry)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&roothub_entry->list);
-
- return roothub_entry;
-}
-
static int usb_phy_roothub_add_phy(struct device *dev, int index,
struct list_head *list)
{
@@ -45,9 +32,11 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index,
return PTR_ERR(phy);
}
- roothub_entry = usb_phy_roothub_alloc(dev);
- if (IS_ERR(roothub_entry))
- return PTR_ERR(roothub_entry);
+ roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
+ if (!roothub_entry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&roothub_entry->list);
roothub_entry->phy = phy;
@@ -56,28 +45,44 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index,
return 0;
}
-struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev)
+struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
{
struct usb_phy_roothub *phy_roothub;
- struct usb_phy_roothub *roothub_entry;
- struct list_head *head;
int i, num_phys, err;
+ if (!IS_ENABLED(CONFIG_GENERIC_PHY))
+ return NULL;
+
num_phys = of_count_phandle_with_args(dev->of_node, "phys",
"#phy-cells");
if (num_phys <= 0)
return NULL;
- phy_roothub = usb_phy_roothub_alloc(dev);
- if (IS_ERR(phy_roothub))
- return phy_roothub;
+ phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL);
+ if (!phy_roothub)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&phy_roothub->list);
for (i = 0; i < num_phys; i++) {
err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list);
if (err)
- goto err_out;
+ return ERR_PTR(err);
}
+ return phy_roothub;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc);
+
+int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub)
+{
+ struct usb_phy_roothub *roothub_entry;
+ struct list_head *head;
+ int err;
+
+ if (!phy_roothub)
+ return 0;
+
head = &phy_roothub->list;
list_for_each_entry(roothub_entry, head, list) {
@@ -86,14 +91,13 @@ struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev)
goto err_exit_phys;
}
- return phy_roothub;
+ return 0;
err_exit_phys:
list_for_each_entry_continue_reverse(roothub_entry, head, list)
phy_exit(roothub_entry->phy);
-err_out:
- return ERR_PTR(err);
+ return err;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_init);
@@ -111,7 +115,7 @@ int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub)
list_for_each_entry(roothub_entry, head, list) {
err = phy_exit(roothub_entry->phy);
if (err)
- ret = ret;
+ ret = err;
}
return ret;
@@ -156,3 +160,38 @@ void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub)
phy_power_off(roothub_entry->phy);
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_power_off);
+
+int usb_phy_roothub_suspend(struct device *controller_dev,
+ struct usb_phy_roothub *phy_roothub)
+{
+ usb_phy_roothub_power_off(phy_roothub);
+
+ /* keep the PHYs initialized so the device can wake up the system */
+ if (device_may_wakeup(controller_dev))
+ return 0;
+
+ return usb_phy_roothub_exit(phy_roothub);
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_suspend);
+
+int usb_phy_roothub_resume(struct device *controller_dev,
+ struct usb_phy_roothub *phy_roothub)
+{
+ int err;
+
+ /* if the device can't wake up the system, _exit was called */
+ if (!device_may_wakeup(controller_dev)) {
+ err = usb_phy_roothub_init(phy_roothub);
+ if (err)
+ return err;
+ }
+
+ err = usb_phy_roothub_power_on(phy_roothub);
+
+ /* undo _init if _power_on failed */
+ if (err && !device_may_wakeup(controller_dev))
+ usb_phy_roothub_exit(phy_roothub);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_resume);
diff --git a/drivers/usb/core/phy.h b/drivers/usb/core/phy.h
index 6fde59bfbff8..88a3c037e9df 100644
--- a/drivers/usb/core/phy.h
+++ b/drivers/usb/core/phy.h
@@ -1,7 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * USB roothub wrapper
+ *
+ * Copyright (C) 2018 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#ifndef __USB_CORE_PHY_H_
+#define __USB_CORE_PHY_H_
+
+struct device;
struct usb_phy_roothub;
-struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev);
+struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev);
+
+int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub);
int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub);
int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub);
void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub);
+
+int usb_phy_roothub_suspend(struct device *controller_dev,
+ struct usb_phy_roothub *phy_roothub);
+int usb_phy_roothub_resume(struct device *controller_dev,
+ struct usb_phy_roothub *phy_roothub);
+
+#endif /* __USB_CORE_PHY_H_ */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 920f48a49a87..c55def2f1320 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -186,6 +186,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* HP v222w 16GB Mini USB Drive */
+ { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index d83be5651f87..a666e0758a99 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -985,6 +985,7 @@ struct dwc2_hsotg {
/* DWC OTG HW Release versions */
#define DWC2_CORE_REV_2_71a 0x4f54271a
+#define DWC2_CORE_REV_2_72a 0x4f54272a
#define DWC2_CORE_REV_2_80a 0x4f54280a
#define DWC2_CORE_REV_2_90a 0x4f54290a
#define DWC2_CORE_REV_2_91a 0x4f54291a
@@ -992,6 +993,7 @@ struct dwc2_hsotg {
#define DWC2_CORE_REV_2_94a 0x4f54294a
#define DWC2_CORE_REV_3_00a 0x4f54300a
#define DWC2_CORE_REV_3_10a 0x4f54310a
+#define DWC2_CORE_REV_4_00a 0x4f54400a
#define DWC2_FS_IOT_REV_1_00a 0x5531100a
#define DWC2_HS_IOT_REV_1_00a 0x5532100a
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 6c32bf26e48e..83cb5577a52f 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3928,6 +3928,27 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
if (index && !hs_ep->isochronous)
epctrl |= DXEPCTL_SETD0PID;
+ /* Workaround for full-speed ISOC IN in DDMA mode.
+ * By clearing the NAK status of the EP, the core will send a ZLP
+ * in response to an IN token and assert the NAK interrupt relying
+ * on TxFIFO status only.
+ */
+
+ if (hsotg->gadget.speed == USB_SPEED_FULL &&
+ hs_ep->isochronous && dir_in) {
+ /* The WA applies only to core versions from 2.72a
+ * to 4.00a (including both). Also for FS_IOT_1.00a
+ * and HS_IOT_1.00a.
+ */
+ u32 gsnpsid = dwc2_readl(hsotg->regs + GSNPSID);
+
+ if ((gsnpsid >= DWC2_CORE_REV_2_72a &&
+ gsnpsid <= DWC2_CORE_REV_4_00a) ||
+ gsnpsid == DWC2_FS_IOT_REV_1_00a ||
+ gsnpsid == DWC2_HS_IOT_REV_1_00a)
+ epctrl |= DXEPCTL_CNAK;
+ }
+
dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
__func__, epctrl);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 190f95964000..c51b73b3e048 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -358,9 +358,14 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg)
{
+ int ret;
+
hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
- if (IS_ERR(hsotg->vbus_supply))
- return 0;
+ if (IS_ERR(hsotg->vbus_supply)) {
+ ret = PTR_ERR(hsotg->vbus_supply);
+ hsotg->vbus_supply = NULL;
+ return ret == -ENODEV ? 0 : ret;
+ }
return regulator_enable(hsotg->vbus_supply);
}
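The dwc2_vbus_supply_init() change treats -ENODEV from the optional regulator lookup as "no vbus supply described" and only propagates other errors. A generic sketch of that optional-resource pattern, with a made-up lookup helper rather than the regulator API:

#include <errno.h>
#include <stdio.h>

struct resource;

/* Pretend lookup: returns NULL and sets errno on failure. */
static struct resource *get_optional_resource(const char *name)
{
	(void)name;
	errno = ENODEV;		/* simulate "not described for this device" */
	return NULL;
}

static int init_optional_resource(struct resource **res, const char *name)
{
	*res = get_optional_resource(name);
	if (!*res) {
		int err = errno;

		/* Absent is fine; anything else is a real error. */
		return err == ENODEV ? 0 : -err;
	}
	/* ... enable the resource here ... */
	return 0;
}

int main(void)
{
	struct resource *vbus;

	printf("init: %d\n", init_optional_resource(&vbus, "vbus"));
	return 0;
}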
@@ -4342,9 +4347,7 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
spin_unlock_irqrestore(&hsotg->lock, flags);
- dwc2_vbus_supply_init(hsotg);
-
- return 0;
+ return dwc2_vbus_supply_init(hsotg);
}
/*
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index 7f21747007f1..bea2e8ec0369 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -141,8 +141,10 @@ static int dwc2_pci_probe(struct pci_dev *pci,
goto err;
glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
- if (!glue)
+ if (!glue) {
+ ret = -ENOMEM;
goto err;
+ }
ret = platform_device_add(dwc2);
if (ret) {
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 8796a5ee9bb9..0dedf8a799f4 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -166,7 +166,7 @@ static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
dwc3_ep_inc_trb(&dep->trb_dequeue);
}
-void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
+static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
struct dwc3_request *req, int status)
{
struct dwc3 *dwc = dep->dwc;
@@ -1424,7 +1424,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
dwc->lock);
if (!r->trb)
- goto out1;
+ goto out0;
if (r->num_pending_sgs) {
struct dwc3_trb *trb;
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index 7889bcc0509a..8b72b192c747 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -221,7 +221,7 @@ static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req)
netif_wake_queue(dev);
}
-static int pn_net_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t pn_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct phonet_port *port = netdev_priv(dev);
struct f_phonet *fp;
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 4c6c08b675b5..21307d862af6 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -73,9 +73,10 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
if (!qh)
goto done;
qh->hw = (struct ehci_qh_hw *)
- dma_pool_zalloc(ehci->qh_pool, flags, &dma);
+ dma_pool_alloc(ehci->qh_pool, flags, &dma);
if (!qh->hw)
goto fail;
+ memset(qh->hw, 0, sizeof *qh->hw);
qh->qh_dma = dma;
// INIT_LIST_HEAD (&qh->qh_list);
INIT_LIST_HEAD (&qh->qtd_list);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 28e2a338b481..e56db44708bc 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1287,7 +1287,7 @@ itd_urb_transaction(
} else {
alloc_itd:
spin_unlock_irqrestore(&ehci->lock, flags);
- itd = dma_pool_zalloc(ehci->itd_pool, mem_flags,
+ itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
&itd_dma);
spin_lock_irqsave(&ehci->lock, flags);
if (!itd) {
@@ -1297,6 +1297,7 @@ itd_urb_transaction(
}
}
+ memset(itd, 0, sizeof(*itd));
itd->itd_dma = itd_dma;
itd->frame = NO_FRAME;
list_add(&itd->itd_list, &sched->td_list);
@@ -2080,7 +2081,7 @@ sitd_urb_transaction(
} else {
alloc_sitd:
spin_unlock_irqrestore(&ehci->lock, flags);
- sitd = dma_pool_zalloc(ehci->sitd_pool, mem_flags,
+ sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
&sitd_dma);
spin_lock_irqsave(&ehci->lock, flags);
if (!sitd) {
@@ -2090,6 +2091,7 @@ sitd_urb_transaction(
}
}
+ memset(sitd, 0, sizeof(*sitd));
sitd->sitd_dma = sitd_dma;
sitd->frame = NO_FRAME;
list_add(&sitd->sitd_list, &iso_sched->td_list);
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index 48779c44c361..eb494ec547e8 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -320,9 +320,11 @@ int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
void xhci_dbc_tty_unregister_driver(void)
{
- tty_unregister_driver(dbc_tty_driver);
- put_tty_driver(dbc_tty_driver);
- dbc_tty_driver = NULL;
+ if (dbc_tty_driver) {
+ tty_unregister_driver(dbc_tty_driver);
+ put_tty_driver(dbc_tty_driver);
+ dbc_tty_driver = NULL;
+ }
}
static void dbc_rx_push(unsigned long _port)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 72ebbc908e19..32cd52ca8318 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -354,7 +354,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
slot_id = 0;
for (i = 0; i < MAX_HC_SLOTS; i++) {
- if (!xhci->devs[i])
+ if (!xhci->devs[i] || !xhci->devs[i]->udev)
continue;
speed = xhci->devs[i]->udev->speed;
if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f17b7eab66cf..85ffda85f8ab 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -126,7 +126,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
xhci->quirks |= XHCI_AMD_PLL_FIX;
- if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb)
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ (pdev->device == 0x15e0 ||
+ pdev->device == 0x15e1 ||
+ pdev->device == 0x43bb))
xhci->quirks |= XHCI_SUSPEND_DELAY;
if (pdev->vendor == PCI_VENDOR_ID_AMD)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index df327dcc2bac..c1b22fc64e38 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -157,6 +157,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
struct resource *res;
struct usb_hcd *hcd;
struct clk *clk;
+ struct clk *reg_clk;
int ret;
int irq;
@@ -226,17 +227,27 @@ static int xhci_plat_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(res);
/*
- * Not all platforms have a clk so it is not an error if the
- * clock does not exists.
+ * Not all platforms have clks so it is not an error if the
+ * clocks do not exist.
*/
+ reg_clk = devm_clk_get(&pdev->dev, "reg");
+ if (!IS_ERR(reg_clk)) {
+ ret = clk_prepare_enable(reg_clk);
+ if (ret)
+ goto put_hcd;
+ } else if (PTR_ERR(reg_clk) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto put_hcd;
+ }
+
clk = devm_clk_get(&pdev->dev, NULL);
if (!IS_ERR(clk)) {
ret = clk_prepare_enable(clk);
if (ret)
- goto put_hcd;
+ goto disable_reg_clk;
} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
- goto put_hcd;
+ goto disable_reg_clk;
}
xhci = hcd_to_xhci(hcd);
@@ -252,6 +263,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
device_wakeup_enable(hcd->self.controller);
xhci->clk = clk;
+ xhci->reg_clk = reg_clk;
xhci->main_hcd = hcd;
xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
dev_name(&pdev->dev), hcd);
@@ -320,8 +332,10 @@ put_usb3_hcd:
usb_put_hcd(xhci->shared_hcd);
disable_clk:
- if (!IS_ERR(clk))
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(clk);
+
+disable_reg_clk:
+ clk_disable_unprepare(reg_clk);
put_hcd:
usb_put_hcd(hcd);
@@ -338,6 +352,7 @@ static int xhci_plat_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct clk *clk = xhci->clk;
+ struct clk *reg_clk = xhci->reg_clk;
xhci->xhc_state |= XHCI_STATE_REMOVING;
@@ -347,8 +362,8 @@ static int xhci_plat_remove(struct platform_device *dev)
usb_remove_hcd(hcd);
usb_put_hcd(xhci->shared_hcd);
- if (!IS_ERR(clk))
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(clk);
+ clk_disable_unprepare(reg_clk);
usb_put_hcd(hcd);
pm_runtime_set_suspended(&dev->dev);
@@ -420,7 +435,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
- .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
.pm = &xhci_plat_pm_ops,
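For reference, the optional "reg" clock handling added to xhci_plat_probe() above follows a common idiom: a missing clock is tolerated, but -EPROBE_DEFER from the clock framework must still be propagated so probing is retried once the provider appears. A minimal sketch of that idiom, not part of the patch; the helper name and includes are illustrative only:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Hypothetical helper: get and enable an optional, named clock. */
static int optional_clk_get_enable(struct device *dev, const char *id,
				   struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (IS_ERR(clk)) {
		/* A missing clock is fine; a deferred provider is not. */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		*out = NULL;
		return 0;
	}

	*out = clk;
	return clk_prepare_enable(clk);
}

Since clk_disable_unprepare() ignores NULL and error pointers, storing NULL for an absent clock keeps the disable paths unconditional, which is why the patch can drop the IS_ERR() checks there.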
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 9b27798ecce5..711da3306b14 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3621,6 +3621,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
xhci_debugfs_remove_slot(xhci, udev->slot_id);
+ virt_dev->udev = NULL;
ret = xhci_disable_slot(xhci, udev->slot_id);
if (ret)
xhci_free_virt_device(xhci, udev->slot_id);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 05c909b04f14..6dfc4867dbcf 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1729,8 +1729,9 @@ struct xhci_hcd {
int page_shift;
/* msi-x vectors */
int msix_count;
- /* optional clock */
+ /* optional clocks */
struct clk *clk;
+ struct clk *reg_clk;
/* data structures */
struct xhci_device_context_array *dcbaa;
struct xhci_ring *cmd_ring;
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 05a679d5e3a2..6a60bc0490c5 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -451,7 +451,6 @@ static int dsps_musb_init(struct musb *musb)
if (!rev)
return -ENODEV;
- usb_phy_init(musb->xceiv);
if (IS_ERR(musb->phy)) {
musb->phy = NULL;
} else {
@@ -501,7 +500,6 @@ static int dsps_musb_exit(struct musb *musb)
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
del_timer_sync(&musb->dev_timer);
- usb_phy_shutdown(musb->xceiv);
phy_power_off(musb->phy);
phy_exit(musb->phy);
debugfs_remove_recursive(glue->dbgfs_root);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index e564695c6c8d..71c5835ea9cd 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -417,7 +417,6 @@ void musb_g_tx(struct musb *musb, u8 epnum)
req = next_request(musb_ep);
request = &req->request;
- trace_musb_req_tx(req);
csr = musb_readw(epio, MUSB_TXCSR);
musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
@@ -456,6 +455,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
u8 is_dma = 0;
bool short_packet = false;
+ trace_musb_req_tx(req);
+
if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
is_dma = 1;
csr |= MUSB_TXCSR_P_WZC_BITS;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 3a8451a15f7f..15a42cee0a9c 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -990,7 +990,9 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
/* set tx_reinit and schedule the next qh */
ep->tx_reinit = 1;
}
- musb_start_urb(musb, is_in, next_qh);
+
+ if (next_qh)
+ musb_start_urb(musb, is_in, next_qh);
}
}
@@ -2522,8 +2524,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
u8 devctl;
+ int ret;
- musb_port_suspend(musb, true);
+ ret = musb_port_suspend(musb, true);
+ if (ret)
+ return ret;
if (!is_host_active(musb))
return 0;
@@ -2754,6 +2759,7 @@ int musb_host_setup(struct musb *musb, int power_budget)
hcd->self.otg_port = 1;
musb->xceiv->otg->host = &hcd->self;
hcd->power_budget = 2 * (power_budget ? : 250);
+ hcd->skip_phy_initialization = 1;
ret = usb_add_hcd(hcd, 0, 0);
if (ret < 0)
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 72392bbcd0a4..2999845632ce 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -67,7 +67,7 @@ extern void musb_host_rx(struct musb *, u8);
extern void musb_root_disconnect(struct musb *musb);
extern void musb_host_resume_root_hub(struct musb *musb);
extern void musb_host_poke_root_hub(struct musb *musb);
-extern void musb_port_suspend(struct musb *musb, bool do_suspend);
+extern int musb_port_suspend(struct musb *musb, bool do_suspend);
extern void musb_port_reset(struct musb *musb, bool do_reset);
extern void musb_host_finish_resume(struct work_struct *work);
#else
@@ -99,7 +99,10 @@ static inline void musb_root_disconnect(struct musb *musb) {}
static inline void musb_host_resume_root_hub(struct musb *musb) {}
static inline void musb_host_poll_rh_status(struct musb *musb) {}
static inline void musb_host_poke_root_hub(struct musb *musb) {}
-static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {}
+static inline int musb_port_suspend(struct musb *musb, bool do_suspend)
+{
+ return 0;
+}
static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
static inline void musb_host_finish_resume(struct work_struct *work) {}
#endif
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 5165d2b07ade..2f8dd9826e94 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -48,14 +48,14 @@ void musb_host_finish_resume(struct work_struct *work)
spin_unlock_irqrestore(&musb->lock, flags);
}
-void musb_port_suspend(struct musb *musb, bool do_suspend)
+int musb_port_suspend(struct musb *musb, bool do_suspend)
{
struct usb_otg *otg = musb->xceiv->otg;
u8 power;
void __iomem *mbase = musb->mregs;
if (!is_host_active(musb))
- return;
+ return 0;
/* NOTE: this doesn't necessarily put PHY into low power mode,
* turning off its clock; that's a function of PHY integration and
@@ -66,16 +66,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
if (do_suspend) {
int retries = 10000;
- power &= ~MUSB_POWER_RESUME;
- power |= MUSB_POWER_SUSPENDM;
- musb_writeb(mbase, MUSB_POWER, power);
+ if (power & MUSB_POWER_RESUME)
+ return -EBUSY;
- /* Needed for OPT A tests */
- power = musb_readb(mbase, MUSB_POWER);
- while (power & MUSB_POWER_SUSPENDM) {
+ if (!(power & MUSB_POWER_SUSPENDM)) {
+ power |= MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER, power);
+
+ /* Needed for OPT A tests */
power = musb_readb(mbase, MUSB_POWER);
- if (retries-- < 1)
- break;
+ while (power & MUSB_POWER_SUSPENDM) {
+ power = musb_readb(mbase, MUSB_POWER);
+ if (retries-- < 1)
+ break;
+ }
}
musb_dbg(musb, "Root port suspended, power %02x", power);
@@ -111,6 +115,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
schedule_delayed_work(&musb->finish_resume_work,
msecs_to_jiffies(USB_RESUME_TIMEOUT));
}
+ return 0;
}
void musb_port_reset(struct musb *musb, bool do_reset)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index a646820f5a78..533f127c30ad 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE
- Fundamental Software dongle.
- Google USB serial devices
- HP4x calculators
+ - Libtransistor USB console
- a number of Motorola phones
- Motorola Tetra devices
- Novatel Wireless GPS receivers
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index de1e759dd512..eb6c26cbe579 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -214,6 +214,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 87202ad5a50d..7ea221d42dba 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1898,7 +1898,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
return ftdi_jtag_probe(serial);
if (udev->product &&
- (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+ (!strcmp(udev->product, "Arrow USB Blaster") ||
+ !strcmp(udev->product, "BeagleBone/XDS100V2") ||
!strcmp(udev->product, "SNAP Connect E10")))
return ftdi_jtag_probe(serial);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index c3f252283ab9..2058852a87fa 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,8 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Qualcomm's vendor ID */
#define QUECTEL_PRODUCT_UC20 0x9003
#define QUECTEL_PRODUCT_UC15 0x9090
+/* These u-blox products use Qualcomm's vendor ID */
+#define UBLOX_PRODUCT_R410M 0x90b2
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
@@ -1065,6 +1067,9 @@ static const struct usb_device_id option_ids[] = {
/* Yuga products use Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
.driver_info = RSVD(1) | RSVD(4) },
+ /* u-blox products using Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
+ .driver_info = RSVD(1) | RSVD(3) },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
.driver_info = RSVD(4) },
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 4ef79e29cb26..40864c2bd9dc 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -63,6 +63,11 @@ DEVICE(flashloader, FLASHLOADER_IDS);
0x01) }
DEVICE(google, GOOGLE_IDS);
+/* Libtransistor USB console */
+#define LIBTRANSISTOR_IDS() \
+ { USB_DEVICE(0x1209, 0x8b00) }
+DEVICE(libtransistor, LIBTRANSISTOR_IDS);
+
/* ViVOpay USB Serial Driver */
#define VIVOPAY_IDS() \
{ USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
@@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
&funsoft_device,
&flashloader_device,
&google_device,
+ &libtransistor_device,
&vivopay_device,
&moto_modem_device,
&motorola_tetra_device,
@@ -126,6 +132,7 @@ static const struct usb_device_id id_table[] = {
FUNSOFT_IDS(),
FLASHLOADER_IDS(),
GOOGLE_IDS(),
+ LIBTRANSISTOR_IDS(),
VIVOPAY_IDS(),
MOTO_IDS(),
MOTOROLA_TETRA_IDS(),
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index f5373ed2cd45..8ddbecc25d89 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -335,47 +335,48 @@ static int palm_os_3_probe(struct usb_serial *serial,
goto exit;
}
- if (retval == sizeof(*connection_info)) {
- connection_info = (struct visor_connection_info *)
- transfer_buffer;
-
- num_ports = le16_to_cpu(connection_info->num_ports);
- for (i = 0; i < num_ports; ++i) {
- switch (
- connection_info->connections[i].port_function_id) {
- case VISOR_FUNCTION_GENERIC:
- string = "Generic";
- break;
- case VISOR_FUNCTION_DEBUGGER:
- string = "Debugger";
- break;
- case VISOR_FUNCTION_HOTSYNC:
- string = "HotSync";
- break;
- case VISOR_FUNCTION_CONSOLE:
- string = "Console";
- break;
- case VISOR_FUNCTION_REMOTE_FILE_SYS:
- string = "Remote File System";
- break;
- default:
- string = "unknown";
- break;
- }
- dev_info(dev, "%s: port %d, is for %s use\n",
- serial->type->description,
- connection_info->connections[i].port, string);
- }
+ if (retval != sizeof(*connection_info)) {
+ dev_err(dev, "Invalid connection information received from device\n");
+ retval = -ENODEV;
+ goto exit;
}
- /*
- * Handle devices that report invalid stuff here.
- */
+
+ connection_info = (struct visor_connection_info *)transfer_buffer;
+
+ num_ports = le16_to_cpu(connection_info->num_ports);
+
+ /* Handle devices that report invalid stuff here. */
if (num_ports == 0 || num_ports > 2) {
dev_warn(dev, "%s: No valid connect info available\n",
serial->type->description);
num_ports = 2;
}
+ for (i = 0; i < num_ports; ++i) {
+ switch (connection_info->connections[i].port_function_id) {
+ case VISOR_FUNCTION_GENERIC:
+ string = "Generic";
+ break;
+ case VISOR_FUNCTION_DEBUGGER:
+ string = "Debugger";
+ break;
+ case VISOR_FUNCTION_HOTSYNC:
+ string = "HotSync";
+ break;
+ case VISOR_FUNCTION_CONSOLE:
+ string = "Console";
+ break;
+ case VISOR_FUNCTION_REMOTE_FILE_SYS:
+ string = "Remote File System";
+ break;
+ default:
+ string = "unknown";
+ break;
+ }
+ dev_info(dev, "%s: port %d, is for %s use\n",
+ serial->type->description,
+ connection_info->connections[i].port, string);
+ }
dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
num_ports);
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 677d12138dbd..ded49e3bf2b0 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -3725,6 +3725,7 @@ void tcpm_unregister_port(struct tcpm_port *port)
for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
typec_unregister_altmode(port->port_altmode[i]);
typec_unregister_port(port->typec_port);
+ usb_role_switch_put(port->role_sw);
tcpm_debugfs_exit(port);
destroy_workqueue(port->wq);
}
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
index 8b8406867c02..4b4c8d271b27 100644
--- a/drivers/usb/typec/tps6598x.c
+++ b/drivers/usb/typec/tps6598x.c
@@ -73,6 +73,7 @@ struct tps6598x {
struct device *dev;
struct regmap *regmap;
struct mutex lock; /* device lock */
+ u8 i2c_protocol:1;
struct typec_port *port;
struct typec_partner *partner;
@@ -80,19 +81,39 @@ struct tps6598x {
struct typec_capability typec_cap;
};
+static int
+tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
+{
+ u8 data[len + 1];
+ int ret;
+
+ if (!tps->i2c_protocol)
+ return regmap_raw_read(tps->regmap, reg, val, len);
+
+ ret = regmap_raw_read(tps->regmap, reg, data, sizeof(data));
+ if (ret)
+ return ret;
+
+ if (data[0] < len)
+ return -EIO;
+
+ memcpy(val, &data[1], len);
+ return 0;
+}
+
static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val)
{
- return regmap_raw_read(tps->regmap, reg, val, sizeof(u16));
+ return tps6598x_block_read(tps, reg, val, sizeof(u16));
}
static inline int tps6598x_read32(struct tps6598x *tps, u8 reg, u32 *val)
{
- return regmap_raw_read(tps->regmap, reg, val, sizeof(u32));
+ return tps6598x_block_read(tps, reg, val, sizeof(u32));
}
static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val)
{
- return regmap_raw_read(tps->regmap, reg, val, sizeof(u64));
+ return tps6598x_block_read(tps, reg, val, sizeof(u64));
}
static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val)
@@ -121,8 +142,8 @@ static int tps6598x_read_partner_identity(struct tps6598x *tps)
struct tps6598x_rx_identity_reg id;
int ret;
- ret = regmap_raw_read(tps->regmap, TPS_REG_RX_IDENTITY_SOP,
- &id, sizeof(id));
+ ret = tps6598x_block_read(tps, TPS_REG_RX_IDENTITY_SOP,
+ &id, sizeof(id));
if (ret)
return ret;
@@ -224,13 +245,13 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
} while (val);
if (out_len) {
- ret = regmap_raw_read(tps->regmap, TPS_REG_DATA1,
- out_data, out_len);
+ ret = tps6598x_block_read(tps, TPS_REG_DATA1,
+ out_data, out_len);
if (ret)
return ret;
val = out_data[0];
} else {
- ret = regmap_read(tps->regmap, TPS_REG_DATA1, &val);
+ ret = tps6598x_block_read(tps, TPS_REG_DATA1, &val, sizeof(u8));
if (ret)
return ret;
}
@@ -385,6 +406,16 @@ static int tps6598x_probe(struct i2c_client *client)
if (!vid)
return -ENODEV;
+ /*
+ * Check whether the adapter can handle the SMBus protocol. If it cannot,
+ * the driver needs to take care of block reads separately.
+ *
+ * FIXME: Testing with I2C_FUNC_I2C. regmap-i2c uses I2C protocol
+ * unconditionally if the adapter has I2C_FUNC_I2C set.
+ */
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ tps->i2c_protocol = true;
+
ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
if (ret < 0)
return ret;
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index b57891c1fd31..7afbea512207 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -5,6 +5,6 @@ obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o
typec_ucsi-y := ucsi.o
-typec_ucsi-$(CONFIG_FTRACE) += trace.o
+typec_ucsi-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index bf0977fbd100..bd5cca5632b3 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -28,7 +28,7 @@
* difficult to estimate the time it takes for the system to process the command
* before it is actually passed to the PPM.
*/
-#define UCSI_TIMEOUT_MS 1000
+#define UCSI_TIMEOUT_MS 5000
/*
* UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 14a72357800a..35618ceb2791 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -73,6 +73,7 @@ struct bus_id_priv {
struct stub_device *sdev;
struct usb_device *udev;
char shutdown_busid;
+ spinlock_t busid_lock;
};
/* stub_priv is allocated from stub_priv_cache */
@@ -83,6 +84,7 @@ extern struct usb_device_driver stub_driver;
/* stub_main.c */
struct bus_id_priv *get_busid_priv(const char *busid);
+void put_busid_priv(struct bus_id_priv *bid);
int del_match_busid(char *busid);
void stub_device_cleanup_urbs(struct stub_device *sdev);
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index dd8ef36ab10e..c0d6ff1baa72 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -300,9 +300,9 @@ static int stub_probe(struct usb_device *udev)
struct stub_device *sdev = NULL;
const char *udev_busid = dev_name(&udev->dev);
struct bus_id_priv *busid_priv;
- int rc;
+ int rc = 0;
- dev_dbg(&udev->dev, "Enter\n");
+ dev_dbg(&udev->dev, "Enter probe\n");
/* check we should claim or not by busid_table */
busid_priv = get_busid_priv(udev_busid);
@@ -317,13 +317,15 @@ static int stub_probe(struct usb_device *udev)
* other matched drivers by the driver core.
* See driver_probe_device() in driver/base/dd.c
*/
- return -ENODEV;
+ rc = -ENODEV;
+ goto call_put_busid_priv;
}
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
udev_busid);
- return -ENODEV;
+ rc = -ENODEV;
+ goto call_put_busid_priv;
}
if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
@@ -331,13 +333,16 @@ static int stub_probe(struct usb_device *udev)
"%s is attached on vhci_hcd... skip!\n",
udev_busid);
- return -ENODEV;
+ rc = -ENODEV;
+ goto call_put_busid_priv;
}
/* ok, this is my device */
sdev = stub_device_alloc(udev);
- if (!sdev)
- return -ENOMEM;
+ if (!sdev) {
+ rc = -ENOMEM;
+ goto call_put_busid_priv;
+ }
dev_info(&udev->dev,
"usbip-host: register new device (bus %u dev %u)\n",
@@ -369,7 +374,9 @@ static int stub_probe(struct usb_device *udev)
}
busid_priv->status = STUB_BUSID_ALLOC;
- return 0;
+ rc = 0;
+ goto call_put_busid_priv;
+
err_files:
usb_hub_release_port(udev->parent, udev->portnum,
(struct usb_dev_state *) udev);
@@ -379,6 +386,9 @@ err_port:
busid_priv->sdev = NULL;
stub_device_free(sdev);
+
+call_put_busid_priv:
+ put_busid_priv(busid_priv);
return rc;
}
@@ -404,7 +414,7 @@ static void stub_disconnect(struct usb_device *udev)
struct bus_id_priv *busid_priv;
int rc;
- dev_dbg(&udev->dev, "Enter\n");
+ dev_dbg(&udev->dev, "Enter disconnect\n");
busid_priv = get_busid_priv(udev_busid);
if (!busid_priv) {
@@ -417,7 +427,7 @@ static void stub_disconnect(struct usb_device *udev)
/* get stub_device */
if (!sdev) {
dev_err(&udev->dev, "could not get device");
- return;
+ goto call_put_busid_priv;
}
dev_set_drvdata(&udev->dev, NULL);
@@ -432,12 +442,12 @@ static void stub_disconnect(struct usb_device *udev)
(struct usb_dev_state *) udev);
if (rc) {
dev_dbg(&udev->dev, "unable to release port\n");
- return;
+ goto call_put_busid_priv;
}
/* If usb reset is called from event handler */
if (usbip_in_eh(current))
- return;
+ goto call_put_busid_priv;
/* shutdown the current connection */
shutdown_busid(busid_priv);
@@ -448,12 +458,11 @@ static void stub_disconnect(struct usb_device *udev)
busid_priv->sdev = NULL;
stub_device_free(sdev);
- if (busid_priv->status == STUB_BUSID_ALLOC) {
+ if (busid_priv->status == STUB_BUSID_ALLOC)
busid_priv->status = STUB_BUSID_ADDED;
- } else {
- busid_priv->status = STUB_BUSID_OTHER;
- del_match_busid((char *)udev_busid);
- }
+
+call_put_busid_priv:
+ put_busid_priv(busid_priv);
}
#ifdef CONFIG_PM
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index c31c8402a0c5..bf8a5feb0ee9 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -14,6 +14,7 @@
#define DRIVER_DESC "USB/IP Host Driver"
struct kmem_cache *stub_priv_cache;
+
/*
* busid_tables defines matching busids that usbip can grab. A user can change
* dynamically what device is locally used and what device is exported to a
@@ -25,6 +26,8 @@ static spinlock_t busid_table_lock;
static void init_busid_table(void)
{
+ int i;
+
/*
* This also sets the bus_table[i].status to
* STUB_BUSID_OTHER, which is 0.
@@ -32,6 +35,9 @@ static void init_busid_table(void)
memset(busid_table, 0, sizeof(busid_table));
spin_lock_init(&busid_table_lock);
+
+ for (i = 0; i < MAX_BUSID; i++)
+ spin_lock_init(&busid_table[i].busid_lock);
}
/*
@@ -43,15 +49,20 @@ static int get_busid_idx(const char *busid)
int i;
int idx = -1;
- for (i = 0; i < MAX_BUSID; i++)
+ for (i = 0; i < MAX_BUSID; i++) {
+ spin_lock(&busid_table[i].busid_lock);
if (busid_table[i].name[0])
if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
idx = i;
+ spin_unlock(&busid_table[i].busid_lock);
break;
}
+ spin_unlock(&busid_table[i].busid_lock);
+ }
return idx;
}
+/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
struct bus_id_priv *get_busid_priv(const char *busid)
{
int idx;
@@ -59,13 +70,22 @@ struct bus_id_priv *get_busid_priv(const char *busid)
spin_lock(&busid_table_lock);
idx = get_busid_idx(busid);
- if (idx >= 0)
+ if (idx >= 0) {
bid = &(busid_table[idx]);
+ /* get busid_lock before returning */
+ spin_lock(&bid->busid_lock);
+ }
spin_unlock(&busid_table_lock);
return bid;
}
+void put_busid_priv(struct bus_id_priv *bid)
+{
+ if (bid)
+ spin_unlock(&bid->busid_lock);
+}
+
static int add_match_busid(char *busid)
{
int i;
@@ -78,15 +98,19 @@ static int add_match_busid(char *busid)
goto out;
}
- for (i = 0; i < MAX_BUSID; i++)
+ for (i = 0; i < MAX_BUSID; i++) {
+ spin_lock(&busid_table[i].busid_lock);
if (!busid_table[i].name[0]) {
strlcpy(busid_table[i].name, busid, BUSID_SIZE);
if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
(busid_table[i].status != STUB_BUSID_REMOV))
busid_table[i].status = STUB_BUSID_ADDED;
ret = 0;
+ spin_unlock(&busid_table[i].busid_lock);
break;
}
+ spin_unlock(&busid_table[i].busid_lock);
+ }
out:
spin_unlock(&busid_table_lock);
@@ -107,6 +131,8 @@ int del_match_busid(char *busid)
/* found */
ret = 0;
+ spin_lock(&busid_table[idx].busid_lock);
+
if (busid_table[idx].status == STUB_BUSID_OTHER)
memset(busid_table[idx].name, 0, BUSID_SIZE);
@@ -114,6 +140,7 @@ int del_match_busid(char *busid)
(busid_table[idx].status != STUB_BUSID_ADDED))
busid_table[idx].status = STUB_BUSID_REMOV;
+ spin_unlock(&busid_table[idx].busid_lock);
out:
spin_unlock(&busid_table_lock);
@@ -126,9 +153,12 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf)
char *out = buf;
spin_lock(&busid_table_lock);
- for (i = 0; i < MAX_BUSID; i++)
+ for (i = 0; i < MAX_BUSID; i++) {
+ spin_lock(&busid_table[i].busid_lock);
if (busid_table[i].name[0])
out += sprintf(out, "%s ", busid_table[i].name);
+ spin_unlock(&busid_table[i].busid_lock);
+ }
spin_unlock(&busid_table_lock);
out += sprintf(out, "\n");
@@ -169,6 +199,51 @@ static ssize_t match_busid_store(struct device_driver *dev, const char *buf,
}
static DRIVER_ATTR_RW(match_busid);
+static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
+{
+ int ret;
+
+ /* device_attach() callers should hold parent lock for USB */
+ if (busid_priv->udev->dev.parent)
+ device_lock(busid_priv->udev->dev.parent);
+ ret = device_attach(&busid_priv->udev->dev);
+ if (busid_priv->udev->dev.parent)
+ device_unlock(busid_priv->udev->dev.parent);
+ if (ret < 0) {
+ dev_err(&busid_priv->udev->dev, "rebind failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void stub_device_rebind(void)
+{
+#if IS_MODULE(CONFIG_USBIP_HOST)
+ struct bus_id_priv *busid_priv;
+ int i;
+
+ /* update status to STUB_BUSID_OTHER so probe ignores the device */
+ spin_lock(&busid_table_lock);
+ for (i = 0; i < MAX_BUSID; i++) {
+ if (busid_table[i].name[0] &&
+ busid_table[i].shutdown_busid) {
+ busid_priv = &(busid_table[i]);
+ busid_priv->status = STUB_BUSID_OTHER;
+ }
+ }
+ spin_unlock(&busid_table_lock);
+
+ /* now run rebind - no need to hold locks. driver files are removed */
+ for (i = 0; i < MAX_BUSID; i++) {
+ if (busid_table[i].name[0] &&
+ busid_table[i].shutdown_busid) {
+ busid_priv = &(busid_table[i]);
+ do_rebind(busid_table[i].name, busid_priv);
+ }
+ }
+#endif
+}
+
static ssize_t rebind_store(struct device_driver *dev, const char *buf,
size_t count)
{
@@ -186,11 +261,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
if (!bid)
return -ENODEV;
- ret = device_attach(&bid->udev->dev);
- if (ret < 0) {
- dev_err(&bid->udev->dev, "rebind failed\n");
+ /* mark the device for deletion so probe ignores it during rescan */
+ bid->status = STUB_BUSID_OTHER;
+ /* release the busid lock */
+ put_busid_priv(bid);
+
+ ret = do_rebind((char *) buf, bid);
+ if (ret < 0)
return ret;
- }
+
+ /* delete device from busid_table */
+ del_match_busid((char *) buf);
return count;
}
@@ -312,6 +393,9 @@ static void __exit usbip_host_exit(void)
*/
usb_deregister_device_driver(&stub_driver);
+ /* initiate scan to attach devices */
+ stub_device_rebind();
+
kmem_cache_destroy(stub_priv_cache);
}
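The get_busid_priv()/put_busid_priv() pair introduced above uses a hand-over-hand scheme: the table lock covers the lookup, the per-entry lock is taken before the table lock is dropped, and the caller releases it through the put helper. A standalone userspace sketch of the same pattern, with all names hypothetical and pthread mutexes standing in for the kernel spinlocks:

#include <pthread.h>
#include <string.h>

#define MAX_ENTRIES 16
#define NAME_SIZE   32

struct entry {
	char name[NAME_SIZE];
	pthread_mutex_t lock;
};

static struct entry table[MAX_ENTRIES];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void init_table(void)
{
	int i;

	memset(table, 0, sizeof(table));
	for (i = 0; i < MAX_ENTRIES; i++)
		pthread_mutex_init(&table[i].lock, NULL);
}

/* Returns with the entry's lock held, or NULL if the name is not present. */
static struct entry *get_entry(const char *name)
{
	struct entry *e = NULL;
	int i;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < MAX_ENTRIES; i++) {
		if (table[i].name[0] &&
		    !strncmp(table[i].name, name, NAME_SIZE)) {
			e = &table[i];
			/* Take the entry lock before dropping the table lock. */
			pthread_mutex_lock(&e->lock);
			break;
		}
	}
	pthread_mutex_unlock(&table_lock);

	return e;
}

static void put_entry(struct entry *e)
{
	if (e)
		pthread_mutex_unlock(&e->lock);
}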
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index 473fb8a87289..bf8afe9b5883 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -243,7 +243,7 @@ enum usbip_side {
#define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
#define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
-#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
+#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index 5b4c0864ad92..5d88917c9631 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -91,10 +91,6 @@ static void event_handler(struct work_struct *work)
unset_event(ud, USBIP_EH_UNUSABLE);
}
- /* Stop the error handler. */
- if (ud->event & USBIP_EH_BYE)
- usbip_dbg_eh("removed %p\n", ud);
-
wake_up(&ud->eh_waitq);
}
}
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 20e3d4609583..d11f3f8dad40 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -354,6 +354,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(" ClearHubFeature\n");
break;
case ClearPortFeature:
+ if (rhport < 0)
+ goto error;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if (hcd->speed == HCD_USB3) {
@@ -511,11 +513,16 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
+ if (rhport < 0)
+ goto error;
+
vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
break;
case USB_PORT_FEAT_POWER:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_POWER\n");
+ if (rhport < 0)
+ goto error;
if (hcd->speed == HCD_USB3)
vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
else
@@ -524,6 +531,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_BH_PORT_RESET:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
+ if (rhport < 0)
+ goto error;
/* Applicable only for USB3.0 hub */
if (hcd->speed != HCD_USB3) {
pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
@@ -534,6 +543,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_RESET:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_RESET\n");
+ if (rhport < 0)
+ goto error;
/* if it's already enabled, disable */
if (hcd->speed == HCD_USB3) {
vhci_hcd->port_status[rhport] = 0;
@@ -554,6 +565,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
wValue);
+ if (rhport < 0)
+ goto error;
if (hcd->speed == HCD_USB3) {
if ((vhci_hcd->port_status[rhport] &
USB_SS_PORT_STAT_POWER) != 0) {
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index bbf38befefb2..c4b49fca4871 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -46,8 +46,10 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
#define VHOST_NET_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
- * Using this limit prevents one virtqueue from starving rx. */
-#define VHOST_NET_PKT_WEIGHT(vq) ((vq)->num * 2)
+ * Using this limit prevents one virtqueue from starving others with small
+ * pkts.
+ */
+#define VHOST_NET_PKT_WEIGHT 256
/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
@@ -587,7 +589,7 @@ static void handle_tx(struct vhost_net *net)
vhost_zerocopy_signal_used(net, vq);
vhost_net_tx_packet(net);
if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
- unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT(vq))) {
+ unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT)) {
vhost_poll_queue(&vq->poll);
break;
}
@@ -769,6 +771,7 @@ static void handle_rx(struct vhost_net *net)
struct socket *sock;
struct iov_iter fixup;
__virtio16 num_buffers;
+ int recv_pkts = 0;
mutex_lock_nested(&vq->mutex, 0);
sock = vq->private_data;
@@ -872,7 +875,8 @@ static void handle_rx(struct vhost_net *net)
if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len);
total_len += vhost_len;
- if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+ if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
+ unlikely(++recv_pkts >= VHOST_NET_PKT_WEIGHT)) {
vhost_poll_queue(&vq->poll);
goto out;
}
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 190dbf8cfcb5..2f3856a95856 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -114,7 +114,7 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
}
out:
- kfree(req);
+ vbg_req_free(req, sizeof(*req));
kfree(pages);
}
@@ -144,7 +144,7 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
rc = vbg_req_perform(gdev, req);
- kfree(req);
+ vbg_req_free(req, sizeof(*req));
if (rc < 0) {
vbg_err("%s error: %d\n", __func__, rc);
@@ -214,8 +214,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
ret = vbg_status_code_to_errno(rc);
out_free:
- kfree(req2);
- kfree(req1);
+ vbg_req_free(req2, sizeof(*req2));
+ vbg_req_free(req1, sizeof(*req1));
return ret;
}
@@ -245,7 +245,7 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
rc = VINF_SUCCESS;
- kfree(req);
+ vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
@@ -431,7 +431,7 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
rc = vbg_req_perform(gdev, req);
do_div(req->interval_ns, 1000000); /* ns -> ms */
gdev->heartbeat_interval_ms = req->interval_ns;
- kfree(req);
+ vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
@@ -454,12 +454,6 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
if (ret < 0)
return ret;
- /*
- * Preallocate the request to use it from the timer callback because:
- * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
- * and the timer callback runs at DISPATCH_LEVEL;
- * 2) avoid repeated allocations.
- */
gdev->guest_heartbeat_req = vbg_req_alloc(
sizeof(*gdev->guest_heartbeat_req),
VMMDEVREQ_GUEST_HEARTBEAT);
@@ -481,8 +475,8 @@ static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
del_timer_sync(&gdev->heartbeat_timer);
vbg_heartbeat_host_config(gdev, false);
- kfree(gdev->guest_heartbeat_req);
-
+ vbg_req_free(gdev->guest_heartbeat_req,
+ sizeof(*gdev->guest_heartbeat_req));
}
/**
@@ -543,7 +537,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
if (rc < 0)
vbg_err("%s error, rc: %d\n", __func__, rc);
- kfree(req);
+ vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
@@ -617,7 +611,7 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
out:
mutex_unlock(&gdev->session_mutex);
- kfree(req);
+ vbg_req_free(req, sizeof(*req));
return ret;
}
@@ -642,7 +636,7 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
if (rc < 0)
vbg_err("%s error, rc: %d\n", __func__, rc);
- kfree(req);
+ vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
@@ -712,7 +706,7 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
out:
mutex_unlock(&gdev->session_mutex);
- kfree(req);
+ vbg_req_free(req, sizeof(*req));
return ret;
}
@@ -733,8 +727,10 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
rc = vbg_req_perform(gdev, req);
ret = vbg_status_code_to_errno(rc);
- if (ret)
+ if (ret) {
+ vbg_err("%s error: %d\n", __func__, rc);
goto out;
+ }
snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
req->major, req->minor, req->build, req->revision);
@@ -749,7 +745,7 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
}
out:
- kfree(req);
+ vbg_req_free(req, sizeof(*req));
return ret;
}
@@ -847,11 +843,16 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
return 0;
err_free_reqs:
- kfree(gdev->mouse_status_req);
- kfree(gdev->ack_events_req);
- kfree(gdev->cancel_req);
- kfree(gdev->mem_balloon.change_req);
- kfree(gdev->mem_balloon.get_req);
+ vbg_req_free(gdev->mouse_status_req,
+ sizeof(*gdev->mouse_status_req));
+ vbg_req_free(gdev->ack_events_req,
+ sizeof(*gdev->ack_events_req));
+ vbg_req_free(gdev->cancel_req,
+ sizeof(*gdev->cancel_req));
+ vbg_req_free(gdev->mem_balloon.change_req,
+ sizeof(*gdev->mem_balloon.change_req));
+ vbg_req_free(gdev->mem_balloon.get_req,
+ sizeof(*gdev->mem_balloon.get_req));
return ret;
}
@@ -872,11 +873,16 @@ void vbg_core_exit(struct vbg_dev *gdev)
vbg_reset_host_capabilities(gdev);
vbg_core_set_mouse_status(gdev, 0);
- kfree(gdev->mouse_status_req);
- kfree(gdev->ack_events_req);
- kfree(gdev->cancel_req);
- kfree(gdev->mem_balloon.change_req);
- kfree(gdev->mem_balloon.get_req);
+ vbg_req_free(gdev->mouse_status_req,
+ sizeof(*gdev->mouse_status_req));
+ vbg_req_free(gdev->ack_events_req,
+ sizeof(*gdev->ack_events_req));
+ vbg_req_free(gdev->cancel_req,
+ sizeof(*gdev->cancel_req));
+ vbg_req_free(gdev->mem_balloon.change_req,
+ sizeof(*gdev->mem_balloon.change_req));
+ vbg_req_free(gdev->mem_balloon.get_req,
+ sizeof(*gdev->mem_balloon.get_req));
}
/**
@@ -1415,7 +1421,7 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
req->flags = dump->u.in.flags;
dump->hdr.rc = vbg_req_perform(gdev, req);
- kfree(req);
+ vbg_req_free(req, sizeof(*req));
return 0;
}
@@ -1513,7 +1519,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
if (rc < 0)
vbg_err("%s error, rc: %d\n", __func__, rc);
- kfree(req);
+ vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 6c784bf4fa6d..7ad9ec45bfa9 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -171,4 +171,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
void vbg_linux_mouse_event(struct vbg_dev *gdev);
+/* Private (non-exported) functions from vboxguest_utils.c */
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+void vbg_req_free(void *req, size_t len);
+int vbg_req_perform(struct vbg_dev *gdev, void *req);
+int vbg_hgcm_call32(
+ struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
+ struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
+ int *vbox_status);
+
#endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 82e280d38cc2..398d22693234 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -87,6 +87,7 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
struct vbg_session *session = filp->private_data;
size_t returned_size, size;
struct vbg_ioctl_hdr hdr;
+ bool is_vmmdev_req;
int ret = 0;
void *buf;
@@ -106,8 +107,17 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
if (size > SZ_16M)
return -E2BIG;
- /* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */
- buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32);
+ /*
+ * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid
+ * the need for a bounce-buffer and another copy later on.
+ */
+ is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
+ req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
+
+ if (is_vmmdev_req)
+ buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
+ else
+ buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -132,7 +142,10 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
ret = -EFAULT;
out:
- kfree(buf);
+ if (is_vmmdev_req)
+ vbg_req_free(buf, size);
+ else
+ kfree(buf);
return ret;
}
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 0f0dab8023cf..bf4474214b4d 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -65,8 +65,9 @@ VBG_LOG(vbg_debug, pr_debug);
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
{
struct vmmdev_request_header *req;
+ int order = get_order(PAGE_ALIGN(len));
- req = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
+ req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
if (!req)
return NULL;
@@ -82,6 +83,14 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
return req;
}
+void vbg_req_free(void *req, size_t len)
+{
+ if (!req)
+ return;
+
+ free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
+}
+
/* Note this function returns a VBox status code, not a negative errno!! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
@@ -137,7 +146,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
rc = hgcm_connect->header.result;
}
- kfree(hgcm_connect);
+ vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));
*vbox_status = rc;
return 0;
@@ -166,7 +175,7 @@ int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
if (rc >= 0)
rc = hgcm_disconnect->header.result;
- kfree(hgcm_disconnect);
+ vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));
*vbox_status = rc;
return 0;
@@ -623,7 +632,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
}
if (!leak_it)
- kfree(call);
+ vbg_req_free(call, size);
free_bounce_bufs:
if (bounce_bufs) {
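The vbg_req_alloc()/vbg_req_free() change above switches request buffers from kmalloc() to whole pages, since the page allocator honors GFP_DMA32 (keeping the buffer below 4 GiB for the host) in a way slab allocations do not. A minimal sketch of the sizing math behind it; the wrapper names are hypothetical:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical wrappers mirroring the alloc/free pairing above. */
static void *req_buf_alloc(size_t len)
{
	/* Round up to whole pages, then convert to a power-of-two order. */
	int order = get_order(PAGE_ALIGN(len));

	return (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
}

static void req_buf_free(void *buf, size_t len)
{
	if (!buf)
		return;

	/* The same order must be recomputed (or stored) for the free. */
	free_pages((unsigned long)buf, get_order(PAGE_ALIGN(len)));
}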
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 66a7f5a2f474..9af07fd92763 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -514,6 +514,17 @@ config COH901327_WATCHDOG
This watchdog is used to reset the system and thus cannot be
compiled as a module.
+config NPCM7XX_WATCHDOG
+ bool "Nuvoton NPCM750 watchdog"
+ depends on ARCH_NPCM || COMPILE_TEST
+ default y if ARCH_NPCM750
+ select WATCHDOG_CORE
+ help
+ Say Y here to include Watchdog timer support for the
+ watchdog embedded into the NPCM7xx.
+ This watchdog is used to reset the system and thus cannot be
+ compiled as a module.
+
config TWL4030_WATCHDOG
tristate "TWL4030 Watchdog"
depends on TWL4030_CORE
@@ -1102,6 +1113,7 @@ config IT87_WDT
config HP_WATCHDOG
tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
+ select WATCHDOG_CORE
depends on X86 && PCI
help
A software monitoring watchdog and NMI sourcing driver. This driver
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index e4dd91f5585a..1d3c6b094fe5 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o
obj-$(CONFIG_RN5T618_WATCHDOG) += rn5t618_wdt.o
obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
+obj-$(CONFIG_NPCM7XX_WATCHDOG) += npcm_wdt.o
obj-$(CONFIG_STMP3XXX_RTC_WATCHDOG) += stmp3xxx_rtc_wdt.o
obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
obj-$(CONFIG_TS4800_WATCHDOG) += ts4800_wdt.o
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 6d5ae251e309..ee1ab12ab04f 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/watchdog/ar7_wdt.c
*
@@ -8,19 +9,6 @@
* National Semiconductor SCx200 Watchdog support
* Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c
index 7dd0da644a7f..2cf56b459d84 100644
--- a/drivers/watchdog/asm9260_wdt.c
+++ b/drivers/watchdog/asm9260_wdt.c
@@ -292,14 +292,14 @@ static int asm9260_wdt_probe(struct platform_device *pdev)
if (IS_ERR(priv->iobase))
return PTR_ERR(priv->iobase);
- ret = asm9260_wdt_get_dt_clks(priv);
- if (ret)
- return ret;
-
priv->rst = devm_reset_control_get_exclusive(&pdev->dev, "wdt_rst");
if (IS_ERR(priv->rst))
return PTR_ERR(priv->rst);
+ ret = asm9260_wdt_get_dt_clks(priv);
+ if (ret)
+ return ret;
+
wdd = &priv->wdd;
wdd->info = &asm9260_wdt_ident;
wdd->ops = &asm9260_wdt_ops;
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index ca5b91e2eb92..1abe4d021fd2 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -46,6 +46,7 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
#define WDT_RELOAD_VALUE 0x04
#define WDT_RESTART 0x08
#define WDT_CTRL 0x0C
+#define WDT_CTRL_BOOT_SECONDARY BIT(7)
#define WDT_CTRL_RESET_MODE_SOC (0x00 << 5)
#define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5)
#define WDT_CTRL_RESET_MODE_ARM_CPU (0x10 << 5)
@@ -54,6 +55,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
#define WDT_CTRL_WDT_INTR BIT(2)
#define WDT_CTRL_RESET_SYSTEM BIT(1)
#define WDT_CTRL_ENABLE BIT(0)
+#define WDT_TIMEOUT_STATUS 0x10
+#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1)
/*
* WDT_RESET_WIDTH controls the characteristics of the external pulse (if
@@ -158,6 +161,7 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd,
{
struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+ wdt->ctrl &= ~WDT_CTRL_BOOT_SECONDARY;
aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000);
mdelay(1000);
@@ -190,6 +194,7 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
struct device_node *np;
const char *reset_type;
u32 duration;
+ u32 status;
int ret;
wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
@@ -232,16 +237,21 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | WDT_CTRL_RESET_SYSTEM;
} else {
if (!strcmp(reset_type, "cpu"))
- wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU;
+ wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU |
+ WDT_CTRL_RESET_SYSTEM;
else if (!strcmp(reset_type, "soc"))
- wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC;
+ wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC |
+ WDT_CTRL_RESET_SYSTEM;
else if (!strcmp(reset_type, "system"))
- wdt->ctrl |= WDT_CTRL_RESET_SYSTEM;
+ wdt->ctrl |= WDT_CTRL_RESET_MODE_FULL_CHIP |
+ WDT_CTRL_RESET_SYSTEM;
else if (strcmp(reset_type, "none"))
return -EINVAL;
}
if (of_property_read_bool(np, "aspeed,external-signal"))
wdt->ctrl |= WDT_CTRL_WDT_EXT;
+ if (of_property_read_bool(np, "aspeed,alt-boot"))
+ wdt->ctrl |= WDT_CTRL_BOOT_SECONDARY;
if (readl(wdt->base + WDT_CTRL) & WDT_CTRL_ENABLE) {
/*
@@ -300,6 +310,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
writel(duration - 1, wdt->base + WDT_RESET_WIDTH);
}
+ status = readl(wdt->base + WDT_TIMEOUT_STATUS);
+ if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY)
+ wdt->wdd.bootstatus = WDIOF_CARDRESET;
+
ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd);
if (ret) {
dev_err(&pdev->dev, "failed to register\n");
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index e12a797cb820..b45fc0aee667 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for Atmel AT91RM9200 (Thunder)
*
* Copyright (C) 2003 SAN People (Pty) Ltd
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 88c05d0448b2..f4050a229eb5 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for Atmel AT91SAM9x processors.
*
* Copyright (C) 2008 Renaud CERRATO r.cerrato@til-technologies.fr
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
/*
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
index b79a83b467ce..390941c65eee 100644
--- a/drivers/watchdog/at91sam9_wdt.h
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* drivers/watchdog/at91sam9_wdt.h
*
@@ -7,10 +8,6 @@
* Watchdog Timer (WDT) - System peripherals registers.
* Based on AT91SAM9261 datasheet revision D.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef AT91_WDT_H
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index b339e0e67b4c..ed05514cc2dc 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for Broadcom BCM2835
*
@@ -7,10 +8,6 @@
*
* Copyright (C) 2013 Lubomir Rintel <lkundrak@v3.sk>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/delay.h>
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index f41b756d6dd5..05425c1dfd4c 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for Broadcom BCM47XX
*
@@ -5,10 +6,6 @@
* Copyright (C) 2009 Matthieu CASTET <castet.matthieu@free.fr>
* Copyright (C) 2012-2013 Hauke Mehrtens <hauke@hauke-m.de>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 8555afc70f9b..d3c1113e774c 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Broadcom BCM63xx SoC watchdog driver
*
* Copyright (C) 2007, Miguel Gaio <miguel.gaio@efixo.com>
* Copyright (C) 2008, Florian Fainelli <florian@openwrt.org>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index f88f546e8050..ce3f646e8077 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Broadcom Corporation
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -235,6 +227,6 @@ module_platform_driver(bcm7038_wdt_driver);
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for Broadcom 7038 SoCs Watchdog");
MODULE_AUTHOR("Justin Chen");
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index a5775dfd8d5f..1462be9e6fc5 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -1,14 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Broadcom Corporation
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/debugfs.h>
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 064cf7b6c1c5..3ec1f418837d 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cadence WDT driver - Used by Xilinx Zynq
*
* Copyright (C) 2010 - 2014 Xilinx, Inc.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/clk.h>
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index 4410337f4f7f..e3a78f927f83 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -1,8 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* coh901327_wdt.c
*
* Copyright (C) 2008-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
* Watchdog driver for the ST-Ericsson AB COH 901 327 IP core
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
@@ -67,7 +67,9 @@
#define U300_WDOG_IFR_WILL_BARK_IRQ_FORCE_ENABLE 0x0001U
/* Default timeout in seconds = 1 minute */
-static unsigned int margin = 60;
+#define U300_WDOG_DEFAULT_TIMEOUT 60
+
+static unsigned int margin;
static int irq;
static void __iomem *virtbase;
static struct device *parent;
@@ -235,8 +237,9 @@ static struct watchdog_device coh901327_wdt = {
* timeout register is max
* 0x7FFF = 327670ms ~= 327s.
*/
- .min_timeout = 0,
+ .min_timeout = 1,
.max_timeout = 327,
+ .timeout = U300_WDOG_DEFAULT_TIMEOUT,
};
static int __exit coh901327_remove(struct platform_device *pdev)
@@ -315,16 +318,15 @@ static int __init coh901327_probe(struct platform_device *pdev)
goto out_no_irq;
}
- ret = watchdog_init_timeout(&coh901327_wdt, margin, dev);
- if (ret < 0)
- coh901327_wdt.timeout = 60;
+ watchdog_init_timeout(&coh901327_wdt, margin, dev);
coh901327_wdt.parent = dev;
ret = watchdog_register_device(&coh901327_wdt);
if (ret)
goto out_no_wdog;
- dev_info(dev, "initialized. timer margin=%d sec\n", margin);
+ dev_info(dev, "initialized. (timeout=%d sec)\n",
+ coh901327_wdt.timeout);
return 0;
out_no_wdog:
@@ -419,5 +421,5 @@ MODULE_DESCRIPTION("COH 901 327 Watchdog");
module_param(margin, uint, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:coh901327-watchdog");
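The coh901327 change above replaces the old error handling around watchdog_init_timeout() with a default seeded into the watchdog_device itself. A sketch of that pattern for a hypothetical driver, assuming the same 60-second default and 327-second hardware limit:

#include <linux/device.h>
#include <linux/watchdog.h>

#define MY_WDT_DEFAULT_TIMEOUT 60	/* seconds; hypothetical driver */

static unsigned int margin;		/* module parameter, 0 = unset */

static struct watchdog_device my_wdd = {
	.min_timeout = 1,
	.max_timeout = 327,
	.timeout = MY_WDT_DEFAULT_TIMEOUT,	/* fallback default */
};

static int my_wdt_init_timeout(struct device *dev)
{
	/*
	 * watchdog_init_timeout() overrides .timeout only when the module
	 * parameter or the "timeout-sec" DT property holds a valid value;
	 * otherwise the default above survives, so the result is ignored.
	 */
	watchdog_init_timeout(&my_wdd, margin, dev);

	dev_info(dev, "timeout set to %u sec\n", my_wdd.timeout);
	return 0;
}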
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index d6d5006efa71..e263bad99574 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* System monitoring driver for DA9052 PMICs.
*
@@ -5,11 +6,6 @@
*
* Author: Anthony Olech <Anthony.Olech@diasemi.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/module.h>
diff --git a/drivers/watchdog/da9055_wdt.c b/drivers/watchdog/da9055_wdt.c
index 50bdd1022186..26a5b2984094 100644
--- a/drivers/watchdog/da9055_wdt.c
+++ b/drivers/watchdog/da9055_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* System monitoring driver for DA9055 PMICs.
*
@@ -5,11 +6,6 @@
*
* Author: David Dajun Chen <dchen@diasemi.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/module.h>
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index 814dff6045a4..a001782bbfdb 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog device driver for DA9062 and DA9061 PMICs
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index 2a20fc163ed0..b17ac1bb1f28 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for DA9063 PMICs.
*
@@ -5,10 +6,6 @@
*
* Author: Mariusz Wojtasik <mariusz.wojtasik@diasemi.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 3e4c592c239f..6c6594261cb7 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -236,15 +236,22 @@ static int davinci_wdt_probe(struct platform_device *pdev)
wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem);
- if (IS_ERR(davinci_wdt->base))
- return PTR_ERR(davinci_wdt->base);
+ if (IS_ERR(davinci_wdt->base)) {
+ ret = PTR_ERR(davinci_wdt->base);
+ goto err_clk_disable;
+ }
ret = watchdog_register_device(wdd);
- if (ret < 0) {
- clk_disable_unprepare(davinci_wdt->clk);
+ if (ret) {
dev_err(dev, "cannot register watchdog device\n");
+ goto err_clk_disable;
}
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(davinci_wdt->clk);
+
return ret;
}
diff --git a/drivers/watchdog/digicolor_wdt.c b/drivers/watchdog/digicolor_wdt.c
index 5e4ef93caa02..a9e11df155b8 100644
--- a/drivers/watchdog/digicolor_wdt.c
+++ b/drivers/watchdog/digicolor_wdt.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for Conexant Digicolor
*
* Copyright (C) 2015 Paradox Innovation Ltd.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/types.h>
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index c2f4ff516230..501aebb5b81f 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -34,6 +34,7 @@
#define WDOG_CONTROL_REG_OFFSET 0x00
#define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
+#define WDOG_CONTROL_REG_RESP_MODE_MASK 0x02
#define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04
#define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT 4
#define WDOG_CURRENT_COUNT_REG_OFFSET 0x08
@@ -56,6 +57,9 @@ struct dw_wdt {
unsigned long rate;
struct watchdog_device wdd;
struct reset_control *rst;
+ /* Save/restore */
+ u32 control;
+ u32 timeout;
};
#define to_dw_wdt(wdd) container_of(wdd, struct dw_wdt, wdd)
@@ -121,14 +125,23 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
return 0;
}
+static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt)
+{
+ u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+
+ /* Disable interrupt mode; always perform system reset. */
+ val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
+ /* Enable watchdog. */
+ val |= WDOG_CONTROL_REG_WDT_EN_MASK;
+ writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+}
+
static int dw_wdt_start(struct watchdog_device *wdd)
{
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
dw_wdt_set_timeout(wdd, wdd->timeout);
-
- writel(WDOG_CONTROL_REG_WDT_EN_MASK,
- dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+ dw_wdt_arm_system_reset(dw_wdt);
return 0;
}
@@ -152,16 +165,13 @@ static int dw_wdt_restart(struct watchdog_device *wdd,
unsigned long action, void *data)
{
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
- u32 val;
writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
- val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
- if (val & WDOG_CONTROL_REG_WDT_EN_MASK)
+ if (dw_wdt_is_enabled(dw_wdt))
writel(WDOG_COUNTER_RESTART_KICK_VALUE,
dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET);
else
- writel(WDOG_CONTROL_REG_WDT_EN_MASK,
- dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+ dw_wdt_arm_system_reset(dw_wdt);
/* wait for reset to assert... */
mdelay(500);
@@ -198,6 +208,9 @@ static int dw_wdt_suspend(struct device *dev)
{
struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
+ dw_wdt->control = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+ dw_wdt->timeout = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+
clk_disable_unprepare(dw_wdt->clk);
return 0;
@@ -211,6 +224,9 @@ static int dw_wdt_resume(struct device *dev)
if (err)
return err;
+ writel(dw_wdt->timeout, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+ writel(dw_wdt->control, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+
dw_wdt_ping(&dw_wdt->wdd);
return 0;
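Side note on the dw_wdt suspend/resume hunks above: the control and timeout-range registers sit behind the gated clock, so they are latched on suspend and written back on resume before the watchdog is pinged. A minimal sketch of the same save/restore idea with invented names and register offsets (nothing below is taken from dw_wdt itself):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm.h>

#define FOO_CONTROL	0x00
#define FOO_TIMEOUT	0x04

struct foo_wdt {
	void __iomem *regs;
	struct clk *clk;
	u32 control;		/* register state saved across suspend */
	u32 timeout;
};

static int foo_wdt_suspend(struct device *dev)
{
	struct foo_wdt *fw = dev_get_drvdata(dev);

	/* Latch register state before the clock (and the block) is gated. */
	fw->control = readl(fw->regs + FOO_CONTROL);
	fw->timeout = readl(fw->regs + FOO_TIMEOUT);
	clk_disable_unprepare(fw->clk);

	return 0;
}

static int foo_wdt_resume(struct device *dev)
{
	struct foo_wdt *fw = dev_get_drvdata(dev);
	int err = clk_prepare_enable(fw->clk);

	if (err)
		return err;

	/* Restore the timeout first so re-enabling cannot use a stale value. */
	writel(fw->timeout, fw->regs + FOO_TIMEOUT);
	writel(fw->control, fw->regs + FOO_CONTROL);

	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_wdt_pm_ops, foo_wdt_suspend, foo_wdt_resume);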
diff --git a/drivers/watchdog/ebc-c384_wdt.c b/drivers/watchdog/ebc-c384_wdt.c
index 2170b275ea01..4c4c8ce78021 100644
--- a/drivers/watchdog/ebc-c384_wdt.c
+++ b/drivers/watchdog/ebc-c384_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Watchdog timer driver for the WinSystems EBC-C384
* Copyright (C) 2016 William Breathitt Gray
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 3a33c5344bd5..9a1c761258ce 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -496,7 +496,7 @@ static bool watchdog_is_running(void)
is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
&& (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
- & F71808FG_FLAG_WD_EN);
+ & BIT(F71808FG_FLAG_WD_EN));
superio_exit(watchdog.sioaddr);
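The f71808e_wdt one-liner above fixes a bit-index versus bit-mask mix-up: F71808FG_FLAG_WD_EN is a bit position, so masking the configuration register with the bare constant tests the wrong bits. A tiny illustration of the failure mode (the value 5 is only an example for this sketch, not necessarily the real flag value):

#include <linux/bitops.h>
#include <linux/types.h>

#define WD_EN_FLAG	5	/* a bit *position*, not a mask */

static bool wd_enabled_buggy(u8 conf)
{
	return conf & WD_EN_FLAG;	/* 5 = 0b101: tests bits 0 and 2 */
}

static bool wd_enabled_fixed(u8 conf)
{
	return conf & BIT(WD_EN_FLAG);	/* 0x20: tests the intended bit 5 */
}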
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index 3ade28190341..ea77cae03c9d 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -152,9 +152,9 @@ static int gpio_wdt_probe(struct platform_device *pdev)
priv->wdd.min_timeout = SOFT_TIMEOUT_MIN;
priv->wdd.max_hw_heartbeat_ms = hw_margin;
priv->wdd.parent = dev;
+ priv->wdd.timeout = SOFT_TIMEOUT_DEF;
- if (watchdog_init_timeout(&priv->wdd, 0, dev) < 0)
- priv->wdd.timeout = SOFT_TIMEOUT_DEF;
+ watchdog_init_timeout(&priv->wdd, 0, &pdev->dev);
watchdog_stop_on_reboot(&priv->wdd);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index b0a158073abd..a43ab2cecca2 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -4,7 +4,7 @@
*
* SoftDog 0.05: A Software Watchdog Device
*
- * (c) Copyright 2015 Hewlett Packard Enterprise Development LP
+ * (c) Copyright 2018 Hewlett Packard Enterprise Development LP
* Thomas Mingarelli <thomas.mingarelli@hpe.com>
*
* This program is free software; you can redistribute it and/or
@@ -16,34 +16,27 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
-#include <linux/fs.h>
#include <linux/io.h>
-#include <linux/bitops.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/watchdog.h>
#include <asm/nmi.h>
-#define HPWDT_VERSION "1.4.0"
+#define HPWDT_VERSION "2.0.0"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535)
#define DEFAULT_MARGIN 30
+#define PRETIMEOUT_SEC 9
+static bool ilo5;
static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
-static unsigned int reload; /* the computed soft_margin */
static bool nowayout = WATCHDOG_NOWAYOUT;
-#ifdef CONFIG_HPWDT_NMI_DECODING
-static unsigned int allow_kdump = 1;
-#endif
-static char expect_release;
-static unsigned long hpwdt_is_open;
+static bool pretimeout = IS_ENABLED(CONFIG_HPWDT_NMI_DECODING);
static void __iomem *pci_mem_addr; /* the PCI-memory address */
static unsigned long __iomem *hpwdt_nmistat;
@@ -61,48 +54,92 @@ MODULE_DEVICE_TABLE(pci, hpwdt_devices);
/*
* Watchdog operations
*/
-static void hpwdt_start(void)
+static int hpwdt_start(struct watchdog_device *wdd)
{
- reload = SECS_TO_TICKS(soft_margin);
+ int control = 0x81 | (pretimeout ? 0x4 : 0);
+ int reload = SECS_TO_TICKS(wdd->timeout);
+
+ dev_dbg(wdd->parent, "start watchdog 0x%08x:0x%02x\n", reload, control);
iowrite16(reload, hpwdt_timer_reg);
- iowrite8(0x85, hpwdt_timer_con);
+ iowrite8(control, hpwdt_timer_con);
+
+ return 0;
}
static void hpwdt_stop(void)
{
unsigned long data;
+ pr_debug("stop watchdog\n");
+
data = ioread8(hpwdt_timer_con);
data &= 0xFE;
iowrite8(data, hpwdt_timer_con);
}
-static void hpwdt_ping(void)
+static int hpwdt_stop_core(struct watchdog_device *wdd)
{
- iowrite16(reload, hpwdt_timer_reg);
+ hpwdt_stop();
+
+ return 0;
}
-static int hpwdt_change_timer(int new_margin)
+static int hpwdt_ping(struct watchdog_device *wdd)
{
- if (new_margin < 1 || new_margin > HPWDT_MAX_TIMER) {
- pr_warn("New value passed in is invalid: %d seconds\n",
- new_margin);
- return -EINVAL;
- }
+ int reload = SECS_TO_TICKS(wdd->timeout);
- soft_margin = new_margin;
- pr_debug("New timer passed in is %d seconds\n", new_margin);
- reload = SECS_TO_TICKS(soft_margin);
+ dev_dbg(wdd->parent, "ping watchdog 0x%08x\n", reload);
+ iowrite16(reload, hpwdt_timer_reg);
return 0;
}
-static int hpwdt_time_left(void)
+static unsigned int hpwdt_gettimeleft(struct watchdog_device *wdd)
{
return TICKS_TO_SECS(ioread16(hpwdt_timer_reg));
}
+static int hpwdt_settimeout(struct watchdog_device *wdd, unsigned int val)
+{
+ dev_dbg(wdd->parent, "set_timeout = %d\n", val);
+
+ wdd->timeout = val;
+ if (val <= wdd->pretimeout) {
+ dev_dbg(wdd->parent, "pretimeout < timeout. Setting to zero\n");
+ wdd->pretimeout = 0;
+ pretimeout = 0;
+ if (watchdog_active(wdd))
+ hpwdt_start(wdd);
+ }
+ hpwdt_ping(wdd);
+
+ return 0;
+}
+
#ifdef CONFIG_HPWDT_NMI_DECODING
+static int hpwdt_set_pretimeout(struct watchdog_device *wdd, unsigned int req)
+{
+ unsigned int val = 0;
+
+ dev_dbg(wdd->parent, "set_pretimeout = %d\n", req);
+ if (req) {
+ val = PRETIMEOUT_SEC;
+ if (val >= wdd->timeout)
+ return -EINVAL;
+ }
+
+ if (val != req)
+ dev_dbg(wdd->parent, "Rounding pretimeout to: %d\n", val);
+
+ wdd->pretimeout = val;
+ pretimeout = !!val;
+
+ if (watchdog_active(wdd))
+ hpwdt_start(wdd);
+
+ return 0;
+}
+
static int hpwdt_my_nmi(void)
{
return ioread8(hpwdt_nmistat) & 0x6;
@@ -113,178 +150,71 @@ static int hpwdt_my_nmi(void)
*/
static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
{
- if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
- return NMI_DONE;
-
- if (allow_kdump)
- hpwdt_stop();
-
- nmi_panic(regs, "An NMI occurred. Depending on your system the reason "
- "for the NMI is logged in any one of the following "
- "resources:\n"
+ unsigned int mynmi = hpwdt_my_nmi();
+ static char panic_msg[] =
+ "00: An NMI occurred. Depending on your system the reason "
+ "for the NMI is logged in any one of the following resources:\n"
"1. Integrated Management Log (IML)\n"
"2. OA Syslog\n"
"3. OA Forward Progress Log\n"
- "4. iLO Event Log");
-
- return NMI_HANDLED;
-}
-#endif /* CONFIG_HPWDT_NMI_DECODING */
-
-/*
- * /dev/watchdog handling
- */
-static int hpwdt_open(struct inode *inode, struct file *file)
-{
- /* /dev/watchdog can only be opened once */
- if (test_and_set_bit(0, &hpwdt_is_open))
- return -EBUSY;
+ "4. iLO Event Log";
- /* Start the watchdog */
- hpwdt_start();
- hpwdt_ping();
-
- return nonseekable_open(inode, file);
-}
+ if (ilo5 && ulReason == NMI_UNKNOWN && mynmi)
+ return NMI_DONE;
-static int hpwdt_release(struct inode *inode, struct file *file)
-{
- /* Stop the watchdog */
- if (expect_release == 42) {
- hpwdt_stop();
- } else {
- pr_crit("Unexpected close, not stopping watchdog!\n");
- hpwdt_ping();
- }
+ if (ilo5 && !pretimeout)
+ return NMI_DONE;
- expect_release = 0;
+ hpwdt_stop();
- /* /dev/watchdog is being closed, make sure it can be re-opened */
- clear_bit(0, &hpwdt_is_open);
+ hex_byte_pack(panic_msg, mynmi);
+ nmi_panic(regs, panic_msg);
- return 0;
+ return NMI_HANDLED;
}
+#endif /* CONFIG_HPWDT_NMI_DECODING */
-static ssize_t hpwdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- /* See if we got the magic character 'V' and reload the timer */
- if (len) {
- if (!nowayout) {
- size_t i;
-
- /* note: just in case someone wrote the magic character
- * five months ago... */
- expect_release = 0;
-
- /* scan to see whether or not we got the magic char. */
- for (i = 0; i != len; i++) {
- char c;
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- expect_release = 42;
- }
- }
-
- /* someone wrote to us, we should reload the timer */
- hpwdt_ping();
- }
-
- return len;
-}
static const struct watchdog_info ident = {
- .options = WDIOF_SETTIMEOUT |
+ .options = WDIOF_PRETIMEOUT |
+ WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
.identity = "HPE iLO2+ HW Watchdog Timer",
};
-static long hpwdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_margin, options;
- int ret = -ENOTTY;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = 0;
- if (copy_to_user(argp, &ident, sizeof(ident)))
- ret = -EFAULT;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, p);
- break;
-
- case WDIOC_KEEPALIVE:
- hpwdt_ping();
- ret = 0;
- break;
-
- case WDIOC_SETOPTIONS:
- ret = get_user(options, p);
- if (ret)
- break;
-
- if (options & WDIOS_DISABLECARD)
- hpwdt_stop();
-
- if (options & WDIOS_ENABLECARD) {
- hpwdt_start();
- hpwdt_ping();
- }
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(new_margin, p);
- if (ret)
- break;
-
- ret = hpwdt_change_timer(new_margin);
- if (ret)
- break;
-
- hpwdt_ping();
- /* Fall */
- case WDIOC_GETTIMEOUT:
- ret = put_user(soft_margin, p);
- break;
-
- case WDIOC_GETTIMELEFT:
- ret = put_user(hpwdt_time_left(), p);
- break;
- }
- return ret;
-}
-
/*
* Kernel interfaces
*/
-static const struct file_operations hpwdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = hpwdt_write,
- .unlocked_ioctl = hpwdt_ioctl,
- .open = hpwdt_open,
- .release = hpwdt_release,
+
+static const struct watchdog_ops hpwdt_ops = {
+ .owner = THIS_MODULE,
+ .start = hpwdt_start,
+ .stop = hpwdt_stop_core,
+ .ping = hpwdt_ping,
+ .set_timeout = hpwdt_settimeout,
+ .get_timeleft = hpwdt_gettimeleft,
+#ifdef CONFIG_HPWDT_NMI_DECODING
+ .set_pretimeout = hpwdt_set_pretimeout,
+#endif
};
-static struct miscdevice hpwdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &hpwdt_fops,
+static struct watchdog_device hpwdt_dev = {
+ .info = &ident,
+ .ops = &hpwdt_ops,
+ .min_timeout = 1,
+ .max_timeout = HPWDT_MAX_TIMER,
+ .timeout = DEFAULT_MARGIN,
+#ifdef CONFIG_HPWDT_NMI_DECODING
+ .pretimeout = PRETIMEOUT_SEC,
+#endif
};
+
/*
* Init & Exit
*/
-
static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
{
#ifdef CONFIG_HPWDT_NMI_DECODING
@@ -303,9 +233,8 @@ static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
goto error2;
dev_info(&dev->dev,
- "HPE Watchdog Timer Driver: NMI decoding initialized"
- ", allow kernel dump: %s (default = 1/ON)\n",
- (allow_kdump == 0) ? "OFF" : "ON");
+ "HPE Watchdog Timer Driver: NMI decoding initialized\n");
+
return 0;
error2:
@@ -375,29 +304,32 @@ static int hpwdt_init_one(struct pci_dev *dev,
/* Make sure that timer is disabled until /dev/watchdog is opened */
hpwdt_stop();
- /* Make sure that we have a valid soft_margin */
- if (hpwdt_change_timer(soft_margin))
- hpwdt_change_timer(DEFAULT_MARGIN);
-
/* Initialize NMI Decoding functionality */
retval = hpwdt_init_nmi_decoding(dev);
if (retval != 0)
goto error_init_nmi_decoding;
- retval = misc_register(&hpwdt_miscdev);
+ watchdog_set_nowayout(&hpwdt_dev, nowayout);
+ if (watchdog_init_timeout(&hpwdt_dev, soft_margin, NULL))
+ dev_warn(&dev->dev, "Invalid soft_margin: %d.\n", soft_margin);
+
+ hpwdt_dev.parent = &dev->dev;
+ retval = watchdog_register_device(&hpwdt_dev);
if (retval < 0) {
- dev_warn(&dev->dev,
- "Unable to register miscdev on minor=%d (err=%d).\n",
- WATCHDOG_MINOR, retval);
- goto error_misc_register;
+ dev_err(&dev->dev, "watchdog register failed: %d.\n", retval);
+ goto error_wd_register;
}
dev_info(&dev->dev, "HPE Watchdog Timer Driver: %s"
", timer margin: %d seconds (nowayout=%d).\n",
- HPWDT_VERSION, soft_margin, nowayout);
+ HPWDT_VERSION, hpwdt_dev.timeout, nowayout);
+
+ if (dev->subsystem_vendor == PCI_VENDOR_ID_HP_3PAR)
+ ilo5 = true;
+
return 0;
-error_misc_register:
+error_wd_register:
hpwdt_exit_nmi_decoding();
error_init_nmi_decoding:
pci_iounmap(dev, pci_mem_addr);
@@ -411,7 +343,7 @@ static void hpwdt_exit(struct pci_dev *dev)
if (!nowayout)
hpwdt_stop();
- misc_deregister(&hpwdt_miscdev);
+ watchdog_unregister_device(&hpwdt_dev);
hpwdt_exit_nmi_decoding();
pci_iounmap(dev, pci_mem_addr);
pci_disable_device(dev);
@@ -425,7 +357,7 @@ static struct pci_driver hpwdt_driver = {
};
MODULE_AUTHOR("Tom Mingarelli");
-MODULE_DESCRIPTION("hp watchdog driver");
+MODULE_DESCRIPTION("hpe watchdog driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(HPWDT_VERSION);
@@ -437,8 +369,8 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
#ifdef CONFIG_HPWDT_NMI_DECODING
-module_param(allow_kdump, int, 0);
-MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
-#endif /* CONFIG_HPWDT_NMI_DECODING */
+module_param(pretimeout, bool, 0);
+MODULE_PARM_DESC(pretimeout, "Watchdog pretimeout enabled");
+#endif
module_pci_driver(hpwdt_driver);
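For orientation in the large hpwdt hunk above: the driver drops its hand-rolled /dev/watchdog plumbing (open/release, the 'V' magic-close bookkeeping, the WDIOC_* ioctl switch, misc_register) and instead describes itself to the watchdog core, which provides all of that. The skeleton of such a conversion, using generic stub names rather than hpwdt internals, looks roughly like this:

#include <linux/module.h>
#include <linux/watchdog.h>

static int foo_start(struct watchdog_device *wdd) { return 0; }
static int foo_stop(struct watchdog_device *wdd)  { return 0; }
static int foo_ping(struct watchdog_device *wdd)  { return 0; }

static int foo_set_timeout(struct watchdog_device *wdd, unsigned int t)
{
	wdd->timeout = t;	/* stub: real drivers also program the hardware */
	return 0;
}

static const struct watchdog_info foo_info = {
	/* GETSUPPORT, GETSTATUS, KEEPALIVE, SETOPTIONS ioctls are core-handled */
	.options  = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity = "foo watchdog",
};

static const struct watchdog_ops foo_ops = {
	.owner       = THIS_MODULE,
	.start       = foo_start,		/* was: open() starting the timer */
	.stop        = foo_stop,		/* was: release() after magic close */
	.ping        = foo_ping,		/* was: write() / WDIOC_KEEPALIVE */
	.set_timeout = foo_set_timeout,	/* was: WDIOC_SETTIMEOUT handling */
};

static struct watchdog_device foo_wdd = {
	.info        = &foo_info,
	.ops         = &foo_ops,
	.min_timeout = 1,
	.max_timeout = 510,
	.timeout     = 30,
};

/*
 * In probe: watchdog_set_nowayout() then watchdog_register_device(&foo_wdd)
 * replaces misc_register(); watchdog_unregister_device() replaces
 * misc_deregister() on removal.
 */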
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 518dfa1047cb..f07850d2c977 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -76,7 +76,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static unsigned timeout = IMX2_WDT_DEFAULT_TIME;
+static unsigned timeout;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
__MODULE_STRING(IMX2_WDT_DEFAULT_TIME) ")");
@@ -281,6 +281,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
wdog->info = &imx2_wdt_info;
wdog->ops = &imx2_wdt_ops;
wdog->min_timeout = 1;
+ wdog->timeout = IMX2_WDT_DEFAULT_TIME;
wdog->max_hw_heartbeat_ms = IMX2_WDT_MAX_TIME * 1000;
wdog->parent = &pdev->dev;
@@ -299,11 +300,6 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
wdev->ext_reset = of_property_read_bool(pdev->dev.of_node,
"fsl,ext-reset-output");
- wdog->timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
- if (wdog->timeout != timeout)
- dev_warn(&pdev->dev, "Initial timeout out of range! Clamped from %u to %u\n",
- timeout, wdog->timeout);
-
platform_set_drvdata(pdev, wdog);
watchdog_set_drvdata(wdog, wdev);
watchdog_set_nowayout(wdog, nowayout);
diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
index b4221f43cd94..331cadb459ac 100644
--- a/drivers/watchdog/lpc18xx_wdt.c
+++ b/drivers/watchdog/lpc18xx_wdt.c
@@ -265,7 +265,7 @@ static int lpc18xx_wdt_probe(struct platform_device *pdev)
lpc18xx_wdt->wdt_dev.parent = dev;
watchdog_set_drvdata(&lpc18xx_wdt->wdt_dev, lpc18xx_wdt);
- ret = watchdog_init_timeout(&lpc18xx_wdt->wdt_dev, heartbeat, dev);
+ watchdog_init_timeout(&lpc18xx_wdt->wdt_dev, heartbeat, dev);
__lpc18xx_wdt_set_timeout(lpc18xx_wdt);
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
index b8194b02abe0..8023cf28657a 100644
--- a/drivers/watchdog/mei_wdt.c
+++ b/drivers/watchdog/mei_wdt.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/module.h>
@@ -687,5 +679,5 @@ static struct mei_cl_driver mei_wdt_driver = {
module_mei_cl_driver(mei_wdt_driver);
MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Device driver for Intel MEI iAMT watchdog");
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c
index 045201a6fdb3..25d5d2b8cfbe 100644
--- a/drivers/watchdog/mena21_wdt.c
+++ b/drivers/watchdog/mena21_wdt.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for the A21 VME CPU Boards
*
* Copyright (C) 2013 MEN Mikro Elektronik Nuernberg GmbH
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index 69a5a57f1446..69adeab3fde7 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -1,56 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * BSD LICENSE
- *
- * Copyright (c) 2016 BayLibre, SAS.
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/clk.h>
#include <linux/err.h>
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index 304274c67735..cd0275a6cdac 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -36,7 +36,7 @@
#define MESON_SEC_TO_TC(s, c) ((s) * (c))
static bool nowayout = WATCHDOG_NOWAYOUT;
-static unsigned int timeout = MESON_WDT_TIMEOUT;
+static unsigned int timeout;
struct meson_wdt_data {
unsigned int enable;
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 7ed417a765c7..4baf64f21aa1 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Mediatek Watchdog Driver
*
@@ -5,16 +6,6 @@
*
* Matthias Brugger <matthias.bgg@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Based on sunxi_wdt.c
*/
@@ -57,7 +48,7 @@
#define DRV_VERSION "1.0"
static bool nowayout = WATCHDOG_NOWAYOUT;
-static unsigned int timeout = WDT_MAX_TIMEOUT;
+static unsigned int timeout;
struct mtk_wdt_dev {
struct watchdog_device wdt_dev;
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index ca360d204548..1fa7d2b32494 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for the MTX-1 Watchdog.
*
@@ -6,16 +7,6 @@
* http://www.4g-systems.biz
*
* (C) Copyright 2007 OpenWrt.org, Florian Fainelli <florian@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * Neither Michael Stickel nor 4G Systems admit liability nor provide
- * warranty for any of this software. This material is provided
- * "AS-IS" and at no charge.
- *
* (c) Copyright 2005 4G Systems <info@4g-systems.biz>
*
* Release 0.01.
diff --git a/drivers/watchdog/npcm_wdt.c b/drivers/watchdog/npcm_wdt.c
new file mode 100644
index 000000000000..0d4213652ecc
--- /dev/null
+++ b/drivers/watchdog/npcm_wdt.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Nuvoton Technology corporation.
+// Copyright (c) 2018 IBM Corp.
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+#define NPCM_WTCR 0x1C
+
+#define NPCM_WTCLK (BIT(10) | BIT(11)) /* Clock divider */
+#define NPCM_WTE BIT(7) /* Enable */
+#define NPCM_WTIE BIT(6) /* Enable irq */
+#define NPCM_WTIS (BIT(4) | BIT(5)) /* Interval selection */
+#define NPCM_WTIF BIT(3) /* Interrupt flag*/
+#define NPCM_WTRF BIT(2) /* Reset flag */
+#define NPCM_WTRE BIT(1) /* Reset enable */
+#define NPCM_WTR BIT(0) /* Reset counter */
+
+/*
+ * Watchdog timeouts
+ *
+ * 170 msec: WTCLK=01 WTIS=00 VAL= 0x400
+ * 670 msec: WTCLK=01 WTIS=01 VAL= 0x410
+ * 1360 msec: WTCLK=10 WTIS=00 VAL= 0x800
+ * 2700 msec: WTCLK=01 WTIS=10 VAL= 0x420
+ * 5360 msec: WTCLK=10 WTIS=01 VAL= 0x810
+ * 10700 msec: WTCLK=01 WTIS=11 VAL= 0x430
+ * 21600 msec: WTCLK=10 WTIS=10 VAL= 0x820
+ * 43000 msec: WTCLK=11 WTIS=00 VAL= 0xC00
+ * 85600 msec: WTCLK=10 WTIS=11 VAL= 0x830
+ * 172000 msec: WTCLK=11 WTIS=01 VAL= 0xC10
+ * 687000 msec: WTCLK=11 WTIS=10 VAL= 0xC20
+ * 2750000 msec: WTCLK=11 WTIS=11 VAL= 0xC30
+ */
+
+struct npcm_wdt {
+ struct watchdog_device wdd;
+ void __iomem *reg;
+};
+
+static inline struct npcm_wdt *to_npcm_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct npcm_wdt, wdd);
+}
+
+static int npcm_wdt_ping(struct watchdog_device *wdd)
+{
+ struct npcm_wdt *wdt = to_npcm_wdt(wdd);
+ u32 val;
+
+ val = readl(wdt->reg);
+ writel(val | NPCM_WTR, wdt->reg);
+
+ return 0;
+}
+
+static int npcm_wdt_start(struct watchdog_device *wdd)
+{
+ struct npcm_wdt *wdt = to_npcm_wdt(wdd);
+ u32 val;
+
+ if (wdd->timeout < 2)
+ val = 0x800;
+ else if (wdd->timeout < 3)
+ val = 0x420;
+ else if (wdd->timeout < 6)
+ val = 0x810;
+ else if (wdd->timeout < 11)
+ val = 0x430;
+ else if (wdd->timeout < 22)
+ val = 0x820;
+ else if (wdd->timeout < 44)
+ val = 0xC00;
+ else if (wdd->timeout < 87)
+ val = 0x830;
+ else if (wdd->timeout < 173)
+ val = 0xC10;
+ else if (wdd->timeout < 688)
+ val = 0xC20;
+ else
+ val = 0xC30;
+
+ val |= NPCM_WTRE | NPCM_WTE | NPCM_WTR | NPCM_WTIE;
+
+ writel(val, wdt->reg);
+
+ return 0;
+}
+
+static int npcm_wdt_stop(struct watchdog_device *wdd)
+{
+ struct npcm_wdt *wdt = to_npcm_wdt(wdd);
+
+ writel(0, wdt->reg);
+
+ return 0;
+}
+
+
+static int npcm_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ if (timeout < 2)
+ wdd->timeout = 1;
+ else if (timeout < 3)
+ wdd->timeout = 2;
+ else if (timeout < 6)
+ wdd->timeout = 5;
+ else if (timeout < 11)
+ wdd->timeout = 10;
+ else if (timeout < 22)
+ wdd->timeout = 21;
+ else if (timeout < 44)
+ wdd->timeout = 43;
+ else if (timeout < 87)
+ wdd->timeout = 86;
+ else if (timeout < 173)
+ wdd->timeout = 172;
+ else if (timeout < 688)
+ wdd->timeout = 687;
+ else
+ wdd->timeout = 2750;
+
+ if (watchdog_active(wdd))
+ npcm_wdt_start(wdd);
+
+ return 0;
+}
+
+static irqreturn_t npcm_wdt_interrupt(int irq, void *data)
+{
+ struct npcm_wdt *wdt = data;
+
+ watchdog_notify_pretimeout(&wdt->wdd);
+
+ return IRQ_HANDLED;
+}
+
+static int npcm_wdt_restart(struct watchdog_device *wdd,
+ unsigned long action, void *data)
+{
+ struct npcm_wdt *wdt = to_npcm_wdt(wdd);
+
+ writel(NPCM_WTR | NPCM_WTRE | NPCM_WTE, wdt->reg);
+ udelay(1000);
+
+ return 0;
+}
+
+static bool npcm_is_running(struct watchdog_device *wdd)
+{
+ struct npcm_wdt *wdt = to_npcm_wdt(wdd);
+
+ return readl(wdt->reg) & NPCM_WTE;
+}
+
+static const struct watchdog_info npcm_wdt_info = {
+ .identity = KBUILD_MODNAME,
+ .options = WDIOF_SETTIMEOUT
+ | WDIOF_KEEPALIVEPING
+ | WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops npcm_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = npcm_wdt_start,
+ .stop = npcm_wdt_stop,
+ .ping = npcm_wdt_ping,
+ .set_timeout = npcm_wdt_set_timeout,
+ .restart = npcm_wdt_restart,
+};
+
+static int npcm_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct npcm_wdt *wdt;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(wdt->reg))
+ return PTR_ERR(wdt->reg);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ wdt->wdd.info = &npcm_wdt_info;
+ wdt->wdd.ops = &npcm_wdt_ops;
+ wdt->wdd.min_timeout = 1;
+ wdt->wdd.max_timeout = 2750;
+ wdt->wdd.parent = dev;
+
+ wdt->wdd.timeout = 86;
+ watchdog_init_timeout(&wdt->wdd, 0, dev);
+
+ /* Ensure timeout is able to be represented by the hardware */
+ npcm_wdt_set_timeout(&wdt->wdd, wdt->wdd.timeout);
+
+ if (npcm_is_running(&wdt->wdd)) {
+ /* Restart with the default or device-tree specified timeout */
+ npcm_wdt_start(&wdt->wdd);
+ set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
+ }
+
+ ret = devm_request_irq(dev, irq, npcm_wdt_interrupt, 0,
+ "watchdog", wdt);
+ if (ret)
+ return ret;
+
+ ret = devm_watchdog_register_device(dev, &wdt->wdd);
+ if (ret) {
+ dev_err(dev, "failed to register watchdog\n");
+ return ret;
+ }
+
+ dev_info(dev, "NPCM watchdog driver enabled\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id npcm_wdt_match[] = {
+ {.compatible = "nuvoton,npcm750-wdt"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, npcm_wdt_match);
+#endif
+
+static struct platform_driver npcm_wdt_driver = {
+ .probe = npcm_wdt_probe,
+ .driver = {
+ .name = "npcm-wdt",
+ .of_match_table = of_match_ptr(npcm_wdt_match),
+ },
+};
+module_platform_driver(npcm_wdt_driver);
+
+MODULE_AUTHOR("Joel Stanley");
+MODULE_DESCRIPTION("Watchdog driver for NPCM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 1cf286945b7a..4acbe05e27bb 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog Device Driver for Xilinx axi/xps_timebase_wdt
*
* (C) Copyright 2013 - 2014 Xilinx, Inc.
* (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/clk.h>
@@ -323,4 +319,4 @@ module_platform_driver(xwdt_driver);
MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>");
MODULE_DESCRIPTION("Xilinx Watchdog driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 1b02bfa81b29..ae77112ce97f 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -253,10 +253,10 @@ static int omap_wdt_probe(struct platform_device *pdev)
wdev->wdog.ops = &omap_wdt_ops;
wdev->wdog.min_timeout = TIMER_MARGIN_MIN;
wdev->wdog.max_timeout = TIMER_MARGIN_MAX;
+ wdev->wdog.timeout = TIMER_MARGIN_DEFAULT;
wdev->wdog.parent = &pdev->dev;
- if (watchdog_init_timeout(&wdev->wdog, timer_margin, &pdev->dev) < 0)
- wdev->wdog.timeout = TIMER_MARGIN_DEFAULT;
+ watchdog_init_timeout(&wdev->wdog, timer_margin, &pdev->dev);
watchdog_set_nowayout(&wdev->wdog, nowayout);
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 0529aed158a4..8e261799c84e 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -78,7 +78,7 @@
#define WDOG_COUNTER_RATE 13000000 /*the counter clock is 13 MHz fixed */
static bool nowayout = WATCHDOG_NOWAYOUT;
-static unsigned int heartbeat = DEFAULT_HEARTBEAT;
+static unsigned int heartbeat;
static DEFINE_SPINLOCK(io_lock);
static void __iomem *wdt_base;
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
index 831ef83f6de1..514db5cc1595 100644
--- a/drivers/watchdog/renesas_wdt.c
+++ b/drivers/watchdog/renesas_wdt.c
@@ -16,6 +16,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/smp.h>
+#include <linux/sys_soc.h>
#include <linux/watchdog.h>
#define RWTCNT 0
@@ -49,6 +51,7 @@ struct rwdt_priv {
void __iomem *base;
struct watchdog_device wdev;
unsigned long clk_rate;
+ u16 time_left;
u8 cks;
};
@@ -107,8 +110,19 @@ static unsigned int rwdt_get_timeleft(struct watchdog_device *wdev)
return DIV_BY_CLKS_PER_SEC(priv, 65536 - val);
}
+static int rwdt_restart(struct watchdog_device *wdev, unsigned long action,
+ void *data)
+{
+ struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ rwdt_start(wdev);
+ rwdt_write(priv, 0xffff, RWTCNT);
+ return 0;
+}
+
static const struct watchdog_info rwdt_ident = {
- .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
+ WDIOF_CARDRESET,
.identity = "Renesas WDT Watchdog",
};
@@ -118,8 +132,47 @@ static const struct watchdog_ops rwdt_ops = {
.stop = rwdt_stop,
.ping = rwdt_init_timeout,
.get_timeleft = rwdt_get_timeleft,
+ .restart = rwdt_restart,
+};
+
+#if defined(CONFIG_ARCH_RCAR_GEN2) && defined(CONFIG_SMP)
+/*
+ * Watchdog-reset integration is broken on early revisions of R-Car Gen2 SoCs
+ */
+static const struct soc_device_attribute rwdt_quirks_match[] = {
+ {
+ .soc_id = "r8a7790",
+ .revision = "ES1.*",
+ .data = (void *)1, /* needs single CPU */
+ }, {
+ .soc_id = "r8a7791",
+ .revision = "ES[12].*",
+ .data = (void *)1, /* needs single CPU */
+ }, {
+ .soc_id = "r8a7792",
+ .revision = "*",
+ .data = (void *)0, /* needs SMP disabled */
+ },
+ { /* sentinel */ }
};
+static bool rwdt_blacklisted(struct device *dev)
+{
+ const struct soc_device_attribute *attr;
+
+ attr = soc_device_match(rwdt_quirks_match);
+ if (attr && setup_max_cpus > (uintptr_t)attr->data) {
+ dev_info(dev, "Watchdog blacklisted on %s %s\n", attr->soc_id,
+ attr->revision);
+ return true;
+ }
+
+ return false;
+}
+#else /* !CONFIG_ARCH_RCAR_GEN2 || !CONFIG_SMP */
+static inline bool rwdt_blacklisted(struct device *dev) { return false; }
+#endif /* !CONFIG_ARCH_RCAR_GEN2 || !CONFIG_SMP */
+
static int rwdt_probe(struct platform_device *pdev)
{
struct rwdt_priv *priv;
@@ -128,6 +181,9 @@ static int rwdt_probe(struct platform_device *pdev)
unsigned long clks_per_sec;
int ret, i;
+ if (rwdt_blacklisted(&pdev->dev))
+ return -ENODEV;
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -142,9 +198,10 @@ static int rwdt_probe(struct platform_device *pdev)
return PTR_ERR(clk);
pm_runtime_enable(&pdev->dev);
-
pm_runtime_get_sync(&pdev->dev);
priv->clk_rate = clk_get_rate(clk);
+ priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) &
+ RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0;
pm_runtime_put(&pdev->dev);
if (!priv->clk_rate) {
@@ -176,6 +233,7 @@ static int rwdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
watchdog_set_drvdata(&priv->wdev, priv);
watchdog_set_nowayout(&priv->wdev, nowayout);
+ watchdog_set_restart_priority(&priv->wdev, 0);
/* This overrides the default timeout only if DT configuration was found */
ret = watchdog_init_timeout(&priv->wdev, 0, &pdev->dev);
@@ -203,12 +261,32 @@ static int rwdt_remove(struct platform_device *pdev)
return 0;
}
-/*
- * This driver does also fit for R-Car Gen2 (r8a779[0-4]) WDT. However, for SMP
- * to work there, one also needs a RESET (RST) driver which does not exist yet
- * due to HW issues. This needs to be solved before adding compatibles here.
- */
+static int __maybe_unused rwdt_suspend(struct device *dev)
+{
+ struct rwdt_priv *priv = dev_get_drvdata(dev);
+
+ if (watchdog_active(&priv->wdev)) {
+ priv->time_left = readw(priv->base + RWTCNT);
+ rwdt_stop(&priv->wdev);
+ }
+ return 0;
+}
+
+static int __maybe_unused rwdt_resume(struct device *dev)
+{
+ struct rwdt_priv *priv = dev_get_drvdata(dev);
+
+ if (watchdog_active(&priv->wdev)) {
+ rwdt_start(&priv->wdev);
+ rwdt_write(priv, priv->time_left, RWTCNT);
+ }
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rwdt_pm_ops, rwdt_suspend, rwdt_resume);
+
static const struct of_device_id rwdt_ids[] = {
+ { .compatible = "renesas,rcar-gen2-wdt", },
{ .compatible = "renesas,rcar-gen3-wdt", },
{ /* sentinel */ }
};
@@ -218,6 +296,7 @@ static struct platform_driver rwdt_driver = {
.driver = {
.name = "renesas_wdt",
.of_match_table = rwdt_ids,
+ .pm = &rwdt_pm_ops,
},
.probe = rwdt_probe,
.remove = rwdt_remove,
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index 0ae947c3d7bc..255169916dbb 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -33,7 +33,7 @@ struct sama5d4_wdt {
unsigned long last_ping;
};
-static int wdt_timeout = WDT_DEFAULT_TIMEOUT;
+static int wdt_timeout;
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(wdt_timeout, int, 0);
@@ -212,7 +212,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
return -ENOMEM;
wdd = &wdt->wdd;
- wdd->timeout = wdt_timeout;
+ wdd->timeout = WDT_DEFAULT_TIMEOUT;
wdd->info = &sama5d4_wdt_info;
wdd->ops = &sama5d4_wdt_ops;
wdd->min_timeout = MIN_WDT_TIMEOUT;
@@ -273,7 +273,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, wdt);
dev_info(&pdev->dev, "initialized (timeout = %d sec, nowayout = %d)\n",
- wdt_timeout, nowayout);
+ wdd->timeout, nowayout);
return 0;
}
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 43d0cbb7ba0b..814cdf539b0f 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -299,7 +299,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd,
if (sch311x_wdt_set_heartbeat(new_timeout))
return -EINVAL;
sch311x_wdt_keepalive();
- /* Fall */
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index 4eea351e09b0..ac0c9d2c4aee 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -29,7 +29,7 @@
#define SIRFSOC_WDT_MAX_TIMEOUT (10 * 60) /* 10 mins */
#define SIRFSOC_WDT_DEFAULT_TIMEOUT 30 /* 30 secs */
-static unsigned int timeout = SIRFSOC_WDT_DEFAULT_TIMEOUT;
+static unsigned int timeout;
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(timeout, uint, 0);
diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
index a8b280ff33e0..b4d484a42b70 100644
--- a/drivers/watchdog/sprd_wdt.c
+++ b/drivers/watchdog/sprd_wdt.c
@@ -154,8 +154,10 @@ static int sprd_wdt_enable(struct sprd_wdt *wdt)
if (ret)
return ret;
ret = clk_prepare_enable(wdt->rtc_enable);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(wdt->enable);
return ret;
+ }
sprd_wdt_unlock(wdt->base);
val = readl_relaxed(wdt->base + SPRD_WDT_CTRL);
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index e6100e447dd8..177829b379da 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ST's LPC Watchdog
*
@@ -5,11 +6,6 @@
*
* Author: David Paris <david.paris@st.com> for STMicroelectronics
* Lee Jones <lee.jones@linaro.org> for STMicroelectronics
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#include <linux/clk.h>
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 802e31b1416d..c6c73656997e 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -39,7 +39,7 @@
#define DRV_VERSION "1.0"
static bool nowayout = WATCHDOG_NOWAYOUT;
-static unsigned int timeout = WDT_MAX_TIMEOUT;
+static unsigned int timeout;
/*
* This structure stores the register offsets for different variants
diff --git a/drivers/watchdog/tangox_wdt.c b/drivers/watchdog/tangox_wdt.c
index d5fcce062920..b1de8297fa40 100644
--- a/drivers/watchdog/tangox_wdt.c
+++ b/drivers/watchdog/tangox_wdt.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
* SMP86xx/SMP87xx Watchdog driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/bitops.h>
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 9403c08816e3..877dd39bd41f 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/watchdog/uniphier_wdt.c b/drivers/watchdog/uniphier_wdt.c
index 0ea2339d9702..e20a7a459d69 100644
--- a/drivers/watchdog/uniphier_wdt.c
+++ b/drivers/watchdog/uniphier_wdt.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Watchdog driver for the UniPhier watchdog timer
*
* (c) Copyright 2014 Panasonic Corporation
* (c) Copyright 2016 Socionext Inc.
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/bitops.h>
@@ -212,11 +204,10 @@ static int uniphier_wdt_probe(struct platform_device *pdev)
wdev->wdt_dev.ops = &uniphier_wdt_ops;
wdev->wdt_dev.max_timeout = WDT_PERIOD_MAX;
wdev->wdt_dev.min_timeout = WDT_PERIOD_MIN;
+ wdev->wdt_dev.timeout = WDT_DEFAULT_TIMEOUT;
wdev->wdt_dev.parent = dev;
- if (watchdog_init_timeout(&wdev->wdt_dev, timeout, dev) < 0) {
- wdev->wdt_dev.timeout = WDT_DEFAULT_TIMEOUT;
- }
+ watchdog_init_timeout(&wdev->wdt_dev, timeout, dev);
watchdog_set_nowayout(&wdev->wdt_dev, nowayout);
watchdog_stop_on_reboot(&wdev->wdt_dev);
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 20e2bba10400..672b61a7f9a3 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -427,7 +427,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
wdt_keepalive();
- /* Fall */
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, uarg.i);
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index db0da7ea4fd8..93c5b610e264 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -178,7 +178,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd,
timeout = new_timeout;
wafwdt_stop();
wafwdt_start();
- /* Fall */
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 1ddc1f742cd4..116c2f47b463 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for the wm831x PMICs
*
* Copyright (C) 2009 Wolfson Microelectronics
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation
*/
#include <linux/module.h>
diff --git a/drivers/watchdog/wm8350_wdt.c b/drivers/watchdog/wm8350_wdt.c
index 4ab4b8347d45..33c62d51f00a 100644
--- a/drivers/watchdog/wm8350_wdt.c
+++ b/drivers/watchdog/wm8350_wdt.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for the wm8350
*
* Copyright (C) 2007, 2008 Wolfson Microelectronics <linux@wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c
index 89d9744ece61..ed593d1042a6 100644
--- a/drivers/xen/xen-pciback/conf_space_quirks.c
+++ b/drivers/xen/xen-pciback/conf_space_quirks.c
@@ -95,7 +95,7 @@ int xen_pcibk_config_quirks_init(struct pci_dev *dev)
struct xen_pcibk_config_quirk *quirk;
int ret = 0;
- quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
+ quirk = kzalloc(sizeof(*quirk), GFP_KERNEL);
if (!quirk) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 9e480fdebe1f..59661db144e5 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -71,7 +71,7 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
dev_dbg(&dev->dev, "pcistub_device_alloc\n");
- psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
+ psdev = kzalloc(sizeof(*psdev), GFP_KERNEL);
if (!psdev)
return NULL;
@@ -364,7 +364,7 @@ static int pcistub_init_device(struct pci_dev *dev)
* here and then to call kfree(pci_get_drvdata(psdev->dev)).
*/
dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
- + strlen(pci_name(dev)) + 1, GFP_ATOMIC);
+ + strlen(pci_name(dev)) + 1, GFP_KERNEL);
if (!dev_data) {
err = -ENOMEM;
goto out;
@@ -577,7 +577,7 @@ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
if (!match) {
- pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC);
+ pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
if (!pci_dev_id) {
err = -ENOMEM;
goto out;
@@ -1149,7 +1149,7 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func,
}
dev = psdev->dev;
- field = kzalloc(sizeof(*field), GFP_ATOMIC);
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
if (!field) {
err = -ENOMEM;
goto out;
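A brief note on the xen-pciback hunks above: these allocations happen in process context (driver probe and sysfs writes), where sleeping is allowed, so GFP_KERNEL is the appropriate strength; GFP_ATOMIC is reserved for contexts that must not sleep and draws on scarcer reserves. An illustrative contrast, not code from xen-pciback:

#include <linux/interrupt.h>
#include <linux/slab.h>

struct foo_state { int id; };

/* Probe / sysfs path: process context, may sleep -> GFP_KERNEL. */
static struct foo_state *foo_alloc_in_probe(void)
{
	return kzalloc(sizeof(struct foo_state), GFP_KERNEL);
}

/* Hard-IRQ path: must not sleep -> GFP_ATOMIC (or preallocate earlier). */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_state *evt = kzalloc(sizeof(*evt), GFP_ATOMIC);

	if (evt)
		kfree(evt);	/* placeholder: a real handler would queue it */

	return IRQ_HANDLED;
}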
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 0d6d9264d6a9..c3e201025ef0 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -403,7 +403,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
{
struct {
struct xsd_sockmsg hdr;
- const char body[16];
+ char body[16];
} msg;
int rc;
@@ -412,6 +412,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
msg.hdr.len = strlen(reply) + 1;
if (msg.hdr.len > sizeof(msg.body))
return -E2BIG;
+ memcpy(&msg.body, reply, msg.hdr.len);
mutex_lock(&u->reply_mutex);
rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);